Niansuh committed (verified)
Commit dc7c293 · 1 Parent(s): ca8e367

Update main.py

Files changed (1):
  1. main.py +87 -57
main.py CHANGED
@@ -9,6 +9,7 @@ import asyncio
 import time
 from collections import defaultdict
 from typing import List, Dict, Any, Optional, Union, AsyncGenerator
+from datetime import datetime
 
 from aiohttp import ClientSession, ClientResponseError
 from fastapi import FastAPI, HTTPException, Request, Depends, Header
@@ -38,14 +39,6 @@ rate_limit_store = defaultdict(lambda: {"count": 0, "timestamp": time.time()})
 CLEANUP_INTERVAL = 60 # seconds
 RATE_LIMIT_WINDOW = 60 # seconds
 
-# Define ImageResponse for handling image outputs
-class ImageResponseModel(BaseModel):
-    images: str
-    alt: str
-
-# Define Messages type for better type hinting
-Messages = List[Dict[str, Any]]
-
 class Blackbox:
     label = "Blackbox AI"
     url = "https://www.blackbox.ai"
@@ -200,12 +193,10 @@ class Blackbox:
         cls,
         model: str,
         messages: List[Dict[str, str]],
-        temperature: Optional[float] = 1.0,
-        max_tokens: Optional[int] = 1024,
-        websearch: bool = False,
         proxy: Optional[str] = None,
+        websearch: bool = False,
         **kwargs
-    ) -> Union[str, ImageResponseModel]:
+    ) -> AsyncGenerator[Union[str, Dict[str, Any]], None]:
         model = cls.get_model(model)
         chat_id = cls.generate_random_string()
         next_action = cls.generate_next_action()
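`generate_response` is now an async generator rather than an awaitable coroutine, so every call site has to iterate it instead of awaiting it. A minimal consumption sketch (model name and message payload are placeholders, assuming `Blackbox` from this main.py is importable):

```python
import asyncio

async def demo() -> None:
    messages = [{"role": "user", "content": "Hello"}]  # placeholder payload
    # Iterate the generator; `await Blackbox.generate_response(...)` no longer works.
    async for chunk in Blackbox.generate_response(model="blackboxai", messages=messages):
        print(chunk)  # e.g. {"type": "text", "content": "..."}

asyncio.run(demo())
```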
@@ -269,9 +260,9 @@ class Blackbox:
             "trendingAgentMode": trending_agent_mode,
             "isMicMode": False,
             "userSystemPrompt": None,
-            "maxTokens": max_tokens,
+            "maxTokens": 1024,
             "playgroundTopP": 0.9,
-            "playgroundTemperature": temperature,
+            "playgroundTemperature": 0.5,
             "isChromeExt": False,
             "githubToken": None,
             "clickedAnswer2": False,
@@ -283,6 +274,18 @@ class Blackbox:
             "userSelectedModel": cls.userSelectedModel.get(model, model)
         }
 
+        headers_chat = {
+            'Accept': 'text/x-component',
+            'Content-Type': 'text/plain;charset=UTF-8',
+            'Referer': f'{cls.url}/chat/{chat_id}?model={model}',
+            'next-action': next_action,
+            'next-router-state-tree': next_router_state_tree,
+            'next-url': '/'
+        }
+        headers_chat_combined = {**common_headers, **headers_chat}
+
+        data_chat = '[]'
+
         async with ClientSession(headers=common_headers) as session:
             try:
                 async with session.post(
@@ -299,10 +302,9 @@ class Blackbox:
                         match = re.search(r'!\[.*?\]\((https?://[^\)]+)\)', cleaned_response)
                         if match:
                             image_url = match.group(1)
-                            image_response = ImageResponseModel(images=image_url, alt="Generated Image")
-                            return image_response
+                            yield {"type": "image", "url": image_url}
                         else:
-                            return cleaned_response
+                            yield {"type": "text", "content": cleaned_response}
                     else:
                         if websearch:
                             match = re.search(r'\$~~~\$(.*?)\$~~~\$', cleaned_response, re.DOTALL)
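The yielded dicts form a small implicit protocol: `{"type": "text" | "image" | "error", ...}`. If stricter typing is wanted, one optional way to pin it down (not part of this commit) is a `TypedDict`:

```python
from typing import AsyncGenerator, Literal, TypedDict, Union

class TextChunk(TypedDict):
    type: Literal["text", "error"]
    content: str

class ImageChunk(TypedDict):
    type: Literal["image"]
    url: str

# The signature above could then narrow its annotation to:
# AsyncGenerator[Union[TextChunk, ImageChunk], None]
```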
@@ -328,7 +330,30 @@ class Blackbox:
                             else:
                                 final_response = cleaned_response
 
-                            return final_response
+                            yield {"type": "text", "content": final_response}
+            except ClientResponseError as e:
+                error_text = f"Error {e.status}: {e.message}"
+                try:
+                    error_response = await e.response.text()
+                    cleaned_error = cls.clean_response(error_response)
+                    error_text += f" - {cleaned_error}"
+                except Exception:
+                    pass
+                yield {"type": "error", "content": error_text}
+            except Exception as e:
+                yield {"type": "error", "content": f"Unexpected error during /api/chat request: {str(e)}"}
+
+            chat_url = f'{cls.url}/chat/{chat_id}?model={model}'
+
+            try:
+                async with session.post(
+                    chat_url,
+                    headers=headers_chat_combined,
+                    data=data_chat,
+                    proxy=proxy
+                ) as response_chat:
+                    response_chat.raise_for_status()
+                    pass
             except ClientResponseError as e:
                 error_text = f"Error {e.status}: {e.message}"
                 try:
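One detail worth flagging in both error handlers: `aiohttp.ClientResponseError` exposes `request_info`, `history`, `status`, `message`, and `headers`, but no `response` attribute, so `await e.response.text()` raises `AttributeError`, which the inner `except Exception: pass` then hides; the `" - {cleaned_error}"` suffix is never appended. A drop-in sketch that captures the body by reading it before raising, using the same names as the surrounding code:

```python
async with session.post(chat_url, headers=headers_chat_combined,
                        data=data_chat, proxy=proxy) as response_chat:
    if response_chat.status >= 400:
        body = await response_chat.text()  # the body is still readable here
        yield {"type": "error",
               "content": f"Error {response_chat.status}: {cls.clean_response(body)}"}
```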
@@ -337,9 +362,9 @@ class Blackbox:
                     error_text += f" - {cleaned_error}"
                 except Exception:
                     pass
-                return error_text
+                yield {"type": "error", "content": error_text}
             except Exception as e:
-                return f"Unexpected error during /api/chat request: {str(e)}"
+                yield {"type": "error", "content": f"Unexpected error during /chat/{chat_id} request: {str(e)}"}
 
 # Custom exception for model not working
 class ModelNotWorkingException(Exception):
@@ -433,12 +458,13 @@ class ChatRequest(BaseModel):
     temperature: Optional[float] = 1.0
     top_p: Optional[float] = 1.0
     n: Optional[int] = 1
-    max_tokens: Optional[int] = 1024
+    max_tokens: Optional[int] = None
     presence_penalty: Optional[float] = 0.0
     frequency_penalty: Optional[float] = 0.0
     logit_bias: Optional[Dict[str, float]] = None
     user: Optional[str] = None
-    websearch: Optional[bool] = False # Added websearch parameter
+    stream: Optional[bool] = False
+    websearch: Optional[bool] = False
 
 @app.post("/v1/chat/completions", dependencies=[Depends(rate_limiter_per_ip)])
 async def chat_completions(request: ChatRequest, req: Request, api_key: str = Depends(get_api_key)):
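With `stream` on the request model, a client can opt into SSE per call. An illustrative request body (model name is a placeholder; omitted fields keep their defaults):

```python
import json

payload = {
    "model": "blackboxai",  # placeholder; must be in Blackbox.models
    "messages": [{"role": "user", "content": "Summarize aiohttp in one line."}],
    "stream": True,      # new: ask for a text/event-stream response
    "websearch": False,  # forwarded to Blackbox.generate_response
}
print(json.dumps(payload, indent=2))
```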
@@ -446,7 +472,7 @@ async def chat_completions(request: ChatRequest, req: Request, api_key: str = Depends(get_api_key)):
     # Redact user messages only for logging purposes
     redacted_messages = [{"role": msg.role, "content": "[redacted]"} for msg in request.messages]
 
-    logger.info(f"Received chat completions request from API key: {api_key} | IP: {client_ip} | Model: {request.model} | Messages: {redacted_messages} | Websearch: {request.websearch}")
+    logger.info(f"Received chat completions request from API key: {api_key} | IP: {client_ip} | Model: {request.model} | Messages: {redacted_messages}")
 
     try:
         # Validate that the requested model is available
@@ -454,8 +480,8 @@ async def chat_completions(request: ChatRequest, req: Request, api_key: str = Depends(get_api_key)):
             logger.warning(f"Attempt to use unavailable model: {request.model} from IP: {client_ip}")
             raise HTTPException(status_code=400, detail="Requested model is not available.")
 
-        # Generate response using the updated Blackbox class
-        response_content = await Blackbox.generate_response(
+        # Process the request with actual message content, but don't log it
+        response_generator = Blackbox.generate_response(
             model=request.model,
             messages=[{"role": msg.role, "content": msg.content} for msg in request.messages],
             temperature=request.temperature,
@@ -463,34 +489,32 @@ async def chat_completions(request: ChatRequest, req: Request, api_key: str = Depends(get_api_key)):
             websearch=request.websearch
         )
 
-        # Handle image responses
-        if isinstance(response_content, ImageResponseModel):
-            logger.info(f"Generated image for API key: {api_key} | IP: {client_ip}")
-            return {
-                "id": f"chatcmpl-{uuid.uuid4()}",
-                "object": "chat.completion",
-                "created": int(datetime.now().timestamp()),
-                "model": request.model,
-                "choices": [
-                    {
-                        "index": 0,
-                        "message": {
-                            "role": "assistant",
-                            "content": response_content.images,
-                            "alt": response_content.alt
-                        },
-                        "finish_reason": "stop"
-                    }
-                ],
-                "usage": {
-                    "prompt_tokens": sum(len(msg.content.split()) for msg in request.messages),
-                    "completion_tokens": len(response_content.images.split()),
-                    "total_tokens": sum(len(msg.content.split()) for msg in request.messages) + len(response_content.images.split())
-                },
-            }
+        if request.stream:
+            async def stream_response():
+                async for chunk in response_generator:
+                    if chunk["type"] == "text":
+                        yield f"data: {json.dumps({'choices': [{'delta': {'content': chunk['content']}}]})}\n\n"
+                    elif chunk["type"] == "image":
+                        yield f"data: {json.dumps({'choices': [{'delta': {'image': chunk['url']}}]})}\n\n"
+                    elif chunk["type"] == "error":
+                        yield f"data: {json.dumps({'error': chunk['content']})}\n\n"
+                yield "data: [DONE]\n\n"
+
+            return StreamingResponse(stream_response(), media_type="text/event-stream")
         else:
+            full_response = ""
+            image_url = None
+            async for chunk in response_generator:
+                if chunk["type"] == "text":
+                    full_response += chunk["content"]
+                elif chunk["type"] == "image":
+                    image_url = chunk["url"]
+                elif chunk["type"] == "error":
+                    raise HTTPException(status_code=500, detail=chunk["content"])
+
             logger.info(f"Completed response generation for API key: {api_key} | IP: {client_ip}")
-            return {
+
+            response = {
                 "id": f"chatcmpl-{uuid.uuid4()}",
                 "object": "chat.completion",
                 "created": int(datetime.now().timestamp()),
@@ -500,17 +524,23 @@ async def chat_completions(request: ChatRequest, req: Request, api_key: str = Depends(get_api_key)):
                         "index": 0,
                         "message": {
                             "role": "assistant",
-                            "content": response_content
+                            "content": full_response
                         },
                         "finish_reason": "stop"
                     }
                 ],
                 "usage": {
                     "prompt_tokens": sum(len(msg.content.split()) for msg in request.messages),
-                    "completion_tokens": len(response_content.split()),
-                    "total_tokens": sum(len(msg.content.split()) for msg in request.messages) + len(response_content.split())
+                    "completion_tokens": len(full_response.split()),
+                    "total_tokens": sum(len(msg.content.split()) for msg in request.messages) + len(full_response.split())
                 },
             }
+
+            if image_url:
+                response["choices"][0]["message"]["image"] = image_url
+
+            return response
+
     except ModelNotWorkingException as e:
         logger.warning(f"Model not working: {e} | IP: {client_ip}")
         raise HTTPException(status_code=503, detail=str(e))
@@ -523,14 +553,14 @@ async def chat_completions(request: ChatRequest, req: Request, api_key: str = Depends(get_api_key)):
 
 # Endpoint: GET /v1/models
 @app.get("/v1/models", dependencies=[Depends(rate_limiter_per_ip)])
-async def get_models(req: Request, api_key: str = Depends(get_api_key)):
+async def get_models(req: Request):
     client_ip = req.client.host
     logger.info(f"Fetching available models from IP: {client_ip}")
     return {"data": [{"id": model, "object": "model"} for model in Blackbox.models]}
 
 # Endpoint: GET /v1/health
 @app.get("/v1/health", dependencies=[Depends(rate_limiter_per_ip)])
-async def health_check(req: Request, api_key: str = Depends(get_api_key)):
+async def health_check(req: Request):
     client_ip = req.client.host
     logger.info(f"Health check requested from IP: {client_ip}")
     return {"status": "ok"}
@@ -554,4 +584,4 @@ async def http_exception_handler(request: Request, exc: HTTPException):
 
 if __name__ == "__main__":
     import uvicorn
-    uvicorn.run(app, host="0.0.0.0", port=8000)
+    uvicorn.run(app, host="0.0.0.0", port=8000)