Update main.py
main.py
CHANGED
@@ -14,6 +14,7 @@ from aiohttp import ClientSession, ClientResponseError
 from fastapi import FastAPI, HTTPException, Request, Depends, Header
 from fastapi.responses import JSONResponse
 from pydantic import BaseModel
+from datetime import datetime

 # Configure logging
 logging.basicConfig(
@@ -38,8 +39,8 @@ rate_limit_store = defaultdict(lambda: {"count": 0, "timestamp": time.time()})
 CLEANUP_INTERVAL = 60  # seconds
 RATE_LIMIT_WINDOW = 60  # seconds

-# Define ImageResponse
-class ImageResponse:
+# Define ImageResponse using Pydantic
+class ImageResponse(BaseModel):
     images: str
     alt: str

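Moving ImageResponse onto Pydantic's BaseModel means the images/alt fields are validated at construction time and the object gains serialization for free. A standalone sketch of the behavior the change buys (not part of the diff):

    from pydantic import BaseModel, ValidationError

    class ImageResponse(BaseModel):
        images: str  # image URL
        alt: str     # alt text

    resp = ImageResponse(images="https://example.com/img.png", alt="Generated Image")
    print(resp)  # images='https://example.com/img.png' alt='Generated Image'

    try:
        ImageResponse(images=None, alt="broken")  # None is rejected for a str field
    except ValidationError:
        print("validation failed")  # non-string input no longer slips through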
@@ -302,8 +303,198 @@ class Blackbox:
         except Exception as e:
             return f"Unexpected error during /api/chat request: {str(e)}"

-
-
+    @classmethod
+    async def create_async_generator(
+        cls,
+        model: str,
+        messages: List[Dict[str, str]],
+        proxy: Optional[str] = None,
+        websearch: bool = False,
+        **kwargs
+    ) -> AsyncGenerator[Union[str, ImageResponse], None]:
+        """
+        Creates an asynchronous generator for streaming responses from Blackbox AI.
+
+        Parameters:
+            model (str): Model to use for generating responses.
+            messages (List[Dict[str, str]]): Message history.
+            proxy (Optional[str]): Proxy URL, if needed.
+            websearch (bool): Enables or disables web search mode.
+            **kwargs: Additional keyword arguments.
+
+        Yields:
+            Union[str, ImageResponse]: Segments of the generated response or ImageResponse objects.
+        """
+        model = cls.get_model(model)
+
+        chat_id = cls.generate_random_string()
+        next_action = cls.generate_next_action()
+        next_router_state_tree = cls.generate_next_router_state_tree()
+
+        agent_mode = cls.agentMode.get(model, {})
+        trending_agent_mode = cls.trendingAgentMode.get(model, {})
+
+        prefix = cls.model_prefixes.get(model, "")
+
+        formatted_prompt = ""
+        for message in messages:
+            role = message.get('role', '').capitalize()
+            content = message.get('content', '')
+            if role and content:
+                formatted_prompt += f"{role}: {content}\n"
+
+        if prefix:
+            formatted_prompt = f"{prefix} {formatted_prompt}".strip()
+
+        referer_path = cls.model_referers.get(model, f"/?model={model}")
+        referer_url = f"{cls.url}{referer_path}"
+
+        common_headers = {
+            'accept': '*/*',
+            'accept-language': 'en-US,en;q=0.9',
+            'cache-control': 'no-cache',
+            'origin': cls.url,
+            'pragma': 'no-cache',
+            'priority': 'u=1, i',
+            'sec-ch-ua': '"Chromium";v="129", "Not=A?Brand";v="8"',
+            'sec-ch-ua-mobile': '?0',
+            'sec-ch-ua-platform': '"Linux"',
+            'sec-fetch-dest': 'empty',
+            'sec-fetch-mode': 'cors',
+            'sec-fetch-site': 'same-origin',
+            'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) '
+                          'AppleWebKit/537.36 (KHTML, like Gecko) '
+                          'Chrome/129.0.0.0 Safari/537.36'
+        }
+
+        headers_api_chat = {
+            'Content-Type': 'application/json',
+            'Referer': referer_url
+        }
+        headers_api_chat_combined = {**common_headers, **headers_api_chat}
+
+        payload_api_chat = {
+            "messages": [
+                {
+                    "id": chat_id,
+                    "content": formatted_prompt,
+                    "role": "user"
+                }
+            ],
+            "id": chat_id,
+            "previewToken": None,
+            "userId": None,
+            "codeModelMode": True,
+            "agentMode": agent_mode,
+            "trendingAgentMode": trending_agent_mode,
+            "isMicMode": False,
+            "userSystemPrompt": None,
+            "maxTokens": 1024,
+            "playgroundTopP": 0.9,
+            "playgroundTemperature": 0.5,
+            "isChromeExt": False,
+            "githubToken": None,
+            "clickedAnswer2": False,
+            "clickedAnswer3": False,
+            "clickedForceWebSearch": False,
+            "visitFromDelta": False,
+            "mobileClient": False,
+            "webSearchMode": websearch,
+            "userSelectedModel": cls.userSelectedModel.get(model, model)
+        }
+
+        headers_chat = {
+            'Accept': 'text/x-component',
+            'Content-Type': 'text/plain;charset=UTF-8',
+            'Referer': f'{cls.url}/chat/{chat_id}?model={model}',
+            'next-action': next_action,
+            'next-router-state-tree': next_router_state_tree,
+            'next-url': '/'
+        }
+        headers_chat_combined = {**common_headers, **headers_chat}

+        data_chat = '[]'
+
+        async with ClientSession(headers=common_headers) as session:
+            try:
+                async with session.post(
+                    cls.api_endpoint,
+                    headers=headers_api_chat_combined,
+                    json=payload_api_chat,
+                    proxy=proxy
+                ) as response_api_chat:
+                    response_api_chat.raise_for_status()
+                    text = await response_api_chat.text()
+                    cleaned_response = cls.clean_response(text)
+
+                    if model in cls.image_models:
+                        match = re.search(r'!\[.*?\]\((https?://[^\)]+)\)', cleaned_response)
+                        if match:
+                            image_url = match.group(1)
+                            image_response = ImageResponse(images=image_url, alt="Generated Image")
+                            yield image_response
+                        else:
+                            yield cleaned_response
+                    else:
+                        if websearch:
+                            match = re.search(r'\$~~~\$(.*?)\$~~~\$', cleaned_response, re.DOTALL)
+                            if match:
+                                source_part = match.group(1).strip()
+                                answer_part = cleaned_response[match.end():].strip()
+                                try:
+                                    sources = json.loads(source_part)
+                                    source_formatted = "**Source:**\n"
+                                    for item in sources:
+                                        title = item.get('title', 'No Title')
+                                        link = item.get('link', '#')
+                                        position = item.get('position', '')
+                                        source_formatted += f"{position}. [{title}]({link})\n"
+                                    final_response = f"{answer_part}\n\n{source_formatted}"
+                                except json.JSONDecodeError:
+                                    final_response = f"{answer_part}\n\nSource information is unavailable."
+                            else:
+                                final_response = cleaned_response
+                        else:
+                            if '$~~~$' in cleaned_response:
+                                final_response = cleaned_response.split('$~~~$')[0].strip()
+                            else:
+                                final_response = cleaned_response
+
+                        yield final_response
+            except ClientResponseError as e:
+                error_text = f"Error {e.status}: {e.message}"
+                try:
+                    error_response = await e.response.text()
+                    cleaned_error = cls.clean_response(error_response)
+                    error_text += f" - {cleaned_error}"
+                except Exception:
+                    pass
+                yield error_text
+            except Exception as e:
+                yield f"Unexpected error during /api/chat request: {str(e)}"
+
+            chat_url = f'{cls.url}/chat/{chat_id}?model={model}'
+
+            try:
+                async with session.post(
+                    chat_url,
+                    headers=headers_chat_combined,
+                    data=data_chat,
+                    proxy=proxy
+                ) as response_chat:
+                    response_chat.raise_for_status()
+                    pass
+            except ClientResponseError as e:
+                error_text = f"Error {e.status}: {e.message}"
+                try:
+                    error_response = await e.response.text()
+                    cleaned_error = cls.clean_response(error_response)
+                    error_text += f" - {cleaned_error}"
+                except Exception:
+                    pass
+                yield error_text
+            except Exception as e:
+                yield f"Unexpected error during /chat/{chat_id} request: {str(e)}"

 # Custom exception for model not working
 class ModelNotWorkingException(Exception):
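The method above is an async generator: callers iterate it with async for, and string chunks and ImageResponse objects may both be yielded. A minimal consumption sketch, assuming it runs somewhere that Blackbox and ImageResponse are importable; the model name is illustrative only. Note in passing that aiohttp's ClientResponseError does not carry a .response attribute, so the inner await e.response.text() calls in the error handlers will themselves raise and be silently swallowed by the bare except blocks.

    import asyncio

    async def demo():
        messages = [{"role": "user", "content": "Hello!"}]
        # "blackboxai" is a placeholder model name for illustration only
        async for chunk in Blackbox.create_async_generator(model="blackboxai", messages=messages):
            if isinstance(chunk, ImageResponse):
                print(f"[image] {chunk.images} (alt: {chunk.alt})")
            else:
                print(chunk, end="")

    asyncio.run(demo())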
@@ -488,6 +679,49 @@ async def http_exception_handler(request: Request, exc: HTTPException):
         },
     )

+# Optional: Additional Endpoint for Streaming Responses (Using create_async_generator)
+# This endpoint leverages the new create_async_generator method for streaming responses.
+# Note: Streaming responses may require clients that support Server-Sent Events (SSE) or WebSockets.
+
+@app.post("/v1/chat/completions/stream", dependencies=[Depends(rate_limiter_per_ip)])
+async def chat_completions_stream(request: ChatRequest, req: Request, api_key: str = Depends(get_api_key)):
+    client_ip = req.client.host
+    # Redact user messages only for logging purposes
+    redacted_messages = [{"role": msg.role, "content": "[redacted]"} for msg in request.messages]
+
+    logger.info(f"Received streaming chat completions request from API key: {api_key} | IP: {client_ip} | Model: {request.model} | Messages: {redacted_messages}")
+
+    try:
+        # Validate that the requested model is available
+        if request.model not in Blackbox.models and request.model not in Blackbox.model_aliases:
+            logger.warning(f"Attempt to use unavailable model: {request.model} from IP: {client_ip}")
+            raise HTTPException(status_code=400, detail="Requested model is not available.")
+
+        # Create an asynchronous generator for the response
+        async_generator = Blackbox.create_async_generator(
+            model=request.model,
+            messages=[{"role": msg.role, "content": msg.content} for msg in request.messages],
+            temperature=request.temperature,
+            max_tokens=request.max_tokens
+        )
+
+        logger.info(f"Started streaming response for API key: {api_key} | IP: {client_ip}")
+        return JSONResponse(
+            status_code=200,
+            content={"stream": True, "message": "Streaming started."}
+        )
+        # Note: Implement proper streaming responses using StreamingResponse or WebSockets if needed.
+
+    except ModelNotWorkingException as e:
+        logger.warning(f"Model not working: {e} | IP: {client_ip}")
+        raise HTTPException(status_code=503, detail=str(e))
+    except HTTPException as he:
+        logger.warning(f"HTTPException: {he.detail} | IP: {client_ip}")
+        raise he
+    except Exception as e:
+        logger.exception(f"An unexpected error occurred while processing the streaming chat completions request from IP: {client_ip}.")
+        raise HTTPException(status_code=500, detail=str(e))
+
 if __name__ == "__main__":
     import uvicorn
     uvicorn.run(app, host="0.0.0.0", port=8000)
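As the in-code note concedes, this endpoint does not actually stream yet: the generator is created but never consumed, and the handler returns a one-shot JSONResponse placeholder. Below is a sketch of how the handler could instead be wired around FastAPI's StreamingResponse, under the assumptions that SSE framing is acceptable and that image chunks may be rendered as markdown links; the validation and error handling from the handler above are elided for brevity.

    from fastapi.responses import StreamingResponse

    @app.post("/v1/chat/completions/stream", dependencies=[Depends(rate_limiter_per_ip)])
    async def chat_completions_stream(request: ChatRequest, req: Request, api_key: str = Depends(get_api_key)):
        async def sse_events():
            async for chunk in Blackbox.create_async_generator(
                model=request.model,
                messages=[{"role": m.role, "content": m.content} for m in request.messages],
            ):
                # Render ImageResponse chunks as markdown image links (an assumption);
                # plain string chunks pass through unchanged.
                text = chunk if isinstance(chunk, str) else f"![{chunk.alt}]({chunk.images})"
                yield f"data: {text}\n\n"  # minimal SSE framing
        return StreamingResponse(sse_events(), media_type="text/event-stream")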
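For completeness, a client-side call against the new route might look like the sketch below, reusing aiohttp since the service already depends on it. The URL assumes the default uvicorn host and port from this file; the Authorization header name and bearer scheme are assumptions, so match whatever get_api_key actually reads.

    import asyncio
    from aiohttp import ClientSession

    async def call_stream_endpoint():
        payload = {
            "model": "blackboxai",  # placeholder model name
            "messages": [{"role": "user", "content": "Hello!"}],
        }
        # Header name/scheme are assumptions; align with get_api_key's implementation
        headers = {"Authorization": "Bearer YOUR_API_KEY"}
        async with ClientSession() as session:
            async with session.post(
                "http://localhost:8000/v1/chat/completions/stream",
                json=payload,
                headers=headers,
            ) as resp:
                print(resp.status, await resp.json())

    asyncio.run(call_stream_endpoint())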