Update main.py
main.py
CHANGED
@@ -455,6 +455,18 @@ class Blackbox:
             "userSelectedModel": cls.userSelectedModel.get(model, model)
         }
 
+        headers_chat = {
+            'Accept': 'text/x-component',
+            'Content-Type': 'text/plain;charset=UTF-8',
+            'Referer': f'{cls.url}/chat/{chat_id}?model={model}',
+            'next-action': next_action,
+            'next-router-state-tree': next_router_state_tree,
+            'next-url': '/'
+        }
+        headers_chat_combined = {**common_headers, **headers_chat}
+
+        data_chat = '[]'
+
         async with ClientSession(headers=common_headers, timeout=ClientTimeout(total=60)) as session:
             try:
                 async with session.post(
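The headers_chat_combined line relies on Python's dict-unpacking merge, where keys from the right-hand dict win on collision, so the chat-specific headers override any shared defaults from common_headers. A quick self-contained illustration of that semantics (the values here are placeholders, not the actual header set):

    # Dict-merge semantics used above: later unpacked dicts override earlier ones.
    common = {'Accept': '*/*', 'User-Agent': 'Mozilla/5.0'}
    chat = {'Accept': 'text/x-component'}
    combined = {**common, **chat}
    assert combined['Accept'] == 'text/x-component'   # chat-specific value wins
    assert combined['User-Agent'] == 'Mozilla/5.0'    # shared value preserved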
@@ -610,17 +622,25 @@ async def chat_completions(request: ChatRequest, req: Request, api_key: str = De
 
     if request.stream:
         # Streaming response
-        async def
+        async def event_generator():
             async for chunk in Blackbox.create_async_generator(
                 model=request.model,
                 messages=[{"role": msg.role, "content": msg.content} for msg in request.messages],
                 proxy=None,  # Add proxy if needed
                 websearch=False  # Modify if websearch is needed
             ):
-
+                if isinstance(chunk, ImageResponseModel):
+                    # Yield image URLs as plain text in SSE format
+                    yield f"data: Image URL: {chunk.images}\n\n"
+                else:
+                    # Ensure chunk is a string and yield as plain text in SSE format
+                    if isinstance(chunk, str):
+                        yield f"data: {chunk}\n\n"
+                    else:
+                        yield f"data: {str(chunk)}\n\n"
 
         logger.info(f"Initiating streaming response for API key: {api_key} | IP: {client_ip}")
-        return StreamingResponse(
+        return StreamingResponse(event_generator(), media_type='text/event-stream')
     else:
         # Non-streaming response
         response_content = await Blackbox.generate_response(
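Each yielded chunk is framed as a server-sent event (a "data: ..." line followed by a blank line) and the response is served with the text/event-stream media type, so any SSE-aware client can read it. A minimal consumer sketch using aiohttp; the /v1/chat/completions route, port, and model name are assumptions, since the route decorator sits outside this hunk:

    import asyncio
    import aiohttp

    async def consume_stream():
        payload = {
            "model": "blackboxai",  # placeholder model name
            "stream": True,
            "messages": [{"role": "user", "content": "Hello"}],
        }
        async with aiohttp.ClientSession() as session:
            # Route and port are assumptions; adjust to the actual deployment.
            async with session.post("http://localhost:8000/v1/chat/completions",
                                    json=payload) as resp:
                async for raw_line in resp.content:  # StreamReader yields byte lines
                    line = raw_line.decode("utf-8").strip()
                    if line.startswith("data: "):
                        print(line[len("data: "):])

    asyncio.run(consume_stream())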
@@ -632,7 +652,25 @@ async def chat_completions(request: ChatRequest, req: Request, api_key: str = De
 
         logger.info(f"Completed response generation for API key: {api_key} | IP: {client_ip}")
         return {
-            "
+            "id": f"chatcmpl-{uuid.uuid4()}",
+            "object": "chat.completion",
+            "created": int(datetime.now().timestamp()),
+            "model": request.model,
+            "choices": [
+                {
+                    "index": 0,
+                    "message": {
+                        "role": "assistant",
+                        "content": response_content
+                    },
+                    "finish_reason": "stop"
+                }
+            ],
+            "usage": {
+                "prompt_tokens": sum(len(msg.content.split()) for msg in request.messages),
+                "completion_tokens": len(response_content.split()),
+                "total_tokens": sum(len(msg.content.split()) for msg in request.messages) + len(response_content.split())
+            },
         }
     except ModelNotWorkingException as e:
         logger.warning(f"Model not working: {e} | IP: {client_ip}")
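One caveat on the usage block: prompt_tokens and completion_tokens are computed by whitespace-splitting, so they are rough word counts rather than real token counts. If OpenAI-compatible accuracy mattered, a tokenizer could be swapped in; a hedged sketch using the third-party tiktoken package (not a dependency of this commit, and cl100k_base is an assumed encoding):

    # Optional refinement, not part of this commit: real token counts via tiktoken.
    import tiktoken

    def count_tokens(text: str, encoding_name: str = "cl100k_base") -> int:
        encoding = tiktoken.get_encoding(encoding_name)
        return len(encoding.encode(text))

    # e.g. "prompt_tokens": sum(count_tokens(msg.content) for msg in request.messages)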