Update main.py
main.py CHANGED
@@ -10,7 +10,7 @@ import time
 from collections import defaultdict
 from typing import List, Dict, Any, Optional, Union, AsyncGenerator
 
-from aiohttp import ClientSession, ClientResponseError
+from aiohttp import ClientSession, ClientResponseError, ClientTimeout
 from fastapi import FastAPI, HTTPException, Request, Depends, Header
 from fastapi.responses import JSONResponse, StreamingResponse
 from pydantic import BaseModel
@@ -315,7 +315,7 @@ class Blackbox:
             "userSelectedModel": cls.userSelectedModel.get(model, model)
         }
 
-        async with ClientSession(headers=common_headers) as session:
+        async with ClientSession(headers=common_headers, timeout=ClientTimeout(total=60)) as session:
            try:
                async with session.post(
                    cls.api_endpoint,
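Note: ClientTimeout (imported above) bounds the total wall-clock time of every request made through the session. A minimal, self-contained sketch of the same pattern — the URL here is a placeholder, not the real endpoint:

    import asyncio
    from aiohttp import ClientSession, ClientTimeout

    async def fetch_with_budget() -> str:
        # total=60 caps connect + send + read at 60 seconds overall;
        # exceeding it raises asyncio.TimeoutError.
        async with ClientSession(timeout=ClientTimeout(total=60)) as session:
            async with session.get("https://example.com") as resp:  # placeholder URL
                resp.raise_for_status()
                return await resp.text()

    print(asyncio.run(fetch_with_budget()))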
@@ -324,9 +324,25 @@ class Blackbox:
                    proxy=proxy
                ) as response_api_chat:
                    response_api_chat.raise_for_status()
-                   …
-                   …
-                   …
+                   # Instead of reading the entire response, iterate over chunks
+                   async for data in response_api_chat.content.iter_chunked(1024):
+                       decoded_data = data.decode('utf-8')
+                       cleaned_response = cls.clean_response(decoded_data)
+                       if model in cls.image_models:
+                           match = re.search(r'!\[.*?\]\((https?://[^\)]+)\)', cleaned_response)
+                           if match:
+                               image_url = match.group(1)
+                               image_response = ImageResponseModel(images=image_url, alt="Generated Image")
+                               return image_response.dict()
+                           else:
+                               return cleaned_response
+                       else:
+                           if '$~~~$' in cleaned_response:
+                               final_response = cleaned_response.split('$~~~$')[0].strip()
+                           else:
+                               final_response = cleaned_response
+
+                           return final_response
            except ClientResponseError as e:
                error_text = f"Error {e.status}: {e.message}"
                try:
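The new loop reads the body with iter_chunked(1024), which yields at most 1024 bytes at a time as they arrive. One caveat worth illustrating: a multi-byte UTF-8 character can straddle a chunk boundary, so per-chunk .decode('utf-8') may fail; an incremental decoder avoids that. A sketch under that assumption (placeholder URL):

    import asyncio
    import codecs
    from aiohttp import ClientSession

    async def read_in_chunks(url: str) -> str:
        # The incremental decoder tolerates UTF-8 sequences split across chunks.
        decoder = codecs.getincrementaldecoder("utf-8")()
        parts = []
        async with ClientSession() as session:
            async with session.get(url) as resp:
                resp.raise_for_status()
                async for chunk in resp.content.iter_chunked(1024):
                    parts.append(decoder.decode(chunk))
        parts.append(decoder.decode(b"", final=True))  # flush any tail bytes
        return "".join(parts)

    # asyncio.run(read_in_chunks("https://example.com"))  # placeholder URL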
@@ -347,7 +363,7 @@ class Blackbox:
        proxy: Optional[str] = None,
        websearch: bool = False,
        **kwargs
-   ) -> AsyncGenerator[
+   ) -> AsyncGenerator[str, None]:
        """
        Creates an asynchronous generator for streaming responses from Blackbox AI.
 
@@ -359,7 +375,7 @@ class Blackbox:
            **kwargs: Additional keyword arguments.
 
        Yields:
-           …
+           str: Segments of the generated response.
        """
        model = cls.get_model(model)
 
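The fixed annotation follows typing's AsyncGenerator[YieldType, SendType] convention: this generator yields str and is never sent values, hence AsyncGenerator[str, None]. A minimal standalone example:

    import asyncio
    from typing import AsyncGenerator

    async def stream_words(text: str) -> AsyncGenerator[str, None]:
        # Yields str; SendType is None because nothing is sent via asend().
        for word in text.split():
            yield word + "\n"

    async def main() -> None:
        async for piece in stream_words("streamed one word at a time"):
            print(piece, end="")

    asyncio.run(main())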
@@ -439,19 +455,7 @@ class Blackbox:
            "userSelectedModel": cls.userSelectedModel.get(model, model)
        }
 
-       headers_chat = {
-           'Accept': 'text/x-component',
-           'Content-Type': 'text/plain;charset=UTF-8',
-           'Referer': f'{cls.url}/chat/{chat_id}?model={model}',
-           'next-action': next_action,
-           'next-router-state-tree': next_router_state_tree,
-           'next-url': '/'
-       }
-       headers_chat_combined = {**common_headers, **headers_chat}
-
-       data_chat = '[]'
-
-       async with ClientSession(headers=common_headers) as session:
+       async with ClientSession(headers=common_headers, timeout=ClientTimeout(total=60)) as session:
            try:
                async with session.post(
                    cls.api_endpoint,
@@ -460,43 +464,25 @@ class Blackbox:
                    proxy=proxy
                ) as response_api_chat:
                    response_api_chat.raise_for_status()
-                   …
-                   …
-                   …
-                   …
-                   …
-                   …
-                           image_url = match.group(1)
-                           image_response = ImageResponseModel(images=image_url, alt="Generated Image")
-                           yield image_response
-                       else:
-                           yield cleaned_response
-                   else:
-                       if websearch:
-                           match = re.search(r'\$~~~\$(.*?)\$~~~\$', cleaned_response, re.DOTALL)
+                   # Iterate over the response in chunks
+                   async for data in response_api_chat.content.iter_any():
+                       decoded_data = data.decode('utf-8')
+                       cleaned_response = cls.clean_response(decoded_data)
+                       if model in cls.image_models:
+                           match = re.search(r'!\[.*?\]\((https?://[^\)]+)\)', cleaned_response)
                            if match:
-                               …
-                               …
-                               …
-                               sources = json.loads(source_part)
-                               source_formatted = "**Source:**\n"
-                               for item in sources:
-                                   title = item.get('title', 'No Title')
-                                   link = item.get('link', '#')
-                                   position = item.get('position', '')
-                                   source_formatted += f"{position}. [{title}]({link})\n"
-                               final_response = f"{answer_part}\n\n{source_formatted}"
-                           except json.JSONDecodeError:
-                               final_response = f"{answer_part}\n\nSource information is unavailable."
+                               image_url = match.group(1)
+                               image_response = ImageResponseModel(images=image_url, alt="Generated Image")
+                               yield f"Image URL: {image_response.images}\n"
                            else:
-                               …
+                               yield cleaned_response
                        else:
                            if '$~~~$' in cleaned_response:
                                final_response = cleaned_response.split('$~~~$')[0].strip()
                            else:
                                final_response = cleaned_response
 
-                           …
+                           yield f"{final_response}\n"
            except ClientResponseError as e:
                error_text = f"Error {e.status}: {e.message}"
                try:
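Here the generator switches to iter_any(), which yields whatever bytes are already buffered rather than fixed 1024-byte blocks, trading predictable chunk sizes for lower latency. A sketch of re-yielding such chunks from an async generator (placeholder URL; errors="replace" is one way to survive multi-byte characters split across chunks):

    import asyncio
    from typing import AsyncGenerator
    from aiohttp import ClientSession

    async def relay(url: str) -> AsyncGenerator[str, None]:
        async with ClientSession() as session:
            async with session.get(url) as resp:
                resp.raise_for_status()
                # iter_any() yields as soon as any bytes are available.
                async for chunk in resp.content.iter_any():
                    yield chunk.decode("utf-8", errors="replace")

    # async for piece in relay("https://example.com"): ...  # placeholder URL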
@@ -509,29 +495,6 @@ class Blackbox:
            except Exception as e:
                yield f"Unexpected error during /api/chat request: {str(e)}"
 
-           chat_url = f'{cls.url}/chat/{chat_id}?model={model}'
-
-           try:
-               async with session.post(
-                   chat_url,
-                   headers=headers_chat_combined,
-                   data=data_chat,
-                   proxy=proxy
-               ) as response_chat:
-                   response_chat.raise_for_status()
-                   pass
-           except ClientResponseError as e:
-               error_text = f"Error {e.status}: {e.message}"
-               try:
-                   error_response = await e.response.text()
-                   cleaned_error = cls.clean_response(error_response)
-                   error_text += f" - {cleaned_error}"
-               except Exception:
-                   pass
-               yield error_text
-           except Exception as e:
-               yield f"Unexpected error during /chat/{chat_id} request: {str(e)}"
-
 # Custom exception for model not working
 class ModelNotWorkingException(Exception):
     def __init__(self, model: str):
@@ -654,15 +617,7 @@ async def chat_completions(request: ChatRequest, req: Request, api_key: str = De
            proxy=None, # Add proxy if needed
            websearch=False # Modify if websearch is needed
        ):
-           if isinstance(chunk, ImageResponseModel):
-               # Yield image URLs as plain text
-               yield f"Image URL: {chunk.images}\n"
-           else:
-               # Ensure chunk is a string and yield as plain text
-               if isinstance(chunk, str):
-                   yield f"{chunk}\n"
-               else:
-                   yield f"{str(chunk)}\n"
+           yield chunk
 
    logger.info(f"Initiating streaming response for API key: {api_key} | IP: {client_ip}")
    return StreamingResponse(content_generator(), media_type='text/plain')
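With the generator now yielding plain strings (including the pre-formatted "Image URL: …" lines), the endpoint can forward chunks directly. A minimal FastAPI sketch of the simplified pattern — the route and generator body are illustrative, not the app's actual ones:

    import asyncio
    from fastapi import FastAPI
    from fastapi.responses import StreamingResponse

    app = FastAPI()

    @app.get("/demo-stream")  # hypothetical route, for illustration only
    async def demo_stream() -> StreamingResponse:
        async def content_generator():
            # StreamingResponse accepts str or bytes chunks from an async generator.
            for i in range(3):
                yield f"chunk {i}\n"
                await asyncio.sleep(0.1)
        return StreamingResponse(content_generator(), media_type="text/plain")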