Update main.py
main.py (CHANGED)
@@ -13,7 +13,7 @@ from datetime import datetime

 from aiohttp import ClientSession, ClientResponseError
 from fastapi import FastAPI, HTTPException, Request, Depends, Header
-from fastapi.responses import JSONResponse
+from fastapi.responses import JSONResponse, StreamingResponse
 from pydantic import BaseModel

 # Configure logging
@@ -39,6 +39,13 @@ rate_limit_store = defaultdict(lambda: {"count": 0, "timestamp": time.time()})
 CLEANUP_INTERVAL = 60 # seconds
 RATE_LIMIT_WINDOW = 60 # seconds

+class ImageResponse:
+    def __init__(self, images: Union[str, List[str]], alt: str = "Generated Image"):
+        if isinstance(images, str):
+            images = [images]
+        self.images = images
+        self.alt = alt
+
 class Blackbox:
     label = "Blackbox AI"
     url = "https://www.blackbox.ai"
@@ -184,20 +191,34 @@ class Blackbox:

     @staticmethod
     def clean_response(text: str) -> str:
-        pattern = r'^\$\@\$v=undefined-rv1
+        pattern = r'^\$\@\$v=undefined-rv1\$\@'
         cleaned_text = re.sub(pattern, '', text)
         return cleaned_text

     @classmethod
-    async def
+    async def create_async_generator(
         cls,
         model: str,
         messages: List[Dict[str, str]],
         proxy: Optional[str] = None,
         websearch: bool = False,
         **kwargs
-    ) -> AsyncGenerator[Union[str,
+    ) -> AsyncGenerator[Union[str, ImageResponse], None]:
+        """
+        Creates an asynchronous generator for streaming responses from Blackbox AI.
+
+        Parameters:
+            model (str): Model to use for generating responses.
+            messages (List[Dict[str, str]]): Message history.
+            proxy (Optional[str]): Proxy URL, if needed.
+            websearch (bool): Enables or disables web search mode.
+            **kwargs: Additional keyword arguments.
+
+        Yields:
+            Union[str, ImageResponse]: Segments of the generated response or ImageResponse objects.
+        """
         model = cls.get_model(model)
+
         chat_id = cls.generate_random_string()
         next_action = cls.generate_next_action()
         next_router_state_tree = cls.generate_next_router_state_tree()
@@ -302,9 +323,10 @@ class Blackbox:
                         match = re.search(r'!\[.*?\]\((https?://[^\)]+)\)', cleaned_response)
                         if match:
                             image_url = match.group(1)
-
+                            image_response = ImageResponse(images=image_url, alt="Generated Image")
+                            yield image_response
                         else:
-                            yield
+                            yield cleaned_response
                     else:
                         if websearch:
                             match = re.search(r'\$~~~\$(.*?)\$~~~\$', cleaned_response, re.DOTALL)
@@ -325,12 +347,12 @@ class Blackbox:
                             else:
                                 final_response = cleaned_response
                         else:
-                            if '
-                                final_response = cleaned_response.split('
+                            if '$~~~' in cleaned_response:
+                                final_response = cleaned_response.split('$~~~')[0].strip()
                             else:
                                 final_response = cleaned_response

-                        yield
+                        yield final_response
             except ClientResponseError as e:
                 error_text = f"Error {e.status}: {e.message}"
                 try:
@@ -339,10 +361,11 @@ class Blackbox:
                     error_text += f" - {cleaned_error}"
                 except Exception:
                     pass
-                yield
+                yield error_text
             except Exception as e:
-                yield
+                yield f"Unexpected error during /api/chat request: {str(e)}"

+            # Not clear what to do with this second request; keeping it for compatibility
             chat_url = f'{cls.url}/chat/{chat_id}?model={model}'

             try:
@@ -362,9 +385,9 @@ class Blackbox:
                     error_text += f" - {cleaned_error}"
                 except Exception:
                     pass
-                yield
+                yield error_text
             except Exception as e:
-                yield
+                yield f"Unexpected error during /chat/{chat_id} request: {str(e)}"

 # Custom exception for model not working
 class ModelNotWorkingException(Exception):
@@ -455,6 +478,7 @@ class Message(BaseModel):
 class ChatRequest(BaseModel):
     model: str
     messages: List[Message]
+    stream: Optional[bool] = False # Added for streaming support
     temperature: Optional[float] = 1.0
     top_p: Optional[float] = 1.0
     n: Optional[int] = 1
@@ -463,8 +487,17 @@ class ChatRequest(BaseModel):
     frequency_penalty: Optional[float] = 0.0
     logit_bias: Optional[Dict[str, float]] = None
     user: Optional[str] = None
-
-
+
+# Helper function to collect responses from async generator
+async def collect_response_content(generator: AsyncGenerator[Union[str, ImageResponse], None]) -> str:
+    response_content = ''
+    async for chunk in generator:
+        if isinstance(chunk, str):
+            response_content += chunk
+        elif isinstance(chunk, ImageResponse):
+            # Handle image response if needed
+            response_content += f"[Image: {chunk.alt}] {', '.join(chunk.images)}\n"
+    return response_content

 @app.post("/v1/chat/completions", dependencies=[Depends(rate_limiter_per_ip)])
 async def chat_completions(request: ChatRequest, req: Request, api_key: str = Depends(get_api_key)):
@@ -481,40 +514,36 @@ async def chat_completions(request: ChatRequest, req: Request, api_key: str = De
             raise HTTPException(status_code=400, detail="Requested model is not available.")

         # Process the request with actual message content, but don't log it
-
+        generator = Blackbox.create_async_generator(
             model=request.model,
             messages=[{"role": msg.role, "content": msg.content} for msg in request.messages],
             temperature=request.temperature,
-            max_tokens=request.max_tokens
-            websearch=request.websearch
+            max_tokens=request.max_tokens
         )

         if request.stream:
             async def stream_response():
-                async for chunk in
-                    if chunk
-
-
-
-
-
-
-
+                async for chunk in generator:
+                    if isinstance(chunk, str):
+                        data = json.dumps({
+                            "choices": [{
+                                "delta": {"content": chunk},
+                                "index": 0,
+                                "finish_reason": None
+                            }],
+                            "model": request.model,
+                            "id": f"chatcmpl-{uuid.uuid4()}",
+                            "object": "chat.completion.chunk",
+                            "created": int(datetime.now().timestamp()),
+                        })
+                        yield f"data: {data}\n\n"
+            logger.info(f"Streaming response enabled for API key: {api_key} | IP: {client_ip}")
+
             return StreamingResponse(stream_response(), media_type="text/event-stream")
         else:
-
-            image_url = None
-            async for chunk in response_generator:
-                if chunk["type"] == "text":
-                    full_response += chunk["content"]
-                elif chunk["type"] == "image":
-                    image_url = chunk["url"]
-                elif chunk["type"] == "error":
-                    raise HTTPException(status_code=500, detail=chunk["content"])
-
+            response_content = await collect_response_content(generator)
             logger.info(f"Completed response generation for API key: {api_key} | IP: {client_ip}")
-
-            response = {
+            return {
                 "id": f"chatcmpl-{uuid.uuid4()}",
                 "object": "chat.completion",
                 "created": int(datetime.now().timestamp()),
@@ -524,23 +553,17 @@ async def chat_completions(request: ChatRequest, req: Request, api_key: str = De
                         "index": 0,
                         "message": {
                             "role": "assistant",
-                            "content":
+                            "content": response_content
                         },
                         "finish_reason": "stop"
                     }
                 ],
                 "usage": {
                     "prompt_tokens": sum(len(msg.content.split()) for msg in request.messages),
-                    "completion_tokens": len(
-                    "total_tokens": sum(len(msg.content.split()) for msg in request.messages) + len(
+                    "completion_tokens": len(response_content.split()),
+                    "total_tokens": sum(len(msg.content.split()) for msg in request.messages) + len(response_content.split())
                 },
             }
-
-            if image_url:
-                response["choices"][0]["message"]["image"] = image_url
-
-            return response
-
     except ModelNotWorkingException as e:
         logger.warning(f"Model not working: {e} | IP: {client_ip}")
         raise HTTPException(status_code=503, detail=str(e))
@@ -584,4 +607,4 @@ async def http_exception_handler(request: Request, exc: HTTPException):

 if __name__ == "__main__":
     import uvicorn
-    uvicorn.run(app, host="0.0.0.0", port=8000)
+    uvicorn.run(app, host="0.0.0.0", port=8000)