Update main.py
main.py
CHANGED
@@ -11,7 +11,6 @@ import asyncio
 import time
 from collections import defaultdict
 from typing import List, Dict, Any, Optional, Union, AsyncGenerator
-from datetime import datetime  # Essential for timestamping
 
 from aiohttp import ClientSession, ClientResponseError
 from fastapi import FastAPI, HTTPException, Request, Depends, Header
@@ -41,18 +40,13 @@ rate_limit_store = defaultdict(lambda: {"count": 0, "timestamp": time.time()})
 CLEANUP_INTERVAL = 60  # seconds
 RATE_LIMIT_WINDOW = 60  # seconds
 
-# Define ImageResponse
+# Define the ImageResponse model
 class ImageResponse(BaseModel):
     images: str
     alt: str
 
-
-
-
-class ProviderModelMixin:
-    pass
-
-class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
+# Updated Blackbox Provider
+class Blackbox:
     label = "Blackbox AI"
     url = "https://www.blackbox.ai"
     api_endpoint = "https://www.blackbox.ai/api/chat"
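ImageResponse is a plain Pydantic model, and dropping the AsyncGeneratorProvider / ProviderModelMixin bases leaves Blackbox as an ordinary class whose classmethods are called directly (as the endpoint later in this diff does). A minimal sketch of the model in isolation, with illustrative field values:

```python
from pydantic import BaseModel

class ImageResponse(BaseModel):
    images: str  # URL of the generated image
    alt: str     # alt text describing the image

# Illustrative: the provider yields one of these when an image model responds
resp = ImageResponse(images="https://example.com/cat.png", alt="Generated Image")
print(resp.images, resp.alt)
```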
@@ -147,7 +141,7 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
         "blackboxai": "/?model=blackboxai",
         "gpt-4o": "/?model=gpt-4o",
         "gemini-pro": "/?model=gemini-pro",
-        "claude-sonnet-3.5": "/?model=claude-sonnet-3.5"
+        "claude-sonnet-3.5": "/?model=claude-sonnet-3.5"
     }
 
     model_aliases = {
@@ -201,115 +195,6 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
         cleaned_text = re.sub(pattern, '', text)
         return cleaned_text
 
-    @classmethod
-    async def generate_response(
-        cls,
-        model: str,
-        messages: List[Dict[str, str]],
-        proxy: Optional[str] = None,
-        **kwargs
-    ) -> str:
-        model = cls.get_model(model)
-        chat_id = cls.generate_random_string()
-        next_action = cls.generate_next_action()
-        next_router_state_tree = cls.generate_next_router_state_tree()
-
-        agent_mode = cls.agentMode.get(model, {})
-        trending_agent_mode = cls.trendingAgentMode.get(model, {})
-
-        prefix = cls.model_prefixes.get(model, "")
-
-        formatted_prompt = ""
-        for message in messages:
-            role = message.get('role', '').capitalize()
-            content = message.get('content', '')
-            if role and content:
-                formatted_prompt += f"{role}: {content}\n"
-
-        if prefix:
-            formatted_prompt = f"{prefix} {formatted_prompt}".strip()
-
-        referer_path = cls.model_referers.get(model, f"/?model={model}")
-        referer_url = f"{cls.url}{referer_path}"
-
-        common_headers = {
-            'accept': '*/*',
-            'accept-language': 'en-US,en;q=0.9',
-            'cache-control': 'no-cache',
-            'origin': cls.url,
-            'pragma': 'no-cache',
-            'priority': 'u=1, i',
-            'sec-ch-ua': '"Chromium";v="129", "Not=A?Brand";v="8"',
-            'sec-ch-ua-mobile': '?0',
-            'sec-ch-ua-platform': '"Linux"',
-            'sec-fetch-dest': 'empty',
-            'sec-fetch-mode': 'cors',
-            'sec-fetch-site': 'same-origin',
-            'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) '
-                          'AppleWebKit/537.36 (KHTML, like Gecko) '
-                          'Chrome/129.0.0.0 Safari/537.36'
-        }
-
-        headers_api_chat = {
-            'Content-Type': 'application/json',
-            'Referer': referer_url
-        }
-        headers_api_chat_combined = {**common_headers, **headers_api_chat}
-
-        payload_api_chat = {
-            "messages": [
-                {
-                    "id": chat_id,
-                    "content": formatted_prompt,
-                    "role": "user"
-                }
-            ],
-            "id": chat_id,
-            "previewToken": None,
-            "userId": None,
-            "codeModelMode": True,
-            "agentMode": agent_mode,
-            "trendingAgentMode": trending_agent_mode,
-            "isMicMode": False,
-            "userSystemPrompt": None,
-            "maxTokens": 1024,
-            "playgroundTopP": 0.9,
-            "playgroundTemperature": 0.5,
-            "isChromeExt": False,
-            "githubToken": None,
-            "clickedAnswer2": False,
-            "clickedAnswer3": False,
-            "clickedForceWebSearch": False,
-            "visitFromDelta": False,
-            "mobileClient": False,
-            "webSearchMode": False,
-            "userSelectedModel": cls.userSelectedModel.get(model, model)
-        }
-
-        async with ClientSession(headers=common_headers) as session:
-            try:
-                async with session.post(
-                    cls.api_endpoint,
-                    headers=headers_api_chat_combined,
-                    json=payload_api_chat,
-                    proxy=proxy
-                ) as response_api_chat:
-                    response_api_chat.raise_for_status()
-                    text = await response_api_chat.text()
-                    cleaned_response = cls.clean_response(text)
-                    return cleaned_response
-            except ClientResponseError as e:
-                error_text = f"Error {e.status}: {e.message}"
-                try:
-                    error_response = await e.response.text()
-                    cleaned_error = cls.clean_response(error_response)
-                    error_text += f" - {cleaned_error}"
-                except Exception:
-                    pass
-                return error_text
-            except Exception as e:
-                return f"Unexpected error during /api/chat request: {str(e)}"
-
     @classmethod
     async def create_async_generator(
         cls,
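With the non-streaming generate_response helper gone, create_async_generator is the single code path. A caller that still wants the whole reply as one string can drain the generator instead; a minimal sketch, assuming the generator accepts the same model/messages arguments and yields strings or ImageResponse objects as its docstring below states:

```python
from typing import Dict, List

async def collect_response(model: str, messages: List[Dict[str, str]]) -> str:
    # Hypothetical replacement for the removed helper: concatenate every
    # chunk yielded by the streaming generator into a single string.
    parts = []
    async for chunk in Blackbox.create_async_generator(model=model, messages=messages):
        if isinstance(chunk, ImageResponse):
            parts.append(f"![{chunk.alt}]({chunk.images})")  # render images as markdown
        else:
            parts.append(chunk)
    return "".join(parts)
```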
@@ -332,7 +217,6 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
         Yields:
             Union[str, ImageResponse]: Segments of the generated response or ImageResponse objects.
         """
-        logger.debug("Starting async generator for model: %s", model)
         model = cls.get_model(model)
 
         chat_id = cls.generate_random_string()
@@ -425,7 +309,6 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
 
         async with ClientSession(headers=common_headers) as session:
             try:
-                logger.debug("Sending POST request to Blackbox API at %s", cls.api_endpoint)
                 async with session.post(
                     cls.api_endpoint,
                     headers=headers_api_chat_combined,
@@ -435,19 +318,15 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
                     response_api_chat.raise_for_status()
                     text = await response_api_chat.text()
                     cleaned_response = cls.clean_response(text)
-                    logger.debug("Received response from Blackbox API: %s", cleaned_response)
-
-                    # Test yield to verify streaming works
-                    yield "Streaming response started...\n"
 
                     if model in cls.image_models:
                         match = re.search(r'!\[.*?\]\((https?://[^\)]+)\)', cleaned_response)
                         if match:
                             image_url = match.group(1)
                             image_response = ImageResponse(images=image_url, alt="Generated Image")
-                            yield image_response
+                            yield image_response
                         else:
-                            yield cleaned_response
+                            yield cleaned_response
                     else:
                         if websearch:
                             match = re.search(r'\$~~~\$(.*?)\$~~~\$', cleaned_response, re.DOTALL)
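The image branch above depends on the API returning a markdown image link; the regex captures the first URL inside `![alt](url)`. The pattern can be checked in isolation:

```python
import re

sample = "Here it is: ![Generated Image](https://example.com/cat.png)"
match = re.search(r'!\[.*?\]\((https?://[^\)]+)\)', sample)
if match:
    print(match.group(1))  # -> https://example.com/cat.png
```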
@@ -473,29 +352,22 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
                         else:
                             final_response = cleaned_response
 
-                        yield final_response
-
+                        yield final_response
             except ClientResponseError as e:
                 error_text = f"Error {e.status}: {e.message}"
-                logger.error("ClientResponseError: %s", error_text)
                 try:
                     error_response = await e.response.text()
                     cleaned_error = cls.clean_response(error_response)
                     error_text += f" - {cleaned_error}"
                 except Exception:
                     pass
-                yield
+                yield error_text
             except Exception as e:
-
-                logger.error("Exception: %s", error_text)
-                yield f"{error_text}\n"
-
-                # Test yield after API call
-                yield "Streaming response ended.\n"
+                yield f"Unexpected error during /api/chat request: {str(e)}"
 
             chat_url = f'{cls.url}/chat/{chat_id}?model={model}'
+
             try:
-                logger.debug("Sending POST request to Chat URL: %s", chat_url)
                 async with session.post(
                     chat_url,
                     headers=headers_chat_combined,
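One caveat survives this commit: aiohttp's ClientResponseError exposes status, message, headers, and request_info, but no response attribute, so `await e.response.text()` raises AttributeError, falls into the inner bare except, and the error body is lost. If the body matters, it is usually captured while the response is still open, along these lines (a sketch, not the committed code):

```python
from aiohttp import ClientSession

async def post_and_read(session: ClientSession, url: str, payload: dict) -> str:
    async with session.post(url, json=payload) as response:
        body = await response.text()  # read the body before the context closes
        if response.status >= 400:
            return f"Error {response.status}: {response.reason} - {body}"
        return body
```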
@@ -503,15 +375,18 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
                     proxy=proxy
                 ) as response_chat:
                     response_chat.raise_for_status()
-
+                    pass
             except ClientResponseError as e:
                 error_text = f"Error {e.status}: {e.message}"
-
-
+                try:
+                    error_response = await e.response.text()
+                    cleaned_error = cls.clean_response(error_response)
+                    error_text += f" - {cleaned_error}"
+                except Exception:
+                    pass
+                yield error_text
             except Exception as e:
-
-                logger.error("Exception on chat POST: %s", error_text)
-                yield f"{error_text}\n"
+                yield f"Unexpected error during /chat/{chat_id} request: {str(e)}"
 
 # Custom exception for model not working
 class ModelNotWorkingException(Exception):
@@ -612,7 +487,11 @@ class ChatRequest(BaseModel):
     user: Optional[str] = None
 
 @app.post("/v1/chat/completions", dependencies=[Depends(rate_limiter_per_ip)])
-async def chat_completions(request: ChatRequest, req: Request, api_key: str = Depends(get_api_key)):
+async def chat_completions(
+    request: ChatRequest,
+    req: Request,
+    api_key: str = Depends(get_api_key)
+):
     client_ip = req.client.host
     # Redact user messages only for logging purposes
     redacted_messages = [{"role": msg.role, "content": "[redacted]"} for msg in request.messages]
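The reflowed signature changes nothing for callers: the endpoint still accepts an OpenAI-style body validated by ChatRequest. A hypothetical request against a local instance (base URL, credential, and model name are placeholders; how the key is actually read depends on get_api_key, which is not shown in this diff):

```python
import requests

resp = requests.post(
    "http://localhost:8000/v1/chat/completions",
    headers={"Authorization": "Bearer sk-example"},  # placeholder credential
    json={
        "model": "gpt-4o",
        "messages": [{"role": "user", "content": "Hello!"}],
        "temperature": 0.5,
        "max_tokens": 256,
    },
    stream=True,
)
for line in resp.iter_lines():
    if line:
        print(line.decode())
```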
@@ -625,16 +504,42 @@ async def chat_completions(request: ChatRequest, req: Request, api_key: str = Depends(get_api_key)):
         logger.warning(f"Attempt to use unavailable model: {request.model} from IP: {client_ip}")
         raise HTTPException(status_code=400, detail="Requested model is not available.")
 
-        # Create
-
+        # Create the asynchronous generator for streaming responses
+        async_generator = Blackbox.create_async_generator(
             model=request.model,
             messages=[{"role": msg.role, "content": msg.content} for msg in request.messages],
-
-
+            temperature=request.temperature,
+            max_tokens=request.max_tokens
         )
 
         logger.info(f"Started streaming response for API key: {api_key} | IP: {client_ip}")
-
+
+        # Define a generator function to yield the streamed data in the OpenAI format
+        async def stream_response():
+            async for chunk in async_generator:
+                if isinstance(chunk, ImageResponse):
+                    # Handle image responses if necessary
+                    # For simplicity, converting ImageResponse to a string representation
+                    # You might want to handle it differently based on your requirements
+                    data = json.dumps({
+                        "choices": [{
+                            "delta": {
+                                "content": f"![{chunk.alt}]({chunk.images})"
+                            }
+                        }]
+                    })
+                else:
+                    # Assuming chunk is a string
+                    data = json.dumps({
+                        "choices": [{
+                            "delta": {
+                                "content": chunk
+                            }
+                        }]
+                    })
+                yield f"data: {data}\n\n"
+
+        return StreamingResponse(stream_response(), media_type="text/event-stream")
 
     except ModelNotWorkingException as e:
         logger.warning(f"Model not working: {e} | IP: {client_ip}")