import os import re import random import string import uuid import json import logging import asyncio import time from collections import defaultdict from typing import List, Dict, Any, Optional, AsyncGenerator, Union from datetime import datetime from aiohttp import ClientSession, ClientTimeout, ClientError from fastapi import FastAPI, HTTPException, Request, Depends, Header from fastapi.responses import StreamingResponse, JSONResponse, RedirectResponse from pydantic import BaseModel # Configure logging logging.basicConfig( level=logging.INFO, format="%(asctime)s [%(levelname)s] %(name)s: %(message)s", handlers=[logging.StreamHandler()] ) logger = logging.getLogger(__name__) # Load environment variables API_KEYS = os.getenv('API_KEYS', '').split(',') # Comma-separated API keys RATE_LIMIT = int(os.getenv('RATE_LIMIT', '60')) # Requests per minute AVAILABLE_MODELS = os.getenv('AVAILABLE_MODELS', '') # Comma-separated available models if not API_KEYS or API_KEYS == ['']: logger.error("No API keys found. Please set the API_KEYS environment variable.") raise Exception("API_KEYS environment variable not set.") # Process available models if AVAILABLE_MODELS: AVAILABLE_MODELS = [model.strip() for model in AVAILABLE_MODELS.split(',') if model.strip()] else: AVAILABLE_MODELS = [] # If empty, all models are available # Simple in-memory rate limiter rate_limit_store = defaultdict(lambda: {"count": 0, "timestamp": time.time()}) ip_rate_limit_store = defaultdict(lambda: {"count": 0, "timestamp": time.time()}) # Define cleanup interval and window CLEANUP_INTERVAL = 60 # seconds RATE_LIMIT_WINDOW = 60 # seconds async def cleanup_rate_limit_stores(): while True: current_time = time.time() # Clean API key rate limit store keys_to_delete = [key for key, value in rate_limit_store.items() if current_time - value["timestamp"] > RATE_LIMIT_WINDOW * 2] for key in keys_to_delete: del rate_limit_store[key] logger.debug(f"Cleaned up rate_limit_store for API key: {key}") # Clean IP rate limit store ips_to_delete = [ip for ip, value in ip_rate_limit_store.items() if current_time - value["timestamp"] > RATE_LIMIT_WINDOW * 2] for ip in ips_to_delete: del ip_rate_limit_store[ip] logger.debug(f"Cleaned up ip_rate_limit_store for IP: {ip}") await asyncio.sleep(CLEANUP_INTERVAL) async def get_api_key(request: Request, authorization: str = Header(None)) -> str: client_ip = request.client.host if authorization is None or not authorization.startswith('Bearer '): logger.warning(f"Invalid or missing authorization header from IP: {client_ip}") raise HTTPException(status_code=401, detail='Invalid authorization header format') api_key = authorization[7:] if api_key not in API_KEYS: logger.warning(f"Invalid API key attempted: {api_key} from IP: {client_ip}") raise HTTPException(status_code=401, detail='Invalid API key') return api_key async def rate_limiter(request: Request, api_key: str = Depends(get_api_key)): client_ip = request.client.host current_time = time.time() # Rate limiting per API key window_start = rate_limit_store[api_key]["timestamp"] if current_time - window_start > RATE_LIMIT_WINDOW: rate_limit_store[api_key] = {"count": 1, "timestamp": current_time} else: if rate_limit_store[api_key]["count"] >= RATE_LIMIT: logger.warning(f"Rate limit exceeded for API key: {api_key} from IP: {client_ip}") raise HTTPException(status_code=429, detail='Rate limit exceeded for API key') rate_limit_store[api_key]["count"] += 1 # Rate limiting per IP address window_start_ip = ip_rate_limit_store[client_ip]["timestamp"] if current_time - window_start_ip > RATE_LIMIT_WINDOW: ip_rate_limit_store[client_ip] = {"count": 1, "timestamp": current_time} else: if ip_rate_limit_store[client_ip]["count"] >= RATE_LIMIT: logger.warning(f"Rate limit exceeded for IP address: {client_ip}") raise HTTPException(status_code=429, detail='Rate limit exceeded for IP address') ip_rate_limit_store[client_ip]["count"] += 1 # Custom exception for model not working class ModelNotWorkingException(Exception): def __init__(self, model: str): self.model = model self.message = f"The model '{model}' is currently not working. Please try another model or wait for it to be fixed." super().__init__(self.message) # Mock implementations for ImageResponse and to_data_uri class ImageResponse: def __init__(self, url: str, alt: str): self.url = url self.alt = alt def to_data_uri(image: Any) -> str: return "data:image/png;base64,..." # Replace with actual base64 data class Blackbox: url = "https://www.blackbox.ai" api_endpoint = "https://www.blackbox.ai/api/chat" working = True supports_stream = True supports_system_message = True supports_message_history = True default_model = 'blackboxai' image_models = ['ImageGeneration'] models = [ default_model, 'blackboxai-pro', "llama-3.1-8b", 'llama-3.1-70b', 'llama-3.1-405b', 'gpt-4o', 'gemini-pro', 'gemini-1.5-flash', 'claude-sonnet-3.5', 'PythonAgent', 'JavaAgent', 'JavaScriptAgent', 'HTMLAgent', 'GoogleCloudAgent', 'AndroidDeveloper', 'SwiftDeveloper', 'Next.jsAgent', 'MongoDBAgent', 'PyTorchAgent', 'ReactAgent', 'XcodeAgent', 'AngularJSAgent', *image_models, 'Niansuh', ] # Filter models based on AVAILABLE_MODELS if AVAILABLE_MODELS: models = [model for model in models if model in AVAILABLE_MODELS] agentMode = { 'ImageGeneration': {'mode': True, 'id': "ImageGenerationLV45LJp", 'name': "Image Generation"}, 'Niansuh': {'mode': True, 'id': "NiansuhAIk1HgESy", 'name': "Niansuh"}, } trendingAgentMode = { "blackboxai": {}, "gemini-1.5-flash": {'mode': True, 'id': 'Gemini'}, "llama-3.1-8b": {'mode': True, 'id': "llama-3.1-8b"}, 'llama-3.1-70b': {'mode': True, 'id': "llama-3.1-70b"}, 'llama-3.1-405b': {'mode': True, 'id': "llama-3.1-405b"}, 'blackboxai-pro': {'mode': True, 'id': "BLACKBOXAI-PRO"}, 'PythonAgent': {'mode': True, 'id': "Python Agent"}, 'JavaAgent': {'mode': True, 'id': "Java Agent"}, 'JavaScriptAgent': {'mode': True, 'id': "JavaScript Agent"}, 'HTMLAgent': {'mode': True, 'id': "HTML Agent"}, 'GoogleCloudAgent': {'mode': True, 'id': "Google Cloud Agent"}, 'AndroidDeveloper': {'mode': True, 'id': "Android Developer"}, 'SwiftDeveloper': {'mode': True, 'id': "Swift Developer"}, 'Next.jsAgent': {'mode': True, 'id': "Next.js Agent"}, 'MongoDBAgent': {'mode': True, 'id': "MongoDB Agent"}, 'PyTorchAgent': {'mode': True, 'id': "PyTorch Agent"}, 'ReactAgent': {'mode': True, 'id': "React Agent"}, 'XcodeAgent': {'mode': True, 'id': "Xcode Agent"}, 'AngularJSAgent': {'mode': True, 'id': "AngularJS Agent"}, } userSelectedModel = { "gpt-4o": "gpt-4o", "gemini-pro": "gemini-pro", 'claude-sonnet-3.5': "claude-sonnet-3.5", } model_prefixes = { 'gpt-4o': '@GPT-4o', 'gemini-pro': '@Gemini-PRO', 'claude-sonnet-3.5': '@Claude-Sonnet-3.5', 'PythonAgent': '@Python Agent', 'JavaAgent': '@Java Agent', 'JavaScriptAgent': '@JavaScript Agent', 'HTMLAgent': '@HTML Agent', 'GoogleCloudAgent': '@Google Cloud Agent', 'AndroidDeveloper': '@Android Developer', 'SwiftDeveloper': '@Swift Developer', 'Next.jsAgent': '@Next.js Agent', 'MongoDBAgent': '@MongoDB Agent', 'PyTorchAgent': '@PyTorch Agent', 'ReactAgent': '@React Agent', 'XcodeAgent': '@Xcode Agent', 'AngularJSAgent': '@AngularJS Agent', 'blackboxai-pro': '@BLACKBOXAI-PRO', 'ImageGeneration': '@Image Generation', 'Niansuh': '@Niansuh', } model_referers = { "blackboxai": f"{url}/?model=blackboxai", "gpt-4o": f"{url}/?model=gpt-4o", "gemini-pro": f"{url}/?model=gemini-pro", "claude-sonnet-3.5": f"{url}/?model=claude-sonnet-3.5" } model_aliases = { "gemini-flash": "gemini-1.5-flash", "claude-3.5-sonnet": "claude-sonnet-3.5", "flux": "ImageGeneration", "niansuh": "Niansuh", } @classmethod def get_model(cls, model: str) -> Optional[str]: if model in cls.models: return model elif model in cls.userSelectedModel and cls.userSelectedModel[model] in cls.models: return model elif model in cls.model_aliases and cls.model_aliases[model] in cls.models: return cls.model_aliases[model] else: return cls.default_model if cls.default_model in cls.models else None @classmethod async def create_async_generator( cls, model: str, messages: List[Dict[str, str]], proxy: Optional[str] = None, image: Any = None, image_name: Optional[str] = None, webSearchMode: bool = False, **kwargs ) -> AsyncGenerator[Any, None]: model = cls.get_model(model) if model is None: logger.error(f"Model {model} is not available.") raise ModelNotWorkingException(model) logger.info(f"Selected model: {model}") if not cls.working or model not in cls.models: logger.error(f"Model {model} is not working or not supported.") raise ModelNotWorkingException(model) headers = { "accept": "*/*", "accept-language": "en-US,en;q=0.9", "cache-control": "no-cache", "content-type": "application/json", "origin": cls.url, "pragma": "no-cache", "priority": "u=1, i", "referer": cls.model_referers.get(model, cls.url), "sec-ch-ua": '"Chromium";v="129", "Not=A?Brand";v="8"', "sec-ch-ua-mobile": "?0", "sec-ch-ua-platform": '"Linux"', "sec-fetch-dest": "empty", "sec-fetch-mode": "cors", "sec-fetch-site": "same-origin", "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/129.0.0.0 Safari/537.36", } if model in cls.model_prefixes: prefix = cls.model_prefixes[model] if not messages[0]['content'].startswith(prefix): logger.debug(f"Adding prefix '{prefix}' to the first message.") messages[0]['content'] = f"{prefix} {messages[0]['content']}" random_id = ''.join(random.choices(string.ascii_letters + string.digits, k=7)) messages[-1]['id'] = random_id messages[-1]['role'] = 'user' # Don't log the full message content for privacy logger.debug(f"Generated message ID: {random_id} for model: {model}") if image is not None: messages[-1]['data'] = { 'fileText': '', 'imageBase64': to_data_uri(image), 'title': image_name } messages[-1]['content'] = 'FILE:BB\n$#$\n\n$#$\n' + messages[-1]['content'] logger.debug("Image data added to the message.") data = { "messages": messages, "id": random_id, "previewToken": None, "userId": None, "codeModelMode": True, "agentMode": {}, "trendingAgentMode": {}, "isMicMode": False, "userSystemPrompt": None, "maxTokens": 99999999, "playgroundTopP": 0.9, "playgroundTemperature": 0.5, "isChromeExt": False, "githubToken": None, "clickedAnswer2": False, "clickedAnswer3": False, "clickedForceWebSearch": False, "visitFromDelta": False, "mobileClient": False, "userSelectedModel": None, "webSearchMode": webSearchMode, } if model in cls.agentMode: data["agentMode"] = cls.agentMode[model] elif model in cls.trendingAgentMode: data["trendingAgentMode"] = cls.trendingAgentMode[model] elif model in cls.userSelectedModel: data["userSelectedModel"] = cls.userSelectedModel[model] logger.info(f"Sending request to {cls.api_endpoint} with data (excluding messages).") timeout = ClientTimeout(total=60) # Set an appropriate timeout retry_attempts = 10 # Set the number of retry attempts for attempt in range(retry_attempts): try: async with ClientSession(headers=headers, timeout=timeout) as session: async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response: response.raise_for_status() logger.info(f"Received response with status {response.status}") if model == 'ImageGeneration': response_text = await response.text() url_match = re.search(r'https://storage\.googleapis\.com/[^\s\)]+', response_text) if url_match: image_url = url_match.group(0) logger.info(f"Image URL found.") yield ImageResponse(image_url, alt=messages[-1]['content']) else: logger.error("Image URL not found in the response.") raise Exception("Image URL not found in the response") else: full_response = "" search_results_json = "" try: async for chunk, _ in response.content.iter_chunks(): if chunk: decoded_chunk = chunk.decode(errors='ignore') decoded_chunk = re.sub(r'\$@\$v=[^$]+\$@\$', '', decoded_chunk) if decoded_chunk.strip(): if '$~~~$' in decoded_chunk: search_results_json += decoded_chunk else: full_response += decoded_chunk yield decoded_chunk logger.info("Finished streaming response chunks.") except Exception as e: logger.exception("Error while iterating over response chunks.") raise e if data["webSearchMode"] and search_results_json: match = re.search(r'\$~~~\$(.*?)\$~~~\$', search_results_json, re.DOTALL) if match: try: search_results = json.loads(match.group(1)) formatted_results = "\n\n**Sources:**\n" for i, result in enumerate(search_results[:5], 1): formatted_results += f"{i}. [{result['title']}]({result['link']})\n" logger.info("Formatted search results.") yield formatted_results except json.JSONDecodeError as je: logger.error("Failed to parse search results JSON.") raise je break # Exit the retry loop if successful except ClientError as ce: logger.error(f"Client error occurred: {ce}. Retrying attempt {attempt + 1}/{retry_attempts}") if attempt == retry_attempts - 1: raise HTTPException(status_code=502, detail="Error communicating with the external API.") except asyncio.TimeoutError: logger.error(f"Request timed out. Retrying attempt {attempt + 1}/{retry_attempts}") if attempt == retry_attempts - 1: raise HTTPException(status_code=504, detail="External API request timed out.") except Exception as e: logger.error(f"Unexpected error: {e}. Retrying attempt {attempt + 1}/{retry_attempts}") if attempt == retry_attempts - 1: raise HTTPException(status_code=500, detail=str(e)) # FastAPI app setup app = FastAPI() # Middleware to enhance security @app.middleware("http") async def security_middleware(request: Request, call_next): # Enforce that POST requests to sensitive endpoints must have a valid Content-Type if request.method == "POST" and request.url.path in ["/v1/chat/completions", "/v1/completions"]: content_type = request.headers.get("Content-Type") if content_type != "application/json": client_ip = request.client.host logger.warning(f"Invalid Content-Type from IP: {client_ip} for path: {request.url.path}") return JSONResponse( status_code=400, content={ "error": { "message": "Content-Type must be application/json", "type": "invalid_request_error", "param": None, "code": None } }, ) response = await call_next(request) return response class Message(BaseModel): role: str content: str class ChatRequest(BaseModel): model: str messages: List[Message] temperature: Optional[float] = 1.0 top_p: Optional[float] = 1.0 n: Optional[int] = 1 stream: Optional[bool] = False stop: Optional[Union[str, List[str]]] = None max_tokens: Optional[int] = None presence_penalty: Optional[float] = 0.0 frequency_penalty: Optional[float] = 0.0 logit_bias: Optional[Dict[str, float]] = None user: Optional[str] = None webSearchMode: Optional[bool] = False # Custom parameter def create_response(content: str, model: str, finish_reason: Optional[str] = None) -> Dict[str, Any]: return { "id": f"chatcmpl-{uuid.uuid4()}", "object": "chat.completion.chunk", "created": int(datetime.now().timestamp()), "model": model, "choices": [ { "index": 0, "delta": {"content": content, "role": "assistant"}, "finish_reason": finish_reason, } ], "usage": None, } @app.post("/v1/chat/completions", dependencies=[Depends(rate_limiter)]) async def chat_completions(request: ChatRequest, req: Request, api_key: str = Depends(get_api_key)): client_ip = req.client.host # Redact user messages only for logging purposes redacted_messages = [{"role": msg.role, "content": "[redacted]"} for msg in request.messages] logger.info(f"Received chat completions request from API key: {api_key} | IP: {client_ip} | Model: {request.model} | Messages: {redacted_messages}") try: # Validate that the requested model is available if request.model not in Blackbox.models and request.model not in Blackbox.model_aliases: logger.warning(f"Attempt to use unavailable model: {request.model} from IP: {client_ip}") raise HTTPException(status_code=400, detail="Requested model is not available.") # Process the request with actual message content, but don't log it async_generator = Blackbox.create_async_generator( model=request.model, messages=[{"role": msg.role, "content": msg.content} for msg in request.messages], # Actual message content used here image=None, image_name=None, webSearchMode=request.webSearchMode ) if request.stream: async def generate(): try: async for chunk in async_generator: if isinstance(chunk, ImageResponse): image_markdown = f"![image]({chunk.url})" response_chunk = create_response(image_markdown, request.model) else: response_chunk = create_response(chunk, request.model) yield f"data: {json.dumps(response_chunk)}\n\n" yield "data: [DONE]\n\n" except HTTPException as he: error_response = {"error": he.detail} yield f"data: {json.dumps(error_response)}\n\n" except Exception as e: logger.exception(f"Error during streaming response generation from IP: {client_ip}.") error_response = {"error": str(e)} yield f"data: {json.dumps(error_response)}\n\n" return StreamingResponse(generate(), media_type="text/event-stream") else: response_content = "" async for chunk in async_generator: if isinstance(chunk, ImageResponse): response_content += f"![image]({chunk.url})\n" else: response_content += chunk logger.info(f"Completed non-streaming response generation for API key: {api_key} | IP: {client_ip}") return { "id": f"chatcmpl-{uuid.uuid4()}", "object": "chat.completion", "created": int(datetime.now().timestamp()), "model": request.model, "choices": [ { "message": { "role": "assistant", "content": response_content }, "finish_reason": "stop", "index": 0 } ], "usage": { "prompt_tokens": sum(len(msg.content.split()) for msg in request.messages), "completion_tokens": len(response_content.split()), "total_tokens": sum(len(msg.content.split()) for msg in request.messages) + len(response_content.split()) }, } except ModelNotWorkingException as e: logger.warning(f"Model not working: {e} | IP: {client_ip}") raise HTTPException(status_code=503, detail=str(e)) except HTTPException as he: logger.warning(f"HTTPException: {he.detail} | IP: {client_ip}") raise he except Exception as e: logger.exception(f"An unexpected error occurred while processing the chat completions request from IP: {client_ip}.") raise HTTPException(status_code=500, detail=str(e)) # Return 'about:blank' when accessing the endpoint via GET @app.get("/v1/chat/completions") async def chat_completions_get(req: Request): client_ip = req.client.host logger.info(f"GET request made to /v1/chat/completions from IP: {client_ip}, redirecting to 'about:blank'") return RedirectResponse(url='about:blank') @app.get("/v1/models") async def get_models(req: Request): client_ip = req.client.host logger.info(f"Fetching available models from IP: {client_ip}") return {"data": [{"id": model, "object": "model"} for model in Blackbox.models]} # Additional endpoints for better functionality @app.get("/v1/health", dependencies=[Depends(rate_limiter)]) async def health_check(req: Request, api_key: str = Depends(get_api_key)): client_ip = req.client.host logger.info(f"Health check requested by API key: {api_key} | IP: {client_ip}") return {"status": "ok"} @app.get("/v1/models/{model}/status") async def model_status(model: str, req: Request): client_ip = req.client.host logger.info(f"Model status requested for '{model}' from IP: {client_ip}") if model in Blackbox.models: return {"model": model, "status": "available"} elif model in Blackbox.model_aliases and Blackbox.model_aliases[model] in Blackbox.models: actual_model = Blackbox.model_aliases[model] return {"model": actual_model, "status": "available via alias"} else: logger.warning(f"Model not found: {model} from IP: {client_ip}") raise HTTPException(status_code=404, detail="Model not found") # Custom exception handler to match OpenAI's error format @app.exception_handler(HTTPException) async def http_exception_handler(request: Request, exc: HTTPException): client_ip = request.client.host logger.error(f"HTTPException: {exc.detail} | Path: {request.url.path} | IP: {client_ip}") return JSONResponse( status_code=exc.status_code, content={ "error": { "message": exc.detail, "type": "invalid_request_error", "param": None, "code": None } }, ) # New endpoint: /v1/tokenizer to calculate token counts class TokenizerRequest(BaseModel): text: str @app.post("/v1/tokenizer") async def tokenizer(request: TokenizerRequest, req: Request, api_key: str = Depends(get_api_key)): client_ip = req.client.host text = request.text token_count = len(text.split()) logger.info(f"Tokenizer requested by API key: {api_key} | IP: {client_ip} | Text length: {len(text)}") return {"text": text, "tokens": token_count} # New endpoint: /v1/completions to support text completions class CompletionRequest(BaseModel): model: str prompt: str max_tokens: Optional[int] = 16 temperature: Optional[float] = 1.0 top_p: Optional[float] = 1.0 n: Optional[int] = 1 stream: Optional[bool] = False stop: Optional[Union[str, List[str]]] = None logprobs: Optional[int] = None echo: Optional[bool] = False presence_penalty: Optional[float] = 0.0 frequency_penalty: Optional[float] = 0.0 best_of: Optional[int] = 1 logit_bias: Optional[Dict[str, float]] = None user: Optional[str] = None @app.post("/v1/completions", dependencies=[Depends(rate_limiter)]) async def completions(request: CompletionRequest, req: Request, api_key: str = Depends(get_api_key)): client_ip = req.client.host logger.info(f"Received completion request from API key: {api_key} | IP: {client_ip} | Model: {request.model}") try: # Validate that the requested model is available if request.model not in Blackbox.models and request.model not in Blackbox.model_aliases: logger.warning(f"Attempt to use unavailable model: {request.model} from IP: {client_ip}") raise HTTPException(status_code=400, detail="Requested model is not available.") # Simulate a simple completion by echoing the prompt completion_text = f"{request.prompt} [Completed by {request.model}]" return { "id": f"cmpl-{uuid.uuid4()}", "object": "text_completion", "created": int(datetime.now().timestamp()), "model": request.model, "choices": [ { "text": completion_text, "index": 0, "logprobs": None, "finish_reason": "length" } ], "usage": { "prompt_tokens": len(request.prompt.split()), "completion_tokens": len(completion_text.split()), "total_tokens": len(request.prompt.split()) + len(completion_text.split()) } } except HTTPException as he: logger.warning(f"HTTPException: {he.detail} | IP: {client_ip}") raise he except Exception as e: logger.exception(f"An unexpected error occurred while processing the completions request from IP: {client_ip}.") raise HTTPException(status_code=500, detail=str(e)) # Add the cleanup task when the app starts @app.on_event("startup") async def startup_event(): asyncio.create_task(cleanup_rate_limit_stores()) logger.info("Started rate limit store cleanup task.") if __name__ == "__main__": import uvicorn uvicorn.run(app, host="0.0.0.0", port=8000)