Update main.py
main.py
CHANGED
@@ -1,21 +1,23 @@
-from
-
-
-
+from __future__ import annotations
+
+import os
+import re
+import random
+import string
 import uuid
+import json
 import logging
-import
+import asyncio
 import time
 from collections import defaultdict
+from typing import List, Dict, Any, Optional, AsyncGenerator, Union
+
 from datetime import datetime
-import asyncio
-import json
-import re
-import random
-import string
-from aiohttp import ClientSession, ClientTimeout, ClientError
 
-
+from aiohttp import ClientSession, ClientTimeout, ClientError, ClientResponseError
+from fastapi import FastAPI, HTTPException, Request, Depends, Header
+from fastapi.responses import StreamingResponse, JSONResponse, RedirectResponse
+from pydantic import BaseModel
 
 # Configure logging
 logging.basicConfig(
@@ -53,7 +55,10 @@ async def cleanup_rate_limit_stores():
     """
     while True:
         current_time = time.time()
-        ips_to_delete = [
+        ips_to_delete = [
+            ip for ip, value in rate_limit_store.items()
+            if current_time - value["timestamp"] > RATE_LIMIT_WINDOW * 2
+        ]
        for ip in ips_to_delete:
             del rate_limit_store[ip]
             logger.debug(f"Cleaned up rate_limit_store for IP: {ip}")
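For reference, a minimal self-contained sketch of how the new cleanup predicate behaves. The per-IP store shape (a dict with at least a "timestamp" key) follows the hunk above; the RATE_LIMIT_WINDOW value, the "count" field, and the sample IPs are illustrative assumptions, since the real definitions live elsewhere in main.py and are not part of this diff.

    # Sketch only: RATE_LIMIT_WINDOW, the "count" field, and the sample IPs are assumed.
    import time
    from collections import defaultdict

    RATE_LIMIT_WINDOW = 60  # assumed; the real constant is defined elsewhere in main.py
    rate_limit_store = defaultdict(lambda: {"count": 0, "timestamp": time.time()})

    rate_limit_store["203.0.113.7"] = {"count": 3, "timestamp": time.time() - 300}  # idle for 5 minutes
    rate_limit_store["198.51.100.2"] = {"count": 1, "timestamp": time.time()}       # active right now

    current_time = time.time()
    ips_to_delete = [
        ip for ip, value in rate_limit_store.items()
        if current_time - value["timestamp"] > RATE_LIMIT_WINDOW * 2
    ]
    # Entries idle for more than twice the window are dropped.
    for ip in ips_to_delete:
        del rate_limit_store[ip]

    print(sorted(rate_limit_store))  # only the active IP remains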
@@ -89,7 +94,418 @@ async def get_api_key(request: Request, authorization: str = Header(None)) -> st
         raise HTTPException(status_code=401, detail='Invalid API key')
     return api_key
 
-#
+# Custom exception for model not working
+class ModelNotWorkingException(Exception):
+    def __init__(self, model: str):
+        self.model = model
+        self.message = f"The model '{model}' is currently not working. Please try another model or wait for it to be fixed."
+        super().__init__(self.message)
+
+# Mock implementations for ImageResponse and to_data_uri
+class ImageResponse:
+    def __init__(self, images: str, alt: str):
+        self.images = images
+        self.alt = alt
+
+def to_data_uri(image: Any) -> str:
+    return "data:image/png;base64,..."  # Replace with actual base64 data
+
+# Placeholder classes for AsyncGeneratorProvider and ProviderModelMixin
+class AsyncGeneratorProvider:
+    pass  # Implement as per your actual provider's requirements
+
+class ProviderModelMixin:
+    pass  # Implement as per your actual provider's requirements
+
+class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
+    label = "Blackbox AI"
+    url = "https://www.blackbox.ai"
+    api_endpoint = "https://www.blackbox.ai/api/chat"
+    working = True
+    supports_gpt_4 = True
+    supports_stream = True
+    supports_system_message = True
+    supports_message_history = True
+
+    default_model = 'blackboxai'
+    image_models = ['ImageGeneration']
+    models = [
+        default_model,
+        'blackboxai-pro',
+        *image_models,
+        "llama-3.1-8b",
+        'llama-3.1-70b',
+        'llama-3.1-405b',
+        'gpt-4o',
+        'gemini-pro',
+        'gemini-1.5-flash',
+        'claude-sonnet-3.5',
+        'PythonAgent',
+        'JavaAgent',
+        'JavaScriptAgent',
+        'HTMLAgent',
+        'GoogleCloudAgent',
+        'AndroidDeveloper',
+        'SwiftDeveloper',
+        'Next.jsAgent',
+        'MongoDBAgent',
+        'PyTorchAgent',
+        'ReactAgent',
+        'XcodeAgent',
+        'AngularJSAgent',
+    ]
+
+    # Filter models based on AVAILABLE_MODELS
+    if AVAILABLE_MODELS:
+        models = [model for model in models if model in AVAILABLE_MODELS]
+
+    agentMode = {
+        'ImageGeneration': {'mode': True, 'id': "ImageGenerationLV45LJp", 'name': "Image Generation"},
+    }
+
+    trendingAgentMode = {
+        "blackboxai": {},
+        "gemini-1.5-flash": {'mode': True, 'id': 'Gemini'},
+        "llama-3.1-8b": {'mode': True, 'id': "llama-3.1-8b"},
+        'llama-3.1-70b': {'mode': True, 'id': "llama-3.1-70b"},
+        'llama-3.1-405b': {'mode': True, 'id': "llama-3.1-405b"},
+        'blackboxai-pro': {'mode': True, 'id': "BLACKBOXAI-PRO"},
+        'PythonAgent': {'mode': True, 'id': "Python Agent"},
+        'JavaAgent': {'mode': True, 'id': "Java Agent"},
+        'JavaScriptAgent': {'mode': True, 'id': "JavaScript Agent"},
+        'HTMLAgent': {'mode': True, 'id': "HTML Agent"},
+        'GoogleCloudAgent': {'mode': True, 'id': "Google Cloud Agent"},
+        'AndroidDeveloper': {'mode': True, 'id': "Android Developer"},
+        'SwiftDeveloper': {'mode': True, 'id': "Swift Developer"},
+        'Next.jsAgent': {'mode': True, 'id': "Next.js Agent"},
+        'MongoDBAgent': {'mode': True, 'id': "MongoDB Agent"},
+        'PyTorchAgent': {'mode': True, 'id': "PyTorch Agent"},
+        'ReactAgent': {'mode': True, 'id': "React Agent"},
+        'XcodeAgent': {'mode': True, 'id': "Xcode Agent"},
+        'AngularJSAgent': {'mode': True, 'id': "AngularJS Agent"},
+    }
+
+    userSelectedModel = {
+        "gpt-4o": "gpt-4o",
+        "gemini-pro": "gemini-pro",
+        'claude-sonnet-3.5': "claude-sonnet-3.5",
+        "niansuh": "Niansuh",  # Added based on model_aliases
+    }
+
+    model_prefixes = {
+        'gpt-4o': '@GPT-4o',
+        'gemini-pro': '@Gemini-PRO',
+        'claude-sonnet-3.5': '@Claude-Sonnet-3.5',
+        'PythonAgent': '@Python Agent',
+        'JavaAgent': '@Java Agent',
+        'JavaScriptAgent': '@JavaScript Agent',
+        'HTMLAgent': '@HTML Agent',
+        'GoogleCloudAgent': '@Google Cloud Agent',
+        'AndroidDeveloper': '@Android Developer',
+        'SwiftDeveloper': '@Swift Developer',
+        'Next.jsAgent': '@Next.js Agent',
+        'MongoDBAgent': '@MongoDB Agent',
+        'PyTorchAgent': '@PyTorch Agent',
+        'ReactAgent': '@React Agent',
+        'XcodeAgent': '@Xcode Agent',
+        'AngularJSAgent': '@AngularJS Agent',
+        'blackboxai-pro': '@BLACKBOXAI-PRO',
+        'ImageGeneration': '@Image Generation',
+        'Niansuh': '@Niansuh',
+    }
+
+    model_referers = {
+        "blackboxai": "/?model=blackboxai",
+        "gpt-4o": "/?model=gpt-4o",
+        "gemini-pro": "/?model=gemini-pro",
+        "claude-sonnet-3.5": "/?model=claude-sonnet-3.5"
+    }
+
+    model_aliases = {
+        "gemini-flash": "gemini-1.5-flash",
+        "claude-3.5-sonnet": "claude-sonnet-3.5",
+        "flux": "ImageGeneration",
+        "niansuh": "Niansuh",
+    }
+
+    @classmethod
+    def get_model(cls, model: str) -> Optional[str]:
+        if model in cls.models:
+            return model
+        elif model in cls.userSelectedModel and cls.userSelectedModel[model] in cls.models:
+            return cls.userSelectedModel[model]
+        elif model in cls.model_aliases and cls.model_aliases[model] in cls.models:
+            return cls.model_aliases[model]
+        else:
+            return cls.default_model if cls.default_model in cls.models else None
+
+    @staticmethod
+    def generate_random_string(length: int = 7) -> str:
+        characters = string.ascii_letters + string.digits
+        return ''.join(random.choices(characters, k=length))
+
+    @staticmethod
+    def generate_next_action() -> str:
+        return uuid.uuid4().hex
+
+    @staticmethod
+    def generate_next_router_state_tree() -> str:
+        router_state = [
+            "",
+            {
+                "children": [
+                    "(chat)",
+                    {
+                        "children": [
+                            "__PAGE__",
+                            {}
+                        ]
+                    }
+                ]
+            },
+            None,
+            None,
+            True
+        ]
+        return json.dumps(router_state)
+
+    @staticmethod
+    def clean_response(text: str) -> str:
+        pattern = r'^\$\@\$v=undefined-rv1\$\@\$'
+        cleaned_text = re.sub(pattern, '', text)
+        return cleaned_text
+
+    @classmethod
+    async def create_async_generator(
+        cls,
+        model: str,
+        messages: List[Dict[str, str]],
+        proxy: Optional[str] = None,
+        websearch: bool = False,
+        **kwargs
+    ) -> AsyncGenerator[Union[str, ImageResponse], None]:
+        """
+        Creates an asynchronous generator for streaming responses from Blackbox AI.
+
+        Parameters:
+            model (str): Model to use for generating responses.
+            messages (List[Dict[str, str]]): Message history.
+            proxy (Optional[str]): Proxy URL, if needed.
+            websearch (bool): Enables or disables web search mode.
+            **kwargs: Additional keyword arguments.
+
+        Yields:
+            Union[str, ImageResponse]: Segments of the generated response or ImageResponse objects.
+        """
+        model = cls.get_model(model)
+        if model is None:
+            logger.error(f"Model {model} is not available.")
+            raise ModelNotWorkingException(model)
+
+        chat_id = cls.generate_random_string()
+        next_action = cls.generate_next_action()
+        next_router_state_tree = cls.generate_next_router_state_tree()
+
+        agent_mode = cls.agentMode.get(model, {})
+        trending_agent_mode = cls.trendingAgentMode.get(model, {})
+
+        prefix = cls.model_prefixes.get(model, "")
+
+        formatted_prompt = ""
+        for message in messages:
+            role = message.get('role', '').capitalize()
+            content = message.get('content', '')
+            if role and content:
+                formatted_prompt += f"{role}: {content}\n"
+
+        if prefix:
+            formatted_prompt = f"{prefix} {formatted_prompt}".strip()
+
+        referer_path = cls.model_referers.get(model, f"/?model={model}")
+        referer_url = f"{cls.url}{referer_path}"
+
+        common_headers = {
+            'accept': '*/*',
+            'accept-language': 'en-US,en;q=0.9',
+            'cache-control': 'no-cache',
+            'origin': cls.url,
+            'pragma': 'no-cache',
+            'priority': 'u=1, i',
+            'sec-ch-ua': '"Chromium";v="129", "Not=A?Brand";v="8"',
+            'sec-ch-ua-mobile': '?0',
+            'sec-ch-ua-platform': '"Linux"',
+            'sec-fetch-dest': 'empty',
+            'sec-fetch-mode': 'cors',
+            'sec-fetch-site': 'same-origin',
+            'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) '
+                          'AppleWebKit/537.36 (KHTML, like Gecko) '
+                          'Chrome/129.0.0.0 Safari/537.36'
+        }
+
+        headers_api_chat = {
+            'Content-Type': 'application/json',
+            'Referer': referer_url
+        }
+        headers_api_chat_combined = {**common_headers, **headers_api_chat}
+
+        payload_api_chat = {
+            "messages": [
+                {
+                    "id": chat_id,
+                    "content": formatted_prompt,
+                    "role": "user"
+                }
+            ],
+            "id": chat_id,
+            "previewToken": None,
+            "userId": None,
+            "codeModelMode": True,
+            "agentMode": agent_mode,
+            "trendingAgentMode": trending_agent_mode,
+            "isMicMode": False,
+            "userSystemPrompt": None,
+            "maxTokens": 1024,
+            "playgroundTopP": 0.9,
+            "playgroundTemperature": 0.5,
+            "isChromeExt": False,
+            "githubToken": None,
+            "clickedAnswer2": False,
+            "clickedAnswer3": False,
+            "clickedForceWebSearch": False,
+            "visitFromDelta": False,
+            "mobileClient": False,
+            "webSearchMode": websearch,
+            "userSelectedModel": cls.userSelectedModel.get(model, model)
+        }
+
+        headers_chat = {
+            'Accept': 'text/x-component',
+            'Content-Type': 'text/plain;charset=UTF-8',
+            'Referer': f'{cls.url}/chat/{chat_id}?model={model}',
+            'next-action': next_action,
+            'next-router-state-tree': next_router_state_tree,
+            'next-url': '/'
+        }
+        headers_chat_combined = {**common_headers, **headers_chat}
+
+        data_chat = '[]'
+
+        async with ClientSession(headers=common_headers) as session:
+            try:
+                # Send initial chat request
+                async with session.post(
+                    cls.api_endpoint,
+                    headers=headers_api_chat_combined,
+                    json=payload_api_chat,
+                    proxy=proxy
+                ) as response_api_chat:
+                    response_api_chat.raise_for_status()
+                    text = await response_api_chat.text()
+                    cleaned_response = cls.clean_response(text)
+
+                    if model in cls.image_models:
+                        match = re.search(r'!\[.*?\]\((https?://[^\)]+)\)', cleaned_response)
+                        if match:
+                            image_url = match.group(1)
+                            image_response = ImageResponse(images=image_url, alt="Generated Image")
+                            yield image_response
+                        else:
+                            yield cleaned_response
+                    else:
+                        if websearch:
+                            match = re.search(r'\$~~~\$(.*?)\$~~~\$', cleaned_response, re.DOTALL)
+                            if match:
+                                source_part = match.group(1).strip()
+                                answer_part = cleaned_response[match.end():].strip()
+                                try:
+                                    sources = json.loads(source_part)
+                                    source_formatted = "**Sources:**\n"
+                                    for item in sources[:5]:
+                                        title = item.get('title', 'No Title')
+                                        link = item.get('link', '#')
+                                        position = item.get('position', '')
+                                        source_formatted += f"{position}. [{title}]({link})\n"
+                                    final_response = f"{answer_part}\n\n{source_formatted}"
+                                except json.JSONDecodeError:
+                                    final_response = f"{answer_part}\n\nSource information is unavailable."
+                            else:
+                                final_response = cleaned_response
+                        else:
+                            if '$~~~$' in cleaned_response:
+                                final_response = cleaned_response.split('$~~~$')[0].strip()
+                            else:
+                                final_response = cleaned_response
+
+                        yield final_response
+            except ClientResponseError as e:
+                error_text = f"Error {e.status}: {e.message}"
+                try:
+                    error_response = await e.response.text()
+                    cleaned_error = cls.clean_response(error_response)
+                    error_text += f" - {cleaned_error}"
+                except Exception:
+                    pass
+                yield error_text
+            except Exception as e:
+                yield f"Unexpected error during /api/chat request: {str(e)}"
+
+            chat_url = f'{cls.url}/chat/{chat_id}?model={model}'
+
+            try:
+                # Send follow-up chat request
+                async with session.post(
+                    chat_url,
+                    headers=headers_chat_combined,
+                    data=data_chat,
+                    proxy=proxy
+                ) as response_chat:
+                    response_chat.raise_for_status()
+                    pass
+            except ClientResponseError as e:
+                error_text = f"Error {e.status}: {e.message}"
+                try:
+                    error_response = await e.response.text()
+                    cleaned_error = cls.clean_response(error_response)
+                    error_text += f" - {cleaned_error}"
+                except Exception:
+                    pass
+                yield error_text
+            except Exception as e:
+                yield f"Unexpected error during /chat/{chat_id} request: {str(e)}"
+
+# FastAPI app setup
+app = FastAPI()
+
+# Add the cleanup task when the app starts
+@app.on_event("startup")
+async def startup_event():
+    asyncio.create_task(cleanup_rate_limit_stores())
+    logger.info("Started rate limit store cleanup task.")
+
+# Middleware to enhance security and enforce Content-Type for specific endpoints
+@app.middleware("http")
+async def security_middleware(request: Request, call_next):
+    client_ip = request.client.host
+    # Enforce that POST requests to /v1/chat/completions must have Content-Type: application/json
+    if request.method == "POST" and request.url.path == "/v1/chat/completions":
+        content_type = request.headers.get("Content-Type")
+        if content_type != "application/json":
+            logger.warning(f"Invalid Content-Type from IP: {client_ip} for path: {request.url.path}")
+            return JSONResponse(
+                status_code=400,
+                content={
+                    "error": {
+                        "message": "Content-Type must be application/json",
+                        "type": "invalid_request_error",
+                        "param": None,
+                        "code": None
+                    }
+                },
+            )
+    response = await call_next(request)
+    return response
+
+# Request Models
 class Message(BaseModel):
     role: str
     content: str
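A hedged usage sketch for the provider class added in this hunk: it assumes the file is importable as main and simply drains the async generator; the model name and prompt are illustrative.

    # Illustrative only: assumes main.py is importable as `main`; the prompt is made up.
    import asyncio

    from main import Blackbox, ImageResponse

    async def demo() -> None:
        generator = Blackbox.create_async_generator(
            model="blackboxai",
            messages=[{"role": "user", "content": "Say hello in one sentence."}],
            websearch=False,
        )
        async for chunk in generator:
            if isinstance(chunk, ImageResponse):
                print("image:", chunk.images)  # URL pulled out of the markdown image tag
            else:
                print("text:", chunk)          # cleaned text segment (or an error string)

    asyncio.run(demo())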
@@ -140,45 +556,6 @@ def create_response(content: str, model: str, finish_reason: Optional[str] = Non
         "usage": None,  # To be filled in non-streaming responses
     }
 
-# Custom exception for model not working
-class ModelNotWorkingException(Exception):
-    def __init__(self, model: str):
-        self.model = model
-        self.message = f"The model '{model}' is currently not working. Please try another model or wait for it to be fixed."
-        super().__init__(self.message)
-
-# Initialize FastAPI app
-app = FastAPI()
-
-# Add the cleanup task when the app starts
-@app.on_event("startup")
-async def startup_event():
-    asyncio.create_task(cleanup_rate_limit_stores())
-    logger.info("Started rate limit store cleanup task.")
-
-# Middleware to enhance security and enforce Content-Type for specific endpoints
-@app.middleware("http")
-async def security_middleware(request: Request, call_next):
-    client_ip = request.client.host
-    # Enforce that POST requests to /v1/chat/completions must have Content-Type: application/json
-    if request.method == "POST" and request.url.path == "/v1/chat/completions":
-        content_type = request.headers.get("Content-Type")
-        if content_type != "application/json":
-            logger.warning(f"Invalid Content-Type from IP: {client_ip} for path: {request.url.path}")
-            return JSONResponse(
-                status_code=400,
-                content={
-                    "error": {
-                        "message": "Content-Type must be application/json",
-                        "type": "invalid_request_error",
-                        "param": None,
-                        "code": None
-                    }
-                },
-            )
-    response = await call_next(request)
-    return response
-
 # FastAPI Endpoints
 
 @app.post("/v1/chat/completions", dependencies=[Depends(rate_limiter_per_ip)])
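The middleware removed here is re-added unchanged by the previous hunk, so the endpoint contract stays the same. For reference, a hypothetical client call against a local run of the app; the host, port, and the Bearer form of the Authorization header are assumptions, while the strict Content-Type check and the 401 on an invalid key come from the code shown in this diff.

    # Hypothetical client call; host/port and the Authorization format are assumed.
    import asyncio
    import json

    from aiohttp import ClientSession

    async def call_chat_completions() -> None:
        payload = {
            "model": "blackboxai",
            "messages": [{"role": "user", "content": "Hello!"}],
            "webSearchMode": False,
        }
        headers = {
            "Content-Type": "application/json",      # anything else gets a 400 from security_middleware
            "Authorization": "Bearer YOUR_API_KEY",  # assumed format; an invalid key yields a 401
        }
        async with ClientSession() as session:
            async with session.post(
                "http://127.0.0.1:8000/v1/chat/completions",
                data=json.dumps(payload),
                headers=headers,
            ) as resp:
                print(resp.status)
                print(await resp.text())

    asyncio.run(call_chat_completions())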
@@ -199,8 +576,7 @@ async def chat_completions(request: ChatRequest, req: Request, api_key: str = De
         async_generator = Blackbox.create_async_generator(
             model=request.model,
             messages=[{"role": msg.role, "content": msg.content} for msg in request.messages],  # Actual message content used here
-
-            image_name=None,
+            proxy=None,  # Add proxy if needed
             websearch=request.webSearchMode
         )
 
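For clarity, the list comprehension at this call site converts the request's Pydantic Message objects into the plain dicts that Blackbox.create_async_generator iterates over. A self-contained sketch of that conversion, with made-up message text:

    # Sketch of the Message-to-dict conversion done at the call site above.
    from pydantic import BaseModel

    class Message(BaseModel):
        role: str
        content: str

    request_messages = [Message(role="user", content="Hi there")]
    as_dicts = [{"role": msg.role, "content": msg.content} for msg in request_messages]
    print(as_dicts)  # [{'role': 'user', 'content': 'Hi there'}]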
@@ -234,7 +610,7 @@ async def chat_completions(request: ChatRequest, req: Request, api_key: str = De
             yield f"data: {json.dumps(response_chunk)}\n\n"
 
         # After all chunks are sent, send the final message with finish_reason
-        prompt_tokens = sum(len(msg
+        prompt_tokens = sum(len(msg['content'].split()) for msg in request.messages)
         completion_tokens = len(assistant_content.split())
         total_tokens = prompt_tokens + completion_tokens
         estimated_cost = calculate_estimated_cost(prompt_tokens, completion_tokens)
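A small worked example of the whitespace-split token estimate used in the replacement line above. The sample messages are made up, and calculate_estimated_cost is defined elsewhere in main.py, so it is not reproduced here.

    # Worked example of the whitespace-based token estimate; sample data only.
    messages = [
        {"role": "user", "content": "What is the capital of France?"},        # 6 whitespace-split tokens
        {"role": "assistant", "content": "The capital of France is Paris."},  # 6
    ]
    assistant_content = "Paris is the capital and largest city of France."    # 9

    prompt_tokens = sum(len(msg['content'].split()) for msg in messages)  # 6 + 6 = 12
    completion_tokens = len(assistant_content.split())                    # 9
    total_tokens = prompt_tokens + completion_tokens                       # 21
    print(prompt_tokens, completion_tokens, total_tokens)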
|