Update main.py
main.py
CHANGED
@@ -1,3 +1,5 @@
+from __future__ import annotations
+
 import os
 import re
 import random
@@ -53,7 +55,8 @@ async def cleanup_rate_limit_stores():
     """
     while True:
         current_time = time.time()
-        ips_to_delete = [ip for ip, value in rate_limit_store.items() if current_time - value["timestamp"] > RATE_LIMIT_WINDOW * 2]
+        ips_to_delete = [ip for ip, value in rate_limit_store.items()
+                         if current_time - value["timestamp"] > RATE_LIMIT_WINDOW * 2]
         for ip in ips_to_delete:
            del rate_limit_store[ip]
            logger.debug(f"Cleaned up rate_limit_store for IP: {ip}")
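
Note: the comprehension above is only re-wrapped; the eviction logic is unchanged. For context, a minimal sketch of the store this task assumes (the "count" field and the sleep cadence are not visible in the diff and are assumptions):

    import asyncio
    import time

    RATE_LIMIT_WINDOW = 60  # seconds; name from the diff, value assumed
    rate_limit_store = {}   # ip -> {"timestamp": float, "count": int} (shape assumed)

    async def cleanup_rate_limit_stores():
        """Evict IPs idle for more than two rate-limit windows."""
        while True:
            current_time = time.time()
            ips_to_delete = [ip for ip, value in rate_limit_store.items()
                             if current_time - value["timestamp"] > RATE_LIMIT_WINDOW * 2]
            for ip in ips_to_delete:
                del rate_limit_store[ip]
            await asyncio.sleep(RATE_LIMIT_WINDOW)  # assumed cleanup interval
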
@@ -118,6 +121,7 @@ class Blackbox:
     models = [
         default_model,
         'blackboxai-pro',
+        *image_models,
         "llama-3.1-8b",
         'llama-3.1-70b',
         'llama-3.1-405b',
@@ -138,8 +142,6 @@ class Blackbox:
         'ReactAgent',
         'XcodeAgent',
         'AngularJSAgent',
-        *image_models,
-        'Niansuh',
     ]

     # Filter models based on AVAILABLE_MODELS
@@ -148,7 +150,6 @@ class Blackbox:

     agentMode = {
         'ImageGeneration': {'mode': True, 'id': "ImageGenerationLV45LJp", 'name': "Image Generation"},
-        'Niansuh': {'mode': True, 'id': "NiansuhAIk1HgESy", 'name': "Niansuh"},
     }
     trendingAgentMode = {
         "blackboxai": {},
@@ -197,33 +198,65 @@ class Blackbox:
         'AngularJSAgent': '@AngularJS Agent',
         'blackboxai-pro': '@BLACKBOXAI-PRO',
         'ImageGeneration': '@Image Generation',
-        'Niansuh': '@Niansuh',
     }

     model_referers = {
-        "blackboxai":
-        "gpt-4o":
-        "gemini-pro":
-        "claude-sonnet-3.5":
+        "blackboxai": "/?model=blackboxai",
+        "gpt-4o": "/?model=gpt-4o",
+        "gemini-pro": "/?model=gemini-pro",
+        "claude-sonnet-3.5": "/?model=claude-sonnet-3.5"
     }

     model_aliases = {
         "gemini-flash": "gemini-1.5-flash",
         "claude-3.5-sonnet": "claude-sonnet-3.5",
         "flux": "ImageGeneration",
-        "niansuh": "Niansuh",
     }

     @classmethod
-    def get_model(cls, model: str) ->
+    def get_model(cls, model: str) -> str:
         if model in cls.models:
             return model
-        elif model in cls.userSelectedModel:
-            return cls.userSelectedModel[model]
-        elif model in cls.model_aliases and cls.model_aliases[model] in cls.models:
+        elif model in cls.model_aliases:
             return cls.model_aliases[model]
         else:
             return cls.default_model
+
+    @staticmethod
+    def generate_random_string(length: int = 7) -> str:
+        characters = string.ascii_letters + string.digits
+        return ''.join(random.choices(characters, k=length))
+
+    @staticmethod
+    def generate_next_action() -> str:
+        return uuid.uuid4().hex
+
+    @staticmethod
+    def generate_next_router_state_tree() -> str:
+        router_state = [
+            "",
+            {
+                "children": [
+                    "(chat)",
+                    {
+                        "children": [
+                            "__PAGE__",
+                            {}
+                        ]
+                    }
+                ]
+            },
+            None,
+            None,
+            True
+        ]
+        return json.dumps(router_state)
+
+    @staticmethod
+    def clean_response(text: str) -> str:
+        pattern = r'^\$\@\$v=undefined-rv1\$\@\$'
+        cleaned_text = re.sub(pattern, '', text)
+        return cleaned_text

     @classmethod
     async def create_async_generator(
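
Behavior note on get_model: the userSelectedModel lookup is gone, and an alias is no longer validated against cls.models, so any unknown name now falls back silently to cls.default_model. A quick sketch of the resolution order, assuming the class attributes shown above (and that the name survives the AVAILABLE_MODELS filter):

    Blackbox.get_model("llama-3.1-8b")   # in cls.models -> returned unchanged
    Blackbox.get_model("flux")           # alias -> "ImageGeneration"
    Blackbox.get_model("no-such-model")  # anything else -> cls.default_model
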
@@ -231,73 +264,87 @@ class Blackbox:
         model: str,
         messages: List[Dict[str, str]],
         proxy: Optional[str] = None,
-
-        image_name: Optional[str] = None,
-        webSearchMode: bool = False,
+        websearch: bool = False,
         **kwargs
-    ) -> AsyncGenerator[
+    ) -> AsyncGenerator[Union[str, ImageResponse], None]:
+        """
+        Creates an asynchronous generator for streaming responses from Blackbox AI.
+
+        Parameters:
+            model (str): Model to use for generating responses.
+            messages (List[Dict[str, str]]): Message history.
+            proxy (Optional[str]): Proxy URL, if needed.
+            websearch (bool): Enables or disables web search mode.
+            **kwargs: Additional keyword arguments.
+
+        Yields:
+            Union[str, ImageResponse]: Segments of the generated response or ImageResponse objects.
+        """
         model = cls.get_model(model)
-        if model is None:
-            logger.error(f"Model {model} is not available.")
-            raise ModelNotWorkingException(model)

-
-
-
-            raise ModelNotWorkingException(model)
-
-        headers = {
-            "accept": "*/*",
-            "accept-language": "en-US,en;q=0.9",
-            "cache-control": "no-cache",
-            "content-type": "application/json",
-            "origin": cls.url,
-            "pragma": "no-cache",
-            "priority": "u=1, i",
-            "referer": cls.model_referers.get(model, cls.url),
-            "sec-ch-ua": '"Chromium";v="129", "Not=A?Brand";v="8"',
-            "sec-ch-ua-mobile": "?0",
-            "sec-ch-ua-platform": '"Linux"',
-            "sec-fetch-dest": "empty",
-            "sec-fetch-mode": "cors",
-            "sec-fetch-site": "same-origin",
-            "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/129.0.0.0 Safari/537.36",
-        }
+        chat_id = cls.generate_random_string()
+        next_action = cls.generate_next_action()
+        next_router_state_tree = cls.generate_next_router_state_tree()

-
-        prefix = cls.model_prefixes[model]
-        if not messages[0]['content'].startswith(prefix):
-            logger.debug(f"Adding prefix '{prefix}' to the first message.")
-            messages[0]['content'] = f"{prefix} {messages[0]['content']}"
+        agent_mode = cls.agentMode.get(model, {})
+        trending_agent_mode = cls.trendingAgentMode.get(model, {})

-
-
-
-
-
-
-
-        if image is not None:
-            messages[-1]['data'] = {
-                'fileText': '',
-                'imageBase64': to_data_uri(image),
-                'title': image_name
-            }
-            messages[-1]['content'] = 'FILE:BB\n$#$\n\n$#$\n' + messages[-1]['content']
-            logger.debug("Image data added to the message.")
+        prefix = cls.model_prefixes.get(model, "")
+
+        formatted_prompt = ""
+        for message in messages:
+            role = message.get('role', '').capitalize()
+            content = message.get('content', '')
+            if role and content:
+                formatted_prompt += f"{role}: {content}\n"
+
+        if prefix:
+            formatted_prompt = f"{prefix} {formatted_prompt}".strip()
+
+        referer_path = cls.model_referers.get(model, f"/?model={model}")
+        referer_url = f"{cls.url}{referer_path}"
+
+        common_headers = {
+            'accept': '*/*',
+            'accept-language': 'en-US,en;q=0.9',
+            'cache-control': 'no-cache',
+            'origin': cls.url,
+            'pragma': 'no-cache',
+            'priority': 'u=1, i',
+            'sec-ch-ua': '"Chromium";v="129", "Not=A?Brand";v="8"',
+            'sec-ch-ua-mobile': '?0',
+            'sec-ch-ua-platform': '"Linux"',
+            'sec-fetch-dest': 'empty',
+            'sec-fetch-mode': 'cors',
+            'sec-fetch-site': 'same-origin',
+            'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) '
+                          'AppleWebKit/537.36 (KHTML, like Gecko) '
+                          'Chrome/129.0.0.0 Safari/537.36'
+        }

-
-        "
-
+        headers_api_chat = {
+            'Content-Type': 'application/json',
+            'Referer': referer_url
+        }
+        headers_api_chat_combined = {**common_headers, **headers_api_chat}
+
+        payload_api_chat = {
+            "messages": [
+                {
+                    "id": chat_id,
+                    "content": formatted_prompt,
+                    "role": "user"
+                }
+            ],
+            "id": chat_id,
             "previewToken": None,
             "userId": None,
             "codeModelMode": True,
-            "agentMode":
-            "trendingAgentMode":
+            "agentMode": agent_mode,
+            "trendingAgentMode": trending_agent_mode,
             "isMicMode": False,
             "userSystemPrompt": None,
-            "maxTokens":
+            "maxTokens": 1024,
             "playgroundTopP": 0.9,
             "playgroundTemperature": 0.5,
             "isChromeExt": False,
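
Note: instead of prefixing only the first message in place, the rewrite flattens the whole history into a single "Role: content" prompt. A small illustration of what the loop produces (the example messages are hypothetical):

    messages = [
        {"role": "system", "content": "You are terse."},
        {"role": "user", "content": "Hello"},
    ]
    # The loop yields:
    #   "System: You are terse.\nUser: Hello\n"
    # If the model has a prefix (e.g. "@Image Generation"), it is prepended
    # once to the flattened prompt rather than to messages[0].
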
@@ -307,81 +354,102 @@ class Blackbox:
             "clickedForceWebSearch": False,
             "visitFromDelta": False,
             "mobileClient": False,
-            "
-            "
+            "webSearchMode": websearch,
+            "userSelectedModel": cls.userSelectedModel.get(model, model)
         }

-
-
-
-
-
-
-
+        headers_chat = {
+            'Accept': 'text/x-component',
+            'Content-Type': 'text/plain;charset=UTF-8',
+            'Referer': f'{cls.url}/chat/{chat_id}?model={model}',
+            'next-action': next_action,
+            'next-router-state-tree': next_router_state_tree,
+            'next-url': '/'
+        }
+        headers_chat_combined = {**common_headers, **headers_chat}

-
-        retry_attempts = 10  # Set the number of retry attempts
+        data_chat = '[]'

-
+        async with ClientSession(headers=common_headers) as session:
             try:
-                async with
-
-
-
-
-
-
-
-
-
+                async with session.post(
+                    cls.api_endpoint,
+                    headers=headers_api_chat_combined,
+                    json=payload_api_chat,
+                    proxy=proxy
+                ) as response_api_chat:
+                    response_api_chat.raise_for_status()
+                    text = await response_api_chat.text()
+                    cleaned_response = cls.clean_response(text)
+
+                    if model in cls.image_models:
+                        match = re.search(r'!\[.*?\]\((https?://[^\)]+)\)', cleaned_response)
+                        if match:
+                            image_url = match.group(1)
+                            image_response = ImageResponse(url=image_url, alt="Generated Image")
+                            yield image_response
+                        else:
+                            yield cleaned_response
+                    else:
+                        if websearch:
+                            match = re.search(r'\$~~~\$(.*?)\$~~~\$', cleaned_response, re.DOTALL)
+                            if match:
+                                source_part = match.group(1).strip()
+                                answer_part = cleaned_response[match.end():].strip()
+                                try:
+                                    sources = json.loads(source_part)
+                                    source_formatted = "**Sources:**\n"
+                                    for item in sources:
+                                        title = item.get('title', 'No Title')
+                                        link = item.get('link', '#')
+                                        position = item.get('position', '')
+                                        source_formatted += f"{position}. [{title}]({link})\n"
+                                    final_response = f"{answer_part}\n\n{source_formatted}"
+                                except json.JSONDecodeError:
+                                    final_response = f"{answer_part}\n\nSource information is unavailable."
                             else:
-
-                                raise Exception("Image URL not found in the response")
+                                final_response = cleaned_response
                         else:
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-                    logger.exception("Error while iterating over response chunks.")
-                    raise e
-                if data["webSearchMode"] and search_results_json:
-                    match = re.search(r'\$~~~\$(.*?)\$~~~\$', search_results_json, re.DOTALL)
-                    if match:
-                        try:
-                            search_results = json.loads(match.group(1))
-                            formatted_results = "\n\n**Sources:**\n"
-                            for i, result in enumerate(search_results[:5], 1):
-                                formatted_results += f"{i}. [{result['title']}]({result['link']})\n"
-                            logger.info("Formatted search results.")
-                            yield formatted_results
-                        except json.JSONDecodeError as je:
-                            logger.error("Failed to parse search results JSON.")
-                            raise je
-                break  # Exit the retry loop if successful
-            except ClientError as ce:
-                logger.error(f"Client error occurred: {ce}. Retrying attempt {attempt + 1}/{retry_attempts}")
-                if attempt == retry_attempts - 1:
-                    raise HTTPException(status_code=502, detail="Error communicating with the external API.")
-            except asyncio.TimeoutError:
-                logger.error(f"Request timed out. Retrying attempt {attempt + 1}/{retry_attempts}")
-                if attempt == retry_attempts - 1:
-                    raise HTTPException(status_code=504, detail="External API request timed out.")
+                            if '$~~~$' in cleaned_response:
+                                final_response = cleaned_response.split('$~~~$')[0].strip()
+                            else:
+                                final_response = cleaned_response
+
+                        yield final_response
+            except ClientResponseError as e:
+                error_text = f"Error {e.status}: {e.message}"
+                try:
+                    error_response = await e.response.text()
+                    cleaned_error = cls.clean_response(error_response)
+                    error_text += f" - {cleaned_error}"
+                except Exception:
+                    pass
+                yield error_text
             except Exception as e:
-
-
-
+                yield f"Unexpected error during /api/chat request: {str(e)}"
+
+            chat_url = f'{cls.url}/chat/{chat_id}?model={model}'
+
+            try:
+                async with session.post(
+                    chat_url,
+                    headers=headers_chat_combined,
+                    data=data_chat,
+                    proxy=proxy
+                ) as response_chat:
+                    response_chat.raise_for_status()
+                    pass
+            except ClientResponseError as e:
+                error_text = f"Error {e.status}: {e.message}"
+                try:
+                    error_response = await e.response.text()
+                    cleaned_error = cls.clean_response(error_response)
+                    error_text += f" - {cleaned_error}"
+                except Exception:
+                    pass
+                yield error_text
+            except Exception as e:
+                yield f"Unexpected error during /chat/{chat_id} request: {str(e)}"

 # FastAPI app setup
 app = FastAPI()
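
Note: the old retry loop (retry_attempts = 10) is replaced by a single attempt against cls.api_endpoint, followed by a second POST to /chat/{chat_id} that mimics a Next.js server action (the next-action and next-router-state-tree headers); errors are now yielded as plain text instead of being raised as HTTPException. A hypothetical way to consume the generator, assuming the module's Blackbox and ImageResponse names are importable and that ImageResponse exposes the url it was constructed with:

    import asyncio

    async def main():
        messages = [{"role": "user", "content": "Hi"}]
        async for chunk in Blackbox.create_async_generator("gpt-4o", messages):
            if isinstance(chunk, ImageResponse):
                print("image url:", chunk.url)  # attribute name assumed
            else:
                print(chunk, end="")

    asyncio.run(main())
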
@@ -466,6 +534,7 @@ def create_response(content: str, model: str, finish_reason: Optional[str] = None):
         "usage": None,  # To be filled in non-streaming responses
     }

+# Existing /v1/chat/completions Endpoint
 @app.post("/v1/chat/completions", dependencies=[Depends(rate_limiter_per_ip)])
 async def chat_completions(request: ChatRequest, req: Request, api_key: str = Depends(get_api_key)):
     client_ip = req.client.host
@@ -486,7 +555,7 @@ async def chat_completions(request: ChatRequest, req: Request, api_key: str = Depends(get_api_key)):
         messages=[{"role": msg.role, "content": msg.content} for msg in request.messages],  # Actual message content used here
         image=None,
         image_name=None,
-
+        websearch=request.webSearchMode
     )

     if request.stream:
@@ -606,6 +675,45 @@ async def chat_completions(request: ChatRequest, req: Request, api_key: str = Depends(get_api_key)):
     logger.exception(f"An unexpected error occurred while processing the chat completions request from IP: {client_ip}.")
     raise HTTPException(status_code=500, detail=str(e))

+# New Web Search Endpoint
+class WebSearchRequest(BaseModel):
+    query: str
+    num_results: Optional[int] = 5
+
+@app.post("/v1/websearch", dependencies=[Depends(rate_limiter_per_ip)])
+async def web_search(request: WebSearchRequest, req: Request, api_key: str = Depends(get_api_key)):
+    """
+    Handles web search requests.
+
+    Parameters:
+        query (str): The search query.
+        num_results (int): Number of search results to return.
+
+    Returns:
+        JSONResponse: Contains the search results.
+    """
+    client_ip = req.client.host
+    logger.info(f"Received web search request from API key: {api_key} | IP: {client_ip} | Query: {request.query} | Num Results: {request.num_results}")
+
+    # Implement your web search logic here.
+    # This is a mock implementation. Replace it with actual web search integration.
+
+    try:
+        # Mock search results
+        search_results = []
+        for i in range(1, request.num_results + 1):
+            search_results.append({
+                "position": i,
+                "title": f"Sample Search Result {i} for '{request.query}'",
+                "link": f"https://www.example.com/search-result-{i}"
+            })
+
+        logger.info(f"Web search completed for query: {request.query}")
+        return {"results": search_results}
+    except Exception as e:
+        logger.exception(f"An error occurred during web search from IP: {client_ip}.")
+        raise HTTPException(status_code=500, detail="An error occurred during web search.")
+
 # Endpoint: POST /v1/tokenizer
 @app.post("/v1/tokenizer", dependencies=[Depends(rate_limiter_per_ip)])
 async def tokenizer(request: TokenizerRequest, req: Request):
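
Note: /v1/websearch currently returns mock results; the handler is a stub awaiting a real search integration. A hypothetical client call (host, port, and the Bearer auth scheme behind get_api_key are assumptions):

    import requests

    resp = requests.post(
        "http://localhost:8000/v1/websearch",              # host/port assumed
        headers={"Authorization": "Bearer YOUR_API_KEY"},  # auth scheme assumed
        json={"query": "fastapi streaming", "num_results": 3},
    )
    resp.raise_for_status()
    for item in resp.json()["results"]:
        print(item["position"], item["title"], item["link"])
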