Update main.py
main.py
@@ -98,8 +98,8 @@ class ModelNotWorkingException(Exception):
 
 # Define ImageResponseData class
 class ImageResponseData:
-    def __init__(self,
-        self.
+    def __init__(self, images: str, alt: str):
+        self.images = images
         self.alt = alt
 
 # Mock implementation of to_data_uri
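Note: ImageResponseData is just a value holder for an image URL plus alt text. The to_data_uri mock referenced by the comment sits outside this diff; a minimal sketch of what such a helper usually does (fetch the URL and base64-encode it — an assumption, not the code from main.py):

import base64
import urllib.request

def to_data_uri(image_url: str) -> str:
    # Hypothetical sketch; the actual mock in main.py is not shown in this diff.
    with urllib.request.urlopen(image_url) as resp:
        payload = resp.read()
        mime = resp.headers.get_content_type()
    return f"data:{mime};base64,{base64.b64encode(payload).decode()}"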
@@ -140,7 +140,6 @@ class Blackbox:
         'ReactAgent',
         'XcodeAgent',
         'AngularJSAgent',
-        'Niansuh',
     ]
 
     # Filter models based on AVAILABLE_MODELS
@@ -266,9 +265,7 @@ class Blackbox:
         model: str,
         messages: List[Dict[str, str]],
         proxy: Optional[str] = None,
-
-        image_name: Optional[str] = None,
-        webSearchMode: bool = False,
+        websearch: bool = False,
         **kwargs
     ) -> AsyncGenerator[Union[str, ImageResponseData], None]:
         """
@@ -278,9 +275,7 @@ class Blackbox:
             model (str): Model to use for generating responses.
             messages (List[Dict[str, str]]): Message history.
             proxy (Optional[str]): Proxy URL, if needed.
-
-            image_name (Optional[str]): Image name, if applicable.
-            webSearchMode (bool): Enables or disables web search mode.
+            websearch (bool): Enables or disables web search mode.
             **kwargs: Additional keyword arguments.
 
         Yields:
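With the signature above, callers pass the renamed flag as a plain keyword argument. A usage sketch (the model id and messages are invented for illustration):

import asyncio

async def demo() -> None:
    messages = [{"role": "user", "content": "What's new in Python 3.12?"}]
    async for chunk in Blackbox.create_async_generator(
        model="blackboxai",   # example model id
        messages=messages,
        websearch=True,       # the parameter introduced in this commit
    ):
        if isinstance(chunk, ImageResponseData):
            print("image:", chunk.images)
        else:
            print(chunk)

asyncio.run(demo())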
@@ -350,7 +345,7 @@ class Blackbox:
             "trendingAgentMode": trending_agent_mode,
             "isMicMode": False,
             "userSystemPrompt": None,
-            "maxTokens":
+            "maxTokens": 1024,
             "playgroundTopP": 0.9,
             "playgroundTemperature": 0.5,
             "isChromeExt": False,
@@ -360,96 +355,91 @@ class Blackbox:
             "clickedForceWebSearch": False,
             "visitFromDelta": False,
             "mobileClient": False,
-            "webSearchMode":
-            "userSelectedModel":
+            "webSearchMode": websearch,
+            "userSelectedModel": cls.userSelectedModel.get(model, model)
         }
 
-
-
-
-
-
-
-
-
+        headers_chat = {
+            'Accept': 'text/x-component',
+            'Content-Type': 'text/plain;charset=UTF-8',
+            'Referer': f'{cls.url}/chat/{chat_id}?model={model}',
+            'next-action': next_action,
+            'next-router-state-tree': next_router_state_tree,
+            'next-url': '/'
+        }
+        headers_chat_combined = {**common_headers, **headers_chat}
 
-
-        retry_attempts = 10  # Set the number of retry attempts
+        data_chat = '[]'
 
         async with ClientSession(headers=common_headers) as session:
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+            try:
+                async with session.post(
+                    cls.api_endpoint,
+                    headers=headers_api_chat_combined,
+                    json=payload_api_chat,
+                    proxy=proxy
+                ) as response_api_chat:
+                    response_api_chat.raise_for_status()
+                    text = await response_api_chat.text()
+                    cleaned_response = cls.clean_response(text)
+
+                    if model in cls.image_models:
+                        match = re.search(r'!\[.*?\]\((https?://[^\)]+)\)', cleaned_response)
+                        if match:
+                            image_url = match.group(1)
+                            image_response = ImageResponseData(images=image_url, alt="Generated Image")
+                            yield image_response
+                        else:
+                            yield cleaned_response
+                    else:
+                        if websearch:
+                            match = re.search(r'\$~~~\$(.*?)\$~~~\$', cleaned_response, re.DOTALL)
+                            if match:
+                                source_part = match.group(1).strip()
+                                answer_part = cleaned_response[match.end():].strip()
+                                try:
+                                    sources = json.loads(source_part)
+                                    source_formatted = "**Sources:**\n"
+                                    for item in sources[:5]:
+                                        title = item.get('title', 'No Title')
+                                        link = item.get('link', '#')
+                                        position = item.get('position', '')
+                                        source_formatted += f"- [{title}]({link})\n"
+                                    final_response = f"{answer_part}\n\n{source_formatted}"
+                                except json.JSONDecodeError:
+                                    final_response = f"{answer_part}\n\nSource information is unavailable."
                             else:
-
-                                raise Exception("Image URL not found in the response")
+                                final_response = cleaned_response
                         else:
-
-
-                            # Handle streaming-like responses if applicable
-                            # Assuming the response contains '$~~~$' for search results
-                            if webSearchMode:
-                                match = re.search(r'\$~~~\$(.*?)\$~~~\$', cleaned_response, re.DOTALL)
-                                if match:
-                                    search_results_json = match.group(1)
-                                    answer_part = cleaned_response[:match.start()].strip()
-                                    try:
-                                        search_results = json.loads(search_results_json)
-                                        formatted_results = "\n\n**Sources:**\n"
-                                        for i, result in enumerate(search_results[:5], 1):
-                                            formatted_results += f"{i}. [{result['title']}]({result['link']})\n"
-                                        final_response = f"{answer_part}\n\n{formatted_results}"
-                                        yield final_response
-                                    except json.JSONDecodeError as je:
-                                        logger.error("Failed to parse search results JSON.")
-                                        yield f"{cleaned_response}\n\n**Sources:** Information unavailable."
-                                else:
-                                    yield cleaned_response
+                            if '$~~~$' in cleaned_response:
+                                final_response = cleaned_response.split('$~~~$')[0].strip()
                             else:
-
-                                break  # Exit the retry loop if successful
-                        except ClientError as ce:
-                            logger.error(f"Client error occurred: {ce}. Retrying attempt {attempt + 1}/{retry_attempts}")
-                            if attempt == retry_attempts - 1:
-                                raise HTTPException(status_code=502, detail="Error communicating with the external API.")
-                        except asyncio.TimeoutError:
-                            logger.error(f"Request timed out. Retrying attempt {attempt + 1}/{retry_attempts}")
-                            if attempt == retry_attempts - 1:
-                                raise HTTPException(status_code=504, detail="External API request timed out.")
-                        except Exception as e:
-                            logger.error(f"Unexpected error: {e}. Retrying attempt {attempt + 1}/{retry_attempts}")
-                            if attempt == retry_attempts - 1:
-                                raise HTTPException(status_code=500, detail=str(e))
+                                final_response = cleaned_response
 
-
-
+                        yield final_response
+            except ClientResponseError as e:
+                error_text = f"Error {e.status}: {e.message}"
+                try:
+                    error_response = await e.response.text()
+                    cleaned_error = cls.clean_response(error_response)
+                    error_text += f" - {cleaned_error}"
+                except Exception:
+                    pass
+                yield error_text
+            except Exception as e:
+                yield f"Unexpected error during /api/chat request: {str(e)}"
 
-        headers_chat = {
-            'Accept': 'text/x-component',
-            'Content-Type': 'text/plain;charset=UTF-8',
-            'Referer': f'{cls.url}/chat/{chat_id}?model={model}',
-            'next-action': cls.generate_next_action(),
-            'next-router-state-tree': cls.generate_next_router_state_tree(),
-            'next-url': '/'
-        }
-        headers_chat_combined = {**common_headers, **headers_chat}
-        data_chat = '[]'
+            chat_url = f'{cls.url}/chat/{chat_id}?model={model}'
 
             try:
-            async with session.post(
+                async with session.post(
+                    chat_url,
+                    headers=headers_chat_combined,
+                    data=data_chat,
+                    proxy=proxy
+                ) as response_chat:
                     response_chat.raise_for_status()
-
+                    # No action needed based on the original code
             except ClientResponseError as e:
                 error_text = f"Error {e.status}: {e.message}"
                 try:
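The `$~~~$` handling above is easier to follow on a synthetic payload. The snippet below replays the same parsing logic on a made-up response string (the real upstream format is only inferred from this code):

import json
import re

# Made-up sample mimicking the inferred format: a JSON source list between
# $~~~$ markers, followed by the answer text.
sample = '$~~~$[{"title": "Python 3.12 Release", "link": "https://docs.python.org/3.12/"}]$~~~$The answer text.'

match = re.search(r'\$~~~\$(.*?)\$~~~\$', sample, re.DOTALL)
if match:
    sources = json.loads(match.group(1).strip())   # the JSON block between the markers
    answer = sample[match.end():].strip()          # everything after the second marker
    formatted = "**Sources:**\n" + "".join(
        f"- [{s.get('title', 'No Title')}]({s.get('link', '#')})\n" for s in sources[:5]
    )
    print(f"{answer}\n\n{formatted}")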
@@ -458,12 +448,9 @@ class Blackbox:
                     error_text += f" - {cleaned_error}"
                 except Exception:
                     pass
-                logger.error(error_text)
                 yield error_text
             except Exception as e:
-
-                logger.error(error_text)
-                yield error_text
+                yield f"Unexpected error during /chat/{chat_id} request: {str(e)}"
 
 # FastAPI app setup
 app = FastAPI()
@@ -515,7 +502,7 @@ class ChatRequest(BaseModel):
     frequency_penalty: Optional[float] = 0.0
     logit_bias: Optional[Dict[str, float]] = None
     user: Optional[str] = None
-
+    websearch: Optional[bool] = False  # Custom parameter
 
 class TokenizerRequest(BaseModel):
     text: str
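Clients opt in per request through this new field. For example (the endpoint path and port are assumptions based on the usual OpenAI-compatible layout of this app):

import requests

resp = requests.post(
    "http://localhost:8000/v1/chat/completions",   # assumed URL; adjust to your deployment
    json={
        "model": "blackboxai",                     # example model id
        "messages": [{"role": "user", "content": "Latest FastAPI release?"}],
        "websearch": True,                         # custom parameter added in this commit
        "stream": False,
    },
)
print(resp.json())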
@@ -566,7 +553,7 @@ async def chat_completions(request: ChatRequest, req: Request, api_key: str = De
         async_generator = Blackbox.create_async_generator(
             model=request.model,
             messages=[{"role": msg.role, "content": msg.content} for msg in request.messages],  # Actual message content used here
-
+            websearch=request.websearch
         )
 
         if request.stream:
@@ -576,27 +563,46 @@ async def chat_completions(request: ChatRequest, req: Request, api_key: str = De
                     async for chunk in async_generator:
                         if isinstance(chunk, ImageResponseData):
                             # Handle image responses if necessary
-                            image_markdown = f"
+                            image_markdown = f"![{chunk.alt}]({chunk.images})\n"
                             assistant_content += image_markdown
+                            response_chunk = {
+                                "id": f"chatcmpl-{uuid.uuid4()}",
+                                "object": "chat.completion",
+                                "created": int(datetime.now().timestamp()),
+                                "model": request.model,
+                                "choices": [
+                                    {
+                                        "index": 0,
+                                        "message": {
+                                            "role": "assistant",
+                                            "content": image_markdown
+                                        },
+                                        "finish_reason": None
+                                    }
+                                ],
+                                "usage": None,  # Usage can be updated if you track tokens in real-time
+                            }
                         else:
                             assistant_content += chunk
                             # Yield the chunk as a partial choice
                             response_chunk = {
                                 "id": f"chatcmpl-{uuid.uuid4()}",
-
-                                "object": "chat.completion
+                                "object": "chat.completion",
                                 "created": int(datetime.now().timestamp()),
                                 "model": request.model,
                                 "choices": [
                                     {
                                         "index": 0,
-                                        "
-
+                                        "message": {
+                                            "role": "assistant",
+                                            "content": chunk
+                                        },
+                                        "finish_reason": None
                                     }
                                 ],
                                 "usage": None,  # Usage can be updated if you track tokens in real-time
                             }
-                        yield f"
+                        yield f"{json.dumps(response_chunk)}\n\n"
 
                     # After all chunks are sent, send the final message with finish_reason
                     prompt_tokens = sum(len(msg.content.split()) for msg in request.messages)
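Note that the stream is served as plain text, one JSON object per chunk separated by blank lines, with no SSE `data:` prefix, so a consumer has to split and parse it manually. A sketch, reusing the assumed endpoint from above:

import json
import requests

with requests.post(
    "http://localhost:8000/v1/chat/completions",   # assumed URL
    json={"model": "blackboxai", "messages": [{"role": "user", "content": "Hi"}], "stream": True},
    stream=True,
) as resp:
    for line in resp.iter_lines(decode_unicode=True):
        if not line or line == "DONE":             # skip blank separators and the end sentinel
            continue
        chunk = json.loads(line)
        # Chunk shape as built in this commit; the final summary chunk may differ.
        print(chunk["choices"][0]["message"]["content"], end="", flush=True)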
@@ -626,22 +632,22 @@ async def chat_completions(request: ChatRequest, req: Request, api_key: str = De
                             "estimated_cost": estimated_cost
                         },
                     }
-                    yield f"
-                    yield "
+                    yield f"{json.dumps(final_response)}\n\n"
+                    yield "DONE\n\n"
                 except HTTPException as he:
                     error_response = {"error": he.detail}
-                    yield f"
+                    yield f"{json.dumps(error_response)}\n\n"
                 except Exception as e:
                     logger.exception(f"Error during streaming response generation from IP: {client_ip}.")
                     error_response = {"error": str(e)}
-                    yield f"
+                    yield f"{json.dumps(error_response)}\n\n"
 
-            return StreamingResponse(generate(), media_type="text/
+            return StreamingResponse(generate(), media_type="text/plain")
         else:
             response_content = ""
             async for chunk in async_generator:
                 if isinstance(chunk, ImageResponseData):
-                    response_content += f"
+                    response_content += f"![{chunk.alt}]({chunk.images})\n"
                 else:
                     response_content += chunk
 