Niansuh committed on
Commit efe8643 · verified · 1 Parent(s): 7725f9f

Update main.py

Files changed (1): main.py (+96 −83)
main.py CHANGED
@@ -96,10 +96,10 @@ class ModelNotWorkingException(Exception):
          self.message = f"The model '{model}' is currently not working. Please try another model or wait for it to be fixed."
          super().__init__(self.message)

- # Define ImageResponse class
+ # Define ImageResponseData class
  class ImageResponseData:
-     def __init__(self, images: str, alt: str):
-         self.images = images
+     def __init__(self, url: str, alt: str):
+         self.url = url
          self.alt = alt

  # Mock implementation of to_data_uri
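Note: this hunk renames the image field from images to url. A minimal runnable sketch of the renamed class and the markdown rendering applied to it by the endpoint code later in this diff (the to_markdown helper is illustrative, not part of main.py):

    # Minimal sketch; `to_markdown` is an illustrative helper, not part of main.py.
    class ImageResponseData:
        def __init__(self, url: str, alt: str):
            self.url = url
            self.alt = alt

    def to_markdown(img: ImageResponseData) -> str:
        # Mirrors the endpoint code below: f"![image]({chunk.url})\n"
        return f"![image]({img.url})\n"

    print(to_markdown(ImageResponseData(url="https://example.com/img.png", alt="Generated Image")))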
@@ -140,6 +140,7 @@ class Blackbox:
      'ReactAgent',
      'XcodeAgent',
      'AngularJSAgent',
+     'Niansuh',
  ]

  # Filter models based on AVAILABLE_MODELS
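The trailing comment refers to a filtering step that lies outside this hunk. A plausible sketch of such a filter, assuming AVAILABLE_MODELS is a collection of permitted names (both identifiers come from the comment; the real implementation is not shown in this diff):

    # Hypothetical sketch of the "Filter models based on AVAILABLE_MODELS" step.
    AVAILABLE_MODELS = {'ReactAgent', 'XcodeAgent', 'AngularJSAgent', 'Niansuh'}
    models = ['ReactAgent', 'XcodeAgent', 'AngularJSAgent', 'Niansuh', 'UnlistedAgent']
    models = [m for m in models if m in AVAILABLE_MODELS]
    print(models)  # ['ReactAgent', 'XcodeAgent', 'AngularJSAgent', 'Niansuh']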
@@ -265,7 +266,9 @@ class Blackbox:
      model: str,
      messages: List[Dict[str, str]],
      proxy: Optional[str] = None,
-     websearch: bool = False,
+     image: Any = None,
+     image_name: Optional[str] = None,
+     webSearchMode: bool = False,
      **kwargs
  ) -> AsyncGenerator[Union[str, ImageResponseData], None]:
      """
@@ -275,7 +278,9 @@ class Blackbox:
          model (str): Model to use for generating responses.
          messages (List[Dict[str, str]]): Message history.
          proxy (Optional[str]): Proxy URL, if needed.
-         websearch (bool): Enables or disables web search mode.
+         image (Any): Image data, if applicable.
+         image_name (Optional[str]): Image name, if applicable.
+         webSearchMode (bool): Enables or disables web search mode.
          **kwargs: Additional keyword arguments.

      Yields:
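A minimal sketch of a call against the updated signature, assuming it runs in a context where main.py's Blackbox and ImageResponseData are importable (the model name and message content are placeholders; webSearchMode replaces the old websearch keyword):

    # Hypothetical caller for the updated signature.
    import asyncio

    async def demo():
        async for chunk in Blackbox.create_async_generator(
            model="Niansuh",
            messages=[{"role": "user", "content": "Summarize today's AI news."}],
            webSearchMode=True,  # renamed from `websearch`
        ):
            if isinstance(chunk, ImageResponseData):
                print(f"![image]({chunk.url})")
            else:
                print(chunk)

    asyncio.run(demo())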
@@ -345,7 +350,7 @@ class Blackbox:
          "trendingAgentMode": trending_agent_mode,
          "isMicMode": False,
          "userSystemPrompt": None,
-         "maxTokens": 1024,
+         "maxTokens": 99999999,
          "playgroundTopP": 0.9,
          "playgroundTemperature": 0.5,
          "isChromeExt": False,
@@ -355,91 +360,96 @@ class Blackbox:
          "clickedForceWebSearch": False,
          "visitFromDelta": False,
          "mobileClient": False,
-         "webSearchMode": websearch,
-         "userSelectedModel": cls.userSelectedModel.get(model, model)
+         "webSearchMode": webSearchMode,
+         "userSelectedModel": None,
      }

-     headers_chat = {
-         'Accept': 'text/x-component',
-         'Content-Type': 'text/plain;charset=UTF-8',
-         'Referer': f'{cls.url}/chat/{chat_id}?model={model}',
-         'next-action': next_action,
-         'next-router-state-tree': next_router_state_tree,
-         'next-url': '/'
-     }
-     headers_chat_combined = {**common_headers, **headers_chat}
-
-     data_chat = '[]'
+     if model in cls.agentMode:
+         payload_api_chat["agentMode"] = cls.agentMode[model]
+     elif model in cls.trendingAgentMode:
+         payload_api_chat["trendingAgentMode"] = cls.trendingAgentMode[model]
+     elif model in cls.userSelectedModel:
+         payload_api_chat["userSelectedModel"] = cls.userSelectedModel[model]
+
+     logger.info(f"Sending request to {cls.api_endpoint} with data (excluding messages).")
+
+     timeout = ClientTimeout(total=60)  # Set an appropriate timeout
+     retry_attempts = 10  # Set the number of retry attempts

      async with ClientSession(headers=common_headers) as session:
-         try:
-             async with session.post(
-                 cls.api_endpoint,
-                 headers=headers_api_chat_combined,
-                 json=payload_api_chat,
-                 proxy=proxy
-             ) as response_api_chat:
-                 response_api_chat.raise_for_status()
-                 text = await response_api_chat.text()
-                 cleaned_response = cls.clean_response(text)
-
-                 if model in cls.image_models:
-                     match = re.search(r'!\[.*?\]\((https?://[^\)]+)\)', cleaned_response)
-                     if match:
-                         image_url = match.group(1)
-                         image_response = ImageResponseData(images=image_url, alt="Generated Image")
-                         yield image_response
-                     else:
-                         yield cleaned_response
-                 else:
-                     if websearch:
-                         match = re.search(r'\$~~~\$(.*?)\$~~~\$', cleaned_response, re.DOTALL)
-                         if match:
-                             source_part = match.group(1).strip()
-                             answer_part = cleaned_response[match.end():].strip()
-                             try:
-                                 sources = json.loads(source_part)
-                                 source_formatted = "**Sources:**\n"
-                                 for item in sources[:5]:
-                                     title = item.get('title', 'No Title')
-                                     link = item.get('link', '#')
-                                     position = item.get('position', '')
-                                     source_formatted += f"- [{title}]({link})\n"
-                                 final_response = f"{answer_part}\n\n{source_formatted}"
-                             except json.JSONDecodeError:
-                                 final_response = f"{answer_part}\n\nSource information is unavailable."
+         for attempt in range(retry_attempts):
+             try:
+                 async with session.post(cls.api_endpoint, json=payload_api_chat, proxy=proxy) as response:
+                     response.raise_for_status()
+                     logger.info(f"Received response with status {response.status}")
+                     response_text = await response.text()
+                     cleaned_response = cls.clean_response(response_text)
+
+                     if model in cls.image_models:
+                         url_match = re.search(r'https://storage\.googleapis\.com/[^\s\)]+', cleaned_response)
+                         if url_match:
+                             image_url = url_match.group(0)
+                             logger.info(f"Image URL found: {image_url}")
+                             yield ImageResponseData(url=image_url, alt=messages[-1]['content'])
                          else:
-                             final_response = cleaned_response
+                             logger.error("Image URL not found in the response.")
+                             raise Exception("Image URL not found in the response")
                      else:
-                         if '$~~~$' in cleaned_response:
-                             final_response = cleaned_response.split('$~~~$')[0].strip()
+                         full_response = ""
+                         search_results_json = ""
+                         # Handle streaming-like responses if applicable
+                         # Assuming the response contains '$~~~$' for search results
+                         if webSearchMode:
+                             match = re.search(r'\$~~~\$(.*?)\$~~~\$', cleaned_response, re.DOTALL)
+                             if match:
+                                 search_results_json = match.group(1)
+                                 answer_part = cleaned_response[:match.start()].strip()
+                                 try:
+                                     search_results = json.loads(search_results_json)
+                                     formatted_results = "\n\n**Sources:**\n"
+                                     for i, result in enumerate(search_results[:5], 1):
+                                         formatted_results += f"{i}. [{result['title']}]({result['link']})\n"
+                                     final_response = f"{answer_part}\n\n{formatted_results}"
+                                     yield final_response
+                                 except json.JSONDecodeError as je:
+                                     logger.error("Failed to parse search results JSON.")
+                                     yield f"{cleaned_response}\n\n**Sources:** Information unavailable."
+                             else:
+                                 yield cleaned_response
                          else:
-                             final_response = cleaned_response
-
-                 yield final_response
-         except ClientResponseError as e:
-             error_text = f"Error {e.status}: {e.message}"
-             try:
-                 error_response = await e.response.text()
-                 cleaned_error = cls.clean_response(error_response)
-                 error_text += f" - {cleaned_error}"
-             except Exception:
-                 pass
-             yield error_text
-         except Exception as e:
-             yield f"Unexpected error during /api/chat request: {str(e)}"
+                             yield cleaned_response
+                     break  # Exit the retry loop if successful
+             except ClientError as ce:
+                 logger.error(f"Client error occurred: {ce}. Retrying attempt {attempt + 1}/{retry_attempts}")
+                 if attempt == retry_attempts - 1:
+                     raise HTTPException(status_code=502, detail="Error communicating with the external API.")
+             except asyncio.TimeoutError:
+                 logger.error(f"Request timed out. Retrying attempt {attempt + 1}/{retry_attempts}")
+                 if attempt == retry_attempts - 1:
+                     raise HTTPException(status_code=504, detail="External API request timed out.")
+             except Exception as e:
+                 logger.error(f"Unexpected error: {e}. Retrying attempt {attempt + 1}/{retry_attempts}")
+                 if attempt == retry_attempts - 1:
+                     raise HTTPException(status_code=500, detail=str(e))

+         # Additional request to /chat/{chat_id} endpoint if necessary
          chat_url = f'{cls.url}/chat/{chat_id}?model={model}'

+         headers_chat = {
+             'Accept': 'text/x-component',
+             'Content-Type': 'text/plain;charset=UTF-8',
+             'Referer': f'{cls.url}/chat/{chat_id}?model={model}',
+             'next-action': cls.generate_next_action(),
+             'next-router-state-tree': cls.generate_next_router_state_tree(),
+             'next-url': '/'
+         }
+         headers_chat_combined = {**common_headers, **headers_chat}
+         data_chat = '[]'
+
          try:
-             async with session.post(
-                 chat_url,
-                 headers=headers_chat_combined,
-                 data=data_chat,
-                 proxy=proxy
-             ) as response_chat:
+             async with session.post(chat_url, headers=headers_chat_combined, data=data_chat, proxy=proxy) as response_chat:
                  response_chat.raise_for_status()
-                 # No action needed based on the original code
+                 logger.info(f"Successfully posted to {chat_url}")
          except ClientResponseError as e:
              error_text = f"Error {e.status}: {e.message}"
              try:
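The new web-search branch above splits the cleaned response on '$~~~$' delimiters, parses the enclosed JSON, and formats up to five sources. A standalone sketch of that parsing logic (the sample response string is invented for illustration):

    # Standalone sketch of the '$~~~$' source-block parsing introduced above.
    import json
    import re

    def format_with_sources(cleaned_response: str) -> str:
        match = re.search(r'\$~~~\$(.*?)\$~~~\$', cleaned_response, re.DOTALL)
        if not match:
            return cleaned_response
        answer_part = cleaned_response[:match.start()].strip()
        try:
            results = json.loads(match.group(1))
        except json.JSONDecodeError:
            return f"{cleaned_response}\n\n**Sources:** Information unavailable."
        formatted = "\n\n**Sources:**\n"
        for i, r in enumerate(results[:5], 1):
            formatted += f"{i}. [{r['title']}]({r['link']})\n"
        return f"{answer_part}\n\n{formatted}"

    sample = 'The answer.$~~~$[{"title": "Example", "link": "https://example.com"}]$~~~$'
    print(format_with_sources(sample))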
@@ -448,9 +458,12 @@ class Blackbox:
                  error_text += f" - {cleaned_error}"
              except Exception:
                  pass
+             logger.error(error_text)
              yield error_text
          except Exception as e:
-             yield f"Unexpected error during /chat/{chat_id} request: {str(e)}"
+             error_text = f"Unexpected error during /chat/{chat_id} request: {str(e)}"
+             logger.error(error_text)
+             yield error_text

  # FastAPI app setup
  app = FastAPI()
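The request path above now wraps the upstream call in a retry loop (10 attempts, a 60-second ClientTimeout) and maps aiohttp failures onto 502/504/500 HTTPExceptions on the final attempt. A condensed, self-contained sketch of that pattern, with the URL and payload as placeholders:

    # Condensed sketch of the retry pattern introduced above.
    import asyncio
    from aiohttp import ClientError, ClientSession, ClientTimeout
    from fastapi import HTTPException

    async def post_with_retries(url: str, payload: dict, attempts: int = 10) -> str:
        async with ClientSession(timeout=ClientTimeout(total=60)) as session:
            for attempt in range(attempts):
                try:
                    async with session.post(url, json=payload) as response:
                        response.raise_for_status()
                        return await response.text()
                except ClientError:
                    if attempt == attempts - 1:
                        raise HTTPException(status_code=502, detail="Error communicating with the external API.")
                except asyncio.TimeoutError:
                    if attempt == attempts - 1:
                        raise HTTPException(status_code=504, detail="External API request timed out.")
        raise HTTPException(status_code=500, detail="Retries exhausted.")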
@@ -502,7 +515,7 @@ class ChatRequest(BaseModel):
      frequency_penalty: Optional[float] = 0.0
      logit_bias: Optional[Dict[str, float]] = None
      user: Optional[str] = None
-     websearch: Optional[bool] = False  # Custom parameter
+     webSearchMode: Optional[bool] = False  # Custom parameter

  class TokenizerRequest(BaseModel):
      text: str
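Because webSearchMode is a renamed request field, existing clients must update their JSON payloads. A sketch of a request using the new field; the route path is an assumption, since the decorator is not visible in this diff:

    # Hypothetical client call; the endpoint path is an assumption.
    import httpx

    payload = {
        "model": "Niansuh",
        "messages": [{"role": "user", "content": "Hello"}],
        "stream": False,
        "webSearchMode": True,  # renamed from "websearch"
    }
    r = httpx.post("http://localhost:8000/v1/chat/completions", json=payload)
    print(r.json())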
@@ -553,7 +566,7 @@ async def chat_completions(request: ChatRequest, req: Request, api_key: str = De
      async_generator = Blackbox.create_async_generator(
          model=request.model,
          messages=[{"role": msg.role, "content": msg.content} for msg in request.messages],  # Actual message content used here
-         websearch=request.websearch
+         webSearchMode=request.webSearchMode
      )

      if request.stream:
@@ -563,7 +576,7 @@ async def chat_completions(request: ChatRequest, req: Request, api_key: str = De
      async for chunk in async_generator:
          if isinstance(chunk, ImageResponseData):
              # Handle image responses if necessary
-             image_markdown = f"![image]({chunk.images})\n"
+             image_markdown = f"![image]({chunk.url})\n"
              assistant_content += image_markdown
              response_chunk = create_response(image_markdown, request.model, finish_reason=None)
          else:
@@ -628,7 +641,7 @@ async def chat_completions(request: ChatRequest, req: Request, api_key: str = De
      response_content = ""
      async for chunk in async_generator:
          if isinstance(chunk, ImageResponseData):
-             response_content += f"![image]({chunk.images})\n"
+             response_content += f"![image]({chunk.url})\n"
          else:
              response_content += chunk
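In the non-streaming path, both branches accumulate into a single string. A small runnable sketch of that aggregation over a mixed chunk stream (the sample generator is invented for illustration; ImageResponseData is redefined inline for self-containment):

    # Sketch of the non-streaming aggregation over mixed str/image chunks.
    import asyncio

    class ImageResponseData:
        def __init__(self, url: str, alt: str):
            self.url = url
            self.alt = alt

    async def sample_chunks():
        yield "Here is your image: "
        yield ImageResponseData(url="https://example.com/cat.png", alt="a cat")

    async def aggregate() -> str:
        response_content = ""
        async for chunk in sample_chunks():
            if isinstance(chunk, ImageResponseData):
                response_content += f"![image]({chunk.url})\n"
            else:
                response_content += chunk
        return response_content

    print(asyncio.run(aggregate()))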
 
 