Niansuh committed on
Commit 62372c9 · verified · 1 Parent(s): 3fdbe06

Update main.py

Files changed (1)
  1. main.py +48 -40
main.py CHANGED
@@ -99,15 +99,12 @@ class ModelNotWorkingException(Exception):
         self.message = f"The model '{model}' is currently not working. Please try another model or wait for it to be fixed."
         super().__init__(self.message)
 
-# Mock implementations for ImageResponse and to_data_uri
+# ImageResponse class
 class ImageResponse:
-    def __init__(self, images: str, alt: str):
-        self.images = images
+    def __init__(self, url: str, alt: str):
+        self.url = url
         self.alt = alt
 
-def to_data_uri(image: Any) -> str:
-    return "data:image/png;base64,..."  # Replace with actual base64 data
-
 # Placeholder classes for AsyncGeneratorProvider and ProviderModelMixin
 class AsyncGeneratorProvider:
     pass  # Implement as per your actual provider's requirements
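For reference, a minimal sketch of how the reworked ImageResponse is meant to be consumed downstream; the to_markdown helper and the example URL are illustrative, not part of main.py:

class ImageResponse:
    def __init__(self, url: str, alt: str):
        self.url = url   # direct image URL instead of the old `images` field
        self.alt = alt

def to_markdown(image: ImageResponse) -> str:
    # Same markdown shape the endpoint builds later in this commit
    return f"![{image.alt}]({image.url})\n"

print(to_markdown(ImageResponse(url="https://example.com/generated.png", alt="Generated Image")))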
@@ -390,31 +387,35 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
 
         async with ClientSession(headers=common_headers) as session:
             try:
-                # Send initial chat request
+                # Send initial chat request with streaming
                 async with session.post(
                     cls.api_endpoint,
                     headers=headers_api_chat_combined,
                     json=payload_api_chat,
-                    proxy=proxy
+                    proxy=proxy,
+                    timeout=ClientTimeout(total=600),  # Adjust timeout as needed
                 ) as response_api_chat:
                     response_api_chat.raise_for_status()
-                    text = await response_api_chat.text()
-                    cleaned_response = cls.clean_response(text)
-
-                    if model in cls.image_models:
-                        match = re.search(r'!\[.*?\]\((https?://[^\)]+)\)', cleaned_response)
-                        if match:
-                            image_url = match.group(1)
-                            image_response = ImageResponse(images=image_url, alt="Generated Image")
+
+                    # Stream the response in chunks
+                    async for data in response_api_chat.content.iter_chunked(1024):
+                        decoded_data = data.decode('utf-8', errors='ignore')
+                        cleaned_data = cls.clean_response(decoded_data)
+
+                        # Check for image response
+                        image_match = re.search(r'!\[.*?\]\((https?://[^\)]+)\)', cleaned_data)
+                        if image_match:
+                            image_url = image_match.group(1)
+                            image_response = ImageResponse(url=image_url, alt="Generated Image")
                             yield image_response
-                        else:
-                            yield cleaned_response
-                    else:
+                            continue  # Continue to the next chunk
+
+                        # Check for web search sources
                         if websearch:
-                            match = re.search(r'\$~~~\$(.*?)\$~~~\$', cleaned_response, re.DOTALL)
-                            if match:
-                                source_part = match.group(1).strip()
-                                answer_part = cleaned_response[match.end():].strip()
+                            source_match = re.search(r'\$~~~\$(.*?)\$~~~\$', cleaned_data, re.DOTALL)
+                            if source_match:
+                                source_part = source_match.group(1).strip()
+                                answer_part = cleaned_data[source_match.end():].strip()
                                 try:
                                     sources = json.loads(source_part)
                                     source_formatted = "**Sources:**\n"
@@ -426,15 +427,12 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
                                     final_response = f"{answer_part}\n\n{source_formatted}"
                                 except json.JSONDecodeError:
                                     final_response = f"{answer_part}\n\nSource information is unavailable."
-                        else:
-                            final_response = cleaned_response
-                    else:
-                        if '$~~~$' in cleaned_response:
-                            final_response = cleaned_response.split('$~~~$')[0].strip()
-                        else:
-                            final_response = cleaned_response
+                                yield final_response
+                                continue  # Continue to the next chunk
 
-                    yield final_response
+                        # Yield the cleaned data chunk
+                        if cleaned_data.strip():
+                            yield cleaned_data.strip()
                 except ClientResponseError as e:
                     error_text = f"Error {e.status}: {e.message}"
                     try:
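The substance of the two hunks above is the switch from buffering the whole body with await response_api_chat.text() to iterating response_api_chat.content.iter_chunked(1024) and yielding cleaned text as it arrives. A self-contained aiohttp sketch of that pattern, with a placeholder URL and payload rather than the provider's real endpoint:

import asyncio
import aiohttp

async def stream_post(url: str, payload: dict):
    timeout = aiohttp.ClientTimeout(total=600)  # same generous limit as the diff
    async with aiohttp.ClientSession(timeout=timeout) as session:
        async with session.post(url, json=payload) as resp:
            resp.raise_for_status()
            # Read the body incrementally instead of awaiting resp.text()
            async for chunk in resp.content.iter_chunked(1024):
                text = chunk.decode("utf-8", errors="ignore")
                if text.strip():
                    yield text.strip()

async def main():
    async for piece in stream_post("https://example.com/api/chat", {"prompt": "hello"}):
        print(piece, flush=True)

if __name__ == "__main__":
    asyncio.run(main())

One caveat worth noting: fixed-size chunks can split an image markdown link or a $~~~$ marker across two reads, so the per-chunk regex checks in the diff are best-effort.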
@@ -450,15 +448,21 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
             chat_url = f'{cls.url}/chat/{chat_id}?model={model}'
 
             try:
-                # Send follow-up chat request
+                # Send follow-up chat request (if necessary)
                 async with session.post(
                     chat_url,
                     headers=headers_chat_combined,
                     data=data_chat,
-                    proxy=proxy
+                    proxy=proxy,
+                    timeout=ClientTimeout(total=600),
                 ) as response_chat:
                     response_chat.raise_for_status()
-                    pass
+                    # If there's additional streaming data from the chat URL, handle it here
+                    async for data in response_chat.content.iter_chunked(1024):
+                        decoded_data = data.decode('utf-8', errors='ignore')
+                        cleaned_data = cls.clean_response(decoded_data)
+                        if cleaned_data.strip():
+                            yield cleaned_data.strip()
             except ClientResponseError as e:
                 error_text = f"Error {e.status}: {e.message}"
                 try:
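Both requests now pass timeout=ClientTimeout(total=600). If ClientTimeout is not already imported in main.py (the import block is not shown in this diff), it comes from aiohttp alongside the names the code already uses, e.g.:

from aiohttp import ClientSession, ClientResponseError, ClientTimeout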
@@ -582,15 +586,19 @@ async def chat_completions(request: ChatRequest, req: Request, api_key: str = De
     async def generate():
         try:
             assistant_content = ""
+            prompt_tokens = 0
+            completion_tokens = 0
+
             async for chunk in async_generator:
                 if isinstance(chunk, ImageResponse):
                     # Handle image responses if necessary
-                    image_markdown = f"![image]({chunk.images})\n"
+                    image_markdown = f"![{chunk.alt}]({chunk.url})\n"
                     assistant_content += image_markdown
                     response_chunk = create_response(image_markdown, request.model, finish_reason=None)
+                    yield f"data: {json.dumps(response_chunk)}\n\n"
                 else:
+                    # Assuming 'chunk' is a string of text
                     assistant_content += chunk
-                    # Yield the chunk as a partial choice
                     response_chunk = {
                         "id": f"chatcmpl-{uuid.uuid4()}",
                         "object": "chat.completion.chunk",
@@ -605,9 +613,9 @@ async def chat_completions(request: ChatRequest, req: Request, api_key: str = De
                         ],
                         "usage": None,  # Usage can be updated if you track tokens in real-time
                     }
-                yield f"data: {json.dumps(response_chunk)}\n\n"
-
-            # After all chunks are sent, send the final message with finish_reason
+                    yield f"data: {json.dumps(response_chunk)}\n\n"
+
+            # After all chunks are sent, calculate tokens and estimated cost
             prompt_tokens = sum(len(msg['content'].split()) for msg in request.messages)
             completion_tokens = len(assistant_content.split())
             total_tokens = prompt_tokens + completion_tokens
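Usage is still estimated by whitespace-splitting rather than a real tokenizer, so the reported counts are rough. A quick worked example of what that estimate produces (the messages here are made up):

messages = [{"role": "user", "content": "How many r's are in strawberry?"}]
assistant_content = "There are three r's in strawberry."

prompt_tokens = sum(len(msg['content'].split()) for msg in messages)  # 6
completion_tokens = len(assistant_content.split())                    # 6
total_tokens = prompt_tokens + completion_tokens                      # 12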
@@ -650,7 +658,7 @@ async def chat_completions(request: ChatRequest, req: Request, api_key: str = De
         response_content = ""
         async for chunk in async_generator:
             if isinstance(chunk, ImageResponse):
-                response_content += f"![image]({chunk.images})\n"
+                response_content += f"![{chunk.alt}]({chunk.url})\n"
             else:
                 response_content += chunk
 
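End to end, the streaming path now emits OpenAI-style "data: {...}" server-sent-event lines. A sketch of a client reading that stream with aiohttp; the endpoint path, payload, and the delta/[DONE] conventions are assumptions based on the chunk format in this diff, not something the commit itself shows:

import asyncio
import json
import aiohttp

async def read_stream(url: str, payload: dict, api_key: str):
    headers = {"Authorization": f"Bearer {api_key}"}
    async with aiohttp.ClientSession(headers=headers) as session:
        async with session.post(url, json=payload) as resp:
            resp.raise_for_status()
            async for raw_line in resp.content:              # StreamReader yields lines
                line = raw_line.decode("utf-8").strip()
                if not line.startswith("data: "):
                    continue
                data = line[len("data: "):]
                if data == "[DONE]":                          # common SSE terminator (assumed)
                    break
                chunk = json.loads(data)
                # Assumes OpenAI-style delta content inside choices[0]
                print(chunk["choices"][0].get("delta", {}).get("content", ""), end="")

if __name__ == "__main__":
    asyncio.run(read_stream(
        "http://localhost:8000/v1/chat/completions",
        {"model": "blackbox", "messages": [{"role": "user", "content": "hi"}], "stream": True},
        "sk-example",
    ))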