Niansuh committed
Commit f626b99 · verified · 1 Parent(s): 0705903

Update main.py

Files changed (1)
1. main.py +207 -178
main.py CHANGED
@@ -9,16 +9,16 @@ import asyncio
 import time
 from collections import defaultdict
 from typing import List, Dict, Any, Optional, Union, AsyncGenerator
+from datetime import datetime  # <-- Added import
 
 from aiohttp import ClientSession, ClientResponseError
 from fastapi import FastAPI, HTTPException, Request, Depends, Header
 from fastapi.responses import JSONResponse
 from pydantic import BaseModel
-from datetime import datetime
 
 # Configure logging
 logging.basicConfig(
-    level=logging.INFO,
+    level=logging.DEBUG,  # Changed to DEBUG for detailed logs
     format="%(asctime)s [%(levelname)s] %(name)s: %(message)s",
     handlers=[logging.StreamHandler()]
 )
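Note: the hunk above moves the `datetime` import up with the other standard-library imports and lowers the logging threshold from INFO to DEBUG, so the `logger.debug(...)` calls throughout the file actually emit output. A minimal, self-contained illustration of the configured logger (not part of the commit):

```python
import logging

logging.basicConfig(
    level=logging.DEBUG,  # at the old INFO level, debug() calls were dropped
    format="%(asctime)s [%(levelname)s] %(name)s: %(message)s",
    handlers=[logging.StreamHandler()],
)
logger = logging.getLogger(__name__)
logger.debug("visible only once the level is DEBUG")
```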
@@ -202,167 +202,190 @@ class Blackbox:
         return cleaned_text
 
     @classmethod
-    async def generate_response(
-        cls,
-        model: str,
-        messages: List[Dict[str, str]],
-        proxy: Optional[str] = None,
-        websearch: bool = False,
-        **kwargs
-    ) -> Union[str, ImageResponseModel]:
-        model = cls.get_model(model)
-        chat_id = cls.generate_random_string()
-        next_action = cls.generate_next_action()
-        next_router_state_tree = cls.generate_next_router_state_tree()
-
-        agent_mode = cls.agentMode.get(model, {})
-        trending_agent_mode = cls.trendingAgentMode.get(model, {})
-
-        prefix = cls.model_prefixes.get(model, "")
-
-        formatted_prompt = ""
-        for message in messages:
-            role = message.get('role', '').capitalize()
-            content = message.get('content', '')
-            if role and content:
-                formatted_prompt += f"{role}: {content}\n"
-
-        if prefix:
-            formatted_prompt = f"{prefix} {formatted_prompt}".strip()
-
-        referer_path = cls.model_referers.get(model, f"/?model={model}")
-        referer_url = f"{cls.url}{referer_path}"
-
-        common_headers = {
-            'accept': '*/*',
-            'accept-language': 'en-US,en;q=0.9',
-            'cache-control': 'no-cache',
-            'origin': cls.url,
-            'pragma': 'no-cache',
-            'priority': 'u=1, i',
-            'sec-ch-ua': '"Chromium";v="129", "Not=A?Brand";v="8"',
-            'sec-ch-ua-mobile': '?0',
-            'sec-ch-ua-platform': '"Linux"',
-            'sec-fetch-dest': 'empty',
-            'sec-fetch-mode': 'cors',
-            'sec-fetch-site': 'same-origin',
-            'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) '
-                          'AppleWebKit/537.36 (KHTML, like Gecko) '
-                          'Chrome/129.0.0.0 Safari/537.36'
-        }
-
-        headers_api_chat = {
-            'Content-Type': 'application/json',
-            'Referer': referer_url
-        }
-        headers_api_chat_combined = {**common_headers, **headers_api_chat}
-
-        payload_api_chat = {
-            "messages": [
-                {
-                    "id": chat_id,
-                    "content": formatted_prompt,
-                    "role": "user"
-                }
-            ],
-            "id": chat_id,
-            "previewToken": None,
-            "userId": None,
-            "codeModelMode": True,
-            "agentMode": agent_mode,
-            "trendingAgentMode": trending_agent_mode,
-            "isMicMode": False,
-            "userSystemPrompt": None,
-            "maxTokens": 1024,
-            "playgroundTopP": 0.9,
-            "playgroundTemperature": 0.5,
-            "isChromeExt": False,
-            "githubToken": None,
-            "clickedAnswer2": False,
-            "clickedAnswer3": False,
-            "clickedForceWebSearch": False,
-            "visitFromDelta": False,
-            "mobileClient": False,
-            "webSearchMode": websearch,
-            "userSelectedModel": cls.userSelectedModel.get(model, model)
-        }
-
-        headers_chat = {
-            'Accept': 'text/x-component',
-            'Content-Type': 'text/plain;charset=UTF-8',
-            'Referer': f'{cls.url}/chat/{chat_id}?model={model}',
-            'next-action': next_action,
-            'next-router-state-tree': next_router_state_tree,
-            'next-url': '/'
-        }
-        headers_chat_combined = {**common_headers, **headers_chat}
-
-        data_chat = '[]'
-
-        async with ClientSession(headers=common_headers) as session:
-            try:
-                async with session.post(
-                    cls.api_endpoint,
-                    headers=headers_api_chat_combined,
-                    json=payload_api_chat,
-                    proxy=proxy
-                ) as response_api_chat:
-                    response_api_chat.raise_for_status()
-                    text = await response_api_chat.text()
-                    logger.debug(f"Raw response from Blackbox API: {text}")  # Added logging
-                    cleaned_response = cls.clean_response(text)
-                    logger.debug(f"Cleaned response: {cleaned_response}")  # Added logging
-
-                    if model in cls.image_models:
-                        match = re.search(r'!\[.*?\]\((https?://[^\)]+)\)', cleaned_response)
-                        if match:
-                            image_url = match.group(1)
-                            image_response = ImageResponseModel(images=image_url, alt="Generated Image")
-                            logger.debug(f"Image URL extracted: {image_url}")  # Added logging
-                            return image_response
-                        else:
-                            logger.debug("No image URL found in the response.")  # Added logging
-                            return cleaned_response
-                    else:
-                        if websearch:
-                            match = re.search(r'\$~~~\$(.*?)\$~~~\$', cleaned_response, re.DOTALL)
-                            if match:
-                                source_part = match.group(1).strip()
-                                answer_part = cleaned_response[match.end():].strip()
-                                try:
-                                    sources = json.loads(source_part)
-                                    source_formatted = "**Source:**\n"
-                                    for item in sources:
-                                        title = item.get('title', 'No Title')
-                                        link = item.get('link', '#')
-                                        position = item.get('position', '')
-                                        source_formatted += f"{position}. [{title}]({link})\n"
-                                    final_response = f"{answer_part}\n\n{source_formatted}"
-                                except json.JSONDecodeError:
-                                    final_response = f"{answer_part}\n\nSource information is unavailable."
-                            else:
-                                final_response = cleaned_response
-                        else:
-                            if '$~~~$' in cleaned_response:
-                                final_response = cleaned_response.split('$~~~$')[0].strip()
-                            else:
-                                final_response = cleaned_response
-
-                    logger.debug(f"Final response to return: {final_response}")  # Added logging
-                    return final_response
+    async def generate_response(
+        cls,
+        model: str,
+        messages: List[Dict[str, str]],
+        proxy: Optional[str] = None,
+        websearch: bool = False,
+        **kwargs
+    ) -> Union[str, ImageResponseModel]:
+        model = cls.get_model(model)
+        chat_id = cls.generate_random_string()
+        next_action = cls.generate_next_action()
+        next_router_state_tree = cls.generate_next_router_state_tree()
+
+        agent_mode = cls.agentMode.get(model, {})
+        trending_agent_mode = cls.trendingAgentMode.get(model, {})
+
+        prefix = cls.model_prefixes.get(model, "")
+
+        formatted_prompt = ""
+        for message in messages:
+            role = message.get('role', '').capitalize()
+            content = message.get('content', '')
+            if role and content:
+                formatted_prompt += f"{role}: {content}\n"
+
+        if prefix:
+            formatted_prompt = f"{prefix} {formatted_prompt}".strip()
+
+        referer_path = cls.model_referers.get(model, f"/?model={model}")
+        referer_url = f"{cls.url}{referer_path}"
+
+        common_headers = {
+            'accept': '*/*',
+            'accept-language': 'en-US,en;q=0.9',
+            'cache-control': 'no-cache',
+            'origin': cls.url,
+            'pragma': 'no-cache',
+            'priority': 'u=1, i',
+            'sec-ch-ua': '"Chromium";v="129", "Not=A?Brand";v="8"',
+            'sec-ch-ua-mobile': '?0',
+            'sec-ch-ua-platform': '"Linux"',
+            'sec-fetch-dest': 'empty',
+            'sec-fetch-mode': 'cors',
+            'sec-fetch-site': 'same-origin',
+            'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) '
+                          'AppleWebKit/537.36 (KHTML, like Gecko) '
+                          'Chrome/129.0.0.0 Safari/537.36'
+        }
+
+        headers_api_chat = {
+            'Content-Type': 'application/json',
+            'Referer': referer_url
+        }
+        headers_api_chat_combined = {**common_headers, **headers_api_chat}
+
+        payload_api_chat = {
+            "messages": [
+                {
+                    "id": chat_id,
+                    "content": formatted_prompt,
+                    "role": "user"
+                }
+            ],
+            "id": chat_id,
+            "previewToken": None,
+            "userId": None,
+            "codeModelMode": True,
+            "agentMode": agent_mode,
+            "trendingAgentMode": trending_agent_mode,
+            "isMicMode": False,
+            "userSystemPrompt": None,
+            "maxTokens": 1024,
+            "playgroundTopP": 0.9,
+            "playgroundTemperature": 0.5,
+            "isChromeExt": False,
+            "githubToken": None,
+            "clickedAnswer2": False,
+            "clickedAnswer3": False,
+            "clickedForceWebSearch": False,
+            "visitFromDelta": False,
+            "mobileClient": False,
+            "webSearchMode": websearch,
+            "userSelectedModel": cls.userSelectedModel.get(model, model)
+        }
+
+        headers_chat = {
+            'Accept': 'text/x-component',
+            'Content-Type': 'text/plain;charset=UTF-8',
+            'Referer': f'{cls.url}/chat/{chat_id}?model={model}',
+            'next-action': next_action,
+            'next-router-state-tree': next_router_state_tree,
+            'next-url': '/'
+        }
+        headers_chat_combined = {**common_headers, **headers_chat}
+
+        data_chat = '[]'
+
+        async with ClientSession(headers=common_headers) as session:
+            try:
+                async with session.post(
+                    cls.api_endpoint,
+                    headers=headers_api_chat_combined,
+                    json=payload_api_chat,
+                    proxy=proxy
+                ) as response_api_chat:
+                    response_api_chat.raise_for_status()
+                    text = await response_api_chat.text()
+                    logger.debug(f"Raw response from Blackbox API: {text}")  # Added logging
+                    cleaned_response = cls.clean_response(text)
+                    logger.debug(f"Cleaned response: {cleaned_response}")  # Added logging
+
+                    if model in cls.image_models:
+                        match = re.search(r'!\[.*?\]\((https?://[^\)]+)\)', cleaned_response)
+                        if match:
+                            image_url = match.group(1)
+                            image_response = ImageResponseModel(images=image_url, alt="Generated Image")
+                            logger.debug(f"Image URL extracted: {image_url}")  # Added logging
+                            return image_response
+                        else:
+                            logger.debug("No image URL found in the response.")  # Added logging
+                            return cleaned_response
+                    else:
+                        if websearch:
+                            match = re.search(r'\$~~~\$(.*?)\$~~~\$', cleaned_response, re.DOTALL)
+                            if match:
+                                source_part = match.group(1).strip()
+                                answer_part = cleaned_response[match.end():].strip()
+                                try:
+                                    sources = json.loads(source_part)
+                                    source_formatted = "**Source:**\n"
+                                    for item in sources:
+                                        title = item.get('title', 'No Title')
+                                        link = item.get('link', '#')
+                                        position = item.get('position', '')
+                                        source_formatted += f"{position}. [{title}]({link})\n"
+                                    final_response = f"{answer_part}\n\n{source_formatted}"
+                                except json.JSONDecodeError:
+                                    final_response = f"{answer_part}\n\nSource information is unavailable."
+                            else:
+                                final_response = cleaned_response
+                        else:
+                            if '$~~~$' in cleaned_response:
+                                final_response = cleaned_response.split('$~~~$')[0].strip()
+                            else:
+                                final_response = cleaned_response
+
+                    logger.debug(f"Final response to return: {final_response}")  # Added logging
+                    return final_response
+            except ClientResponseError as e:
+                error_text = f"Error {e.status}: {e.message}"
+                try:
+                    error_response = await e.response.text()
+                    cleaned_error = cls.clean_response(error_response)
+                    error_text += f" - {cleaned_error}"
+                    logger.error(f"ClientResponseError: {error_text}")  # Added logging
+                except Exception:
+                    pass
+                return error_text
+            except Exception as e:
+                logger.exception(f"Unexpected error during /api/chat request: {str(e)}")  # Added logging
+                return f"Unexpected error during /api/chat request: {str(e)}"
+
+            chat_url = f'{cls.url}/chat/{chat_id}?model={model}'
+
+            try:
+                async with session.post(
+                    chat_url,
+                    headers=headers_chat_combined,
+                    data=data_chat,
+                    proxy=proxy
+                ) as response_chat:
+                    response_chat.raise_for_status()
+                    pass
             except ClientResponseError as e:
                 error_text = f"Error {e.status}: {e.message}"
                 try:
                     error_response = await e.response.text()
                     cleaned_error = cls.clean_response(error_response)
                     error_text += f" - {cleaned_error}"
-                    logger.error(f"ClientResponseError: {error_text}")  # Added logging
                 except Exception:
                     pass
                 return error_text
             except Exception as e:
-                logger.exception(f"Unexpected error during /api/chat request: {str(e)}")  # Added logging
-                return f"Unexpected error during /api/chat request: {str(e)}"
+                return f"Unexpected error during /chat/{chat_id} request: {str(e)}"
 
     @classmethod
     async def create_async_generator(
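The websearch branch above splits the upstream reply on `$~~~$` delimiters: the first delimited segment is a JSON list of sources, and everything after the closing delimiter is the answer text. A standalone sketch of that parsing with a hypothetical response string (the regex and formatting mirror the diff):

```python
import json
import re

cleaned_response = (
    '$~~~$[{"title": "Example", "link": "https://example.com", "position": 1}]$~~~$'
    'The answer text.'
)
match = re.search(r'\$~~~\$(.*?)\$~~~\$', cleaned_response, re.DOTALL)
if match:
    sources = json.loads(match.group(1).strip())          # JSON between the delimiters
    answer_part = cleaned_response[match.end():].strip()  # free text after them
    source_formatted = "**Source:**\n" + "".join(
        f"{s.get('position', '')}. [{s.get('title', 'No Title')}]({s.get('link', '#')})\n"
        for s in sources
    )
    print(f"{answer_part}\n\n{source_formatted}")
```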
@@ -485,15 +508,19 @@ async def generate_response
                 ) as response_api_chat:
                     response_api_chat.raise_for_status()
                     text = await response_api_chat.text()
+                    logger.debug(f"Raw response from Blackbox API: {text}")  # Added logging
                     cleaned_response = cls.clean_response(text)
+                    logger.debug(f"Cleaned response: {cleaned_response}")  # Added logging
 
                     if model in cls.image_models:
                         match = re.search(r'!\[.*?\]\((https?://[^\)]+)\)', cleaned_response)
                         if match:
                             image_url = match.group(1)
                             image_response = ImageResponseModel(images=image_url, alt="Generated Image")
+                            logger.debug(f"Image URL extracted: {image_url}")  # Added logging
                             yield image_response
                         else:
+                            logger.debug("No image URL found in the response.")  # Added logging
                             yield cleaned_response
                     else:
                         if websearch:
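For image models, the generator extracts the URL from a markdown image tag instead of expecting structured JSON. A quick illustration with a hypothetical response (the regex is the one used in the diff):

```python
import re

cleaned_response = "Here you go: ![Generated Image](https://example.com/pic.png)"
match = re.search(r'!\[.*?\]\((https?://[^\)]+)\)', cleaned_response)
if match:
    print(match.group(1))  # -> https://example.com/pic.png
```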
@@ -520,6 +547,7 @@ async def generate_response
                             else:
                                 final_response = cleaned_response
 
+                    logger.debug(f"Final response to yield: {final_response}")  # Added logging
                     yield final_response
             except ClientResponseError as e:
                 error_text = f"Error {e.status}: {e.message}"
@@ -527,38 +555,52 @@ async def generate_response
                     error_response = await e.response.text()
                     cleaned_error = cls.clean_response(error_response)
                     error_text += f" - {cleaned_error}"
+                    logger.error(f"ClientResponseError: {error_text}")  # Added logging
                 except Exception:
                     pass
                 yield error_text
             except Exception as e:
                 yield f"Unexpected error during /api/chat request: {str(e)}"
 
-            chat_url = f'{cls.url}/chat/{chat_id}?model={model}'
+            chat_url = f'{cls.url}/chat/{chat_id}?model={model}'
 
-            try:
-                async with session.post(
-                    chat_url,
-                    headers=headers_chat_combined,
-                    data=data_chat,
-                    proxy=proxy
-                ) as response_chat:
-                    response_chat.raise_for_status()
-                    pass
-            except ClientResponseError as e:
-                error_text = f"Error {e.status}: {e.message}"
-                try:
-                    error_response = await e.response.text()
-                    cleaned_error = cls.clean_response(error_response)
-                    error_text += f" - {cleaned_error}"
-                except Exception:
-                    pass
-                yield error_text
-            except Exception as e:
-                yield f"Unexpected error during /chat/{chat_id} request: {str(e)}"
+            try:
+                async with session.post(
+                    chat_url,
+                    headers=headers_chat_combined,
+                    data=data_chat,
+                    proxy=proxy
+                ) as response_chat:
+                    response_chat.raise_for_status()
+                    pass
+            except ClientResponseError as e:
+                error_text = f"Error {e.status}: {e.message}"
+                try:
+                    error_response = await e.response.text()
+                    cleaned_error = cls.clean_response(error_response)
+                    error_text += f" - {cleaned_error}"
+                except Exception:
+                    pass
+                yield error_text
+            except Exception as e:
+                yield f"Unexpected error during /chat/{chat_id} request: {str(e)}"
 
 # FastAPI app setup
 app = FastAPI()
 
+# Rate Limiter Cleanup Task
+async def cleanup_rate_limit_stores():
+    """
+    Periodically cleans up stale entries in the rate_limit_store to prevent memory bloat.
+    """
+    while True:
+        current_time = time.time()
+        ips_to_delete = [ip for ip, value in rate_limit_store.items() if current_time - value["timestamp"] > RATE_LIMIT_WINDOW * 2]
+        for ip in ips_to_delete:
+            del rate_limit_store[ip]
+            logger.debug(f"Cleaned up rate_limit_store for IP: {ip}")
+        await asyncio.sleep(CLEANUP_INTERVAL)
+
 # Add the cleanup task when the app starts
 @app.on_event("startup")
 async def startup_event():
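This hunk also moves `cleanup_rate_limit_stores` ahead of the startup hook that registers it (a later hunk deletes the old copy of the function). The body of `startup_event` is outside the hunk, but the usual pattern for running such a loop for the app's lifetime is `asyncio.create_task`; a self-contained sketch under that assumption, with a stand-in cleanup interval:

```python
import asyncio

from fastapi import FastAPI

app = FastAPI()

async def cleanup_rate_limit_stores():
    while True:
        # The real implementation drops stale rate-limit entries here.
        await asyncio.sleep(60)  # stand-in for CLEANUP_INTERVAL

@app.on_event("startup")
async def startup_event():
    # Schedule the cleanup loop in the background without blocking startup.
    asyncio.create_task(cleanup_rate_limit_stores())
```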
@@ -605,19 +647,6 @@ class ChatRequest(BaseModel):
     logit_bias: Optional[Dict[str, float]] = None
     user: Optional[str] = None
 
-# Rate Limiter Cleanup Task
-async def cleanup_rate_limit_stores():
-    """
-    Periodically cleans up stale entries in the rate_limit_store to prevent memory bloat.
-    """
-    while True:
-        current_time = time.time()
-        ips_to_delete = [ip for ip, value in rate_limit_store.items() if current_time - value["timestamp"] > RATE_LIMIT_WINDOW * 2]
-        for ip in ips_to_delete:
-            del rate_limit_store[ip]
-            logger.debug(f"Cleaned up rate_limit_store for IP: {ip}")
-        await asyncio.sleep(CLEANUP_INTERVAL)
-
 # Rate Limiter Dependency
 async def rate_limiter_per_ip(request: Request):
     """
@@ -682,7 +711,7 @@ async def chat_completions(request: ChatRequest, req: Request, api_key: str = De
         return {
             "id": f"chatcmpl-{uuid.uuid4()}",
             "object": "chat.completion",
-            "created": int(datetime.now().timestamp()),
+            "created": int(datetime.now().timestamp()),  # Fixed: datetime is now imported
             "model": request.model,
             "choices": [
                 {
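Here and in the next hunk, the restored `created` field relies on the `datetime` import added at the top of the file, and the envelope follows the OpenAI chat-completions shape. A sketch of the structure with illustrative values (the model name and empty choices are placeholders):

```python
import uuid
from datetime import datetime

response = {
    "id": f"chatcmpl-{uuid.uuid4()}",
    "object": "chat.completion",
    "created": int(datetime.now().timestamp()),  # Unix time in whole seconds
    "model": "blackbox",  # placeholder
    "choices": [],        # omitted in this sketch
}
print(response)
```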
@@ -705,7 +734,7 @@ async def chat_completions(request: ChatRequest, req: Request, api_key: str = De
         return {
             "id": f"chatcmpl-{uuid.uuid4()}",
             "object": "chat.completion",
-            "created": int(datetime.now().timestamp()),
+            "created": int(datetime.now().timestamp()),  # Fixed: datetime is now imported
             "model": request.model,
             "choices": [
                 {