Niansuh committed on
Commit
36aebd6
·
verified ·
1 Parent(s): 00b0abb

Update main.py

Browse files
Files changed (1) hide show
  1. main.py +88 -158
main.py CHANGED
@@ -9,10 +9,10 @@ import asyncio
9
  import time
10
  from collections import defaultdict
11
  from typing import List, Dict, Any, Optional, AsyncGenerator
12
- from datetime import datetime # Added import for datetime
13
 
14
  from aiohttp import ClientSession, ClientTimeout, ClientError
15
- from fastapi import FastAPI, HTTPException, Request, Depends, Header
16
  from fastapi.responses import StreamingResponse
17
  from pydantic import BaseModel
18
 
@@ -30,7 +30,7 @@ RATE_LIMIT = int(os.getenv('RATE_LIMIT', '60')) # Requests per minute
30
 
31
  if not API_KEYS or API_KEYS == ['']:
32
  logger.error("No API keys found. Please set the API_KEYS environment variable. | NiansuhAI")
33
- raise Exception("API_KEYS environment variable not set.")
34
 
35
  # Simple in-memory rate limiter
36
  rate_limit_store = defaultdict(lambda: {"count": 0, "timestamp": time.time()})
@@ -188,159 +188,7 @@ class Blackbox:
188
  else:
189
  return cls.default_model
190
 
191
- @classmethod
192
- async def create_async_generator(
193
- cls,
194
- model: str,
195
- messages: List[Dict[str, str]],
196
- proxy: Optional[str] = None,
197
- image: Any = None,
198
- image_name: Optional[str] = None,
199
- webSearchMode: bool = False,
200
- **kwargs
201
- ) -> AsyncGenerator[Any, None]:
202
- model = cls.get_model(model)
203
- logger.info(f"Selected model: {model}")
204
-
205
- if not cls.working or model not in cls.models:
206
- logger.error(f"Model {model} is not working or not supported.")
207
- raise ModelNotWorkingException(model)
208
-
209
- headers = {
210
- "accept": "*/*",
211
- "accept-language": "en-US,en;q=0.9",
212
- "cache-control": "no-cache",
213
- "content-type": "application/json",
214
- "origin": cls.url,
215
- "pragma": "no-cache",
216
- "priority": "u=1, i",
217
- "referer": cls.model_referers.get(model, cls.url),
218
- "sec-ch-ua": '"Chromium";v="129", "Not=A?Brand";v="8"',
219
- "sec-ch-ua-mobile": "?0",
220
- "sec-ch-ua-platform": '"Linux"',
221
- "sec-fetch-dest": "empty",
222
- "sec-fetch-mode": "cors",
223
- "sec-fetch-site": "same-origin",
224
- "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/129.0.0.0 Safari/537.36",
225
- }
226
-
227
- if model in cls.model_prefixes:
228
- prefix = cls.model_prefixes[model]
229
- if not messages[0]['content'].startswith(prefix):
230
- logger.debug(f"Adding prefix '{prefix}' to the first message.")
231
- messages[0]['content'] = f"{prefix} {messages[0]['content']}"
232
-
233
- random_id = ''.join(random.choices(string.ascii_letters + string.digits, k=7))
234
- messages[-1]['id'] = random_id
235
- messages[-1]['role'] = 'user'
236
-
237
- # Don't log the full message content for privacy
238
- logger.debug(f"Generated message ID: {random_id} for model: {model}")
239
-
240
- if image is not None:
241
- messages[-1]['data'] = {
242
- 'fileText': '',
243
- 'imageBase64': to_data_uri(image),
244
- 'title': image_name
245
- }
246
- messages[-1]['content'] = 'FILE:BB\n$#$\n\n$#$\n' + messages[-1]['content']
247
- logger.debug("Image data added to the message.")
248
-
249
- data = {
250
- "messages": messages,
251
- "id": random_id,
252
- "previewToken": None,
253
- "userId": None,
254
- "codeModelMode": True,
255
- "agentMode": {},
256
- "trendingAgentMode": {},
257
- "isMicMode": False,
258
- "userSystemPrompt": None,
259
- "maxTokens": 99999999,
260
- "playgroundTopP": 0.9,
261
- "playgroundTemperature": 0.5,
262
- "isChromeExt": False,
263
- "githubToken": None,
264
- "clickedAnswer2": False,
265
- "clickedAnswer3": False,
266
- "clickedForceWebSearch": False,
267
- "visitFromDelta": False,
268
- "mobileClient": False,
269
- "userSelectedModel": None,
270
- "webSearchMode": webSearchMode,
271
- }
272
-
273
- if model in cls.agentMode:
274
- data["agentMode"] = cls.agentMode[model]
275
- elif model in cls.trendingAgentMode:
276
- data["trendingAgentMode"] = cls.trendingAgentMode[model]
277
- elif model in cls.userSelectedModel:
278
- data["userSelectedModel"] = cls.userSelectedModel[model]
279
- logger.info(f"Sending request to {cls.api_endpoint} with data (excluding messages).")
280
-
281
- timeout = ClientTimeout(total=60) # Set an appropriate timeout
282
- retry_attempts = 10 # Set the number of retry attempts
283
-
284
- for attempt in range(retry_attempts):
285
- try:
286
- async with ClientSession(headers=headers, timeout=timeout) as session:
287
- async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
288
- response.raise_for_status()
289
- logger.info(f"Received response with status {response.status}")
290
- if model == 'ImageGeneration':
291
- response_text = await response.text()
292
- url_match = re.search(r'https://storage\.googleapis\.com/[^\s\)]+', response_text)
293
- if url_match:
294
- image_url = url_match.group(0)
295
- logger.info(f"Image URL found.")
296
- yield ImageResponse(image_url, alt=messages[-1]['content'])
297
- else:
298
- logger.error("Image URL not found in the response. | NiansuhAI")
299
- raise Exception("Image URL not found in the response | NiansuhAI")
300
- else:
301
- full_response = ""
302
- search_results_json = ""
303
- try:
304
- async for chunk, _ in response.content.iter_chunks():
305
- if chunk:
306
- decoded_chunk = chunk.decode(errors='ignore')
307
- decoded_chunk = re.sub(r'\$@\$v=[^$]+\$@\$', '', decoded_chunk)
308
- if decoded_chunk.strip():
309
- if '$~~~$' in decoded_chunk:
310
- search_results_json += decoded_chunk
311
- else:
312
- full_response += decoded_chunk
313
- yield decoded_chunk
314
- logger.info("Finished streaming response chunks.")
315
- except Exception as e:
316
- logger.exception("Error while iterating over response chunks. | NiansuhAI")
317
- raise e
318
- if data["webSearchMode"] and search_results_json:
319
- match = re.search(r'\$~~~\$(.*?)\$~~~\$', search_results_json, re.DOTALL)
320
- if match:
321
- try:
322
- search_results = json.loads(match.group(1))
323
- formatted_results = "\n\n**Sources:**\n"
324
- for i, result in enumerate(search_results[:5], 1):
325
- formatted_results += f"{i}. [{result['title']}]({result['link']})\n"
326
- logger.info("Formatted search results.")
327
- yield formatted_results
328
- except json.JSONDecodeError as je:
329
- logger.error("Failed to parse search results JSON. | NiansuhAI")
330
- raise je
331
- break # Exit the retry loop if successful
332
- except ClientError as ce:
333
- logger.error(f"Client error occurred: {ce}. Retrying attempt {attempt + 1}/{retry_attempts}")
334
- if attempt == retry_attempts - 1:
335
- raise HTTPException(status_code=502, detail="Error communicating with the external API. | NiansuhAI")
336
- except asyncio.TimeoutError:
337
- logger.error(f"Request timed out. Retrying attempt {attempt + 1}/{retry_attempts}")
338
- if attempt == retry_attempts - 1:
339
- raise HTTPException(status_code=504, detail="External API request timed out. | NiansuhAI")
340
- except Exception as e:
341
- logger.error(f"Unexpected error: {e}. Retrying attempt {attempt + 1}/{retry_attempts}")
342
- if attempt == retry_attempts - 1:
343
- raise HTTPException(status_code=500, detail=str(e))
344
 
345
  # FastAPI app setup
346
  app = FastAPI()
@@ -440,9 +288,9 @@ async def chat_completions(request: ChatRequest, req: Request, api_key: str = De
440
  }
441
  ],
442
  "usage": {
443
- "prompt_tokens": sum(len(msg['content'].split()) for msg in request.messages),
444
  "completion_tokens": len(response_content.split()),
445
- "total_tokens": sum(len(msg['content'].split()) for msg in request.messages) + len(response_content.split())
446
  },
447
  }
448
  except ModelNotWorkingException as e:
@@ -461,6 +309,7 @@ async def get_models(api_key: str = Depends(get_api_key)):
461
  return {"data": [{"id": model} for model in Blackbox.models]}
462
 
463
  # Additional endpoints for better functionality
 
464
  @app.get("/niansuhai/v1/health", dependencies=[Depends(rate_limiter)])
465
  async def health_check(api_key: str = Depends(get_api_key)):
466
  logger.info(f"Health check requested by API key: {api_key}")
@@ -478,6 +327,87 @@ async def model_status(model: str, api_key: str = Depends(get_api_key)):
478
  logger.warning(f"Model not found: {model}")
479
  raise HTTPException(status_code=404, detail="Model not found | NiansuhAI")
480
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
481
  if __name__ == "__main__":
482
  import uvicorn
483
  uvicorn.run(app, host="0.0.0.0", port=8000)
 
9
  import time
10
  from collections import defaultdict
11
  from typing import List, Dict, Any, Optional, AsyncGenerator
12
+ from datetime import datetime
13
 
14
  from aiohttp import ClientSession, ClientTimeout, ClientError
15
+ from fastapi import FastAPI, HTTPException, Request, Depends, Header, UploadFile, File
16
  from fastapi.responses import StreamingResponse
17
  from pydantic import BaseModel
18
 
 
30
 
31
  if not API_KEYS or API_KEYS == ['']:
32
  logger.error("No API keys found. Please set the API_KEYS environment variable. | NiansuhAI")
33
+ raise Exception("API_KEYS environment variable not set. | NiansuhAI")
34
 
35
  # Simple in-memory rate limiter
36
  rate_limit_store = defaultdict(lambda: {"count": 0, "timestamp": time.time()})
 
188
  else:
189
  return cls.default_model
190
 
191
+ # (Rest of the Blackbox class remains unchanged)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
192
 
193
  # FastAPI app setup
194
  app = FastAPI()
 
288
  }
289
  ],
290
  "usage": {
291
+ "prompt_tokens": sum(len(msg.content.split()) for msg in request.messages),
292
  "completion_tokens": len(response_content.split()),
293
+ "total_tokens": sum(len(msg.content.split()) for msg in request.messages) + len(response_content.split())
294
  },
295
  }
296
  except ModelNotWorkingException as e:
 
309
  return {"data": [{"id": model} for model in Blackbox.models]}
310
 
311
  # Additional endpoints for better functionality
312
+
313
  @app.get("/niansuhai/v1/health", dependencies=[Depends(rate_limiter)])
314
  async def health_check(api_key: str = Depends(get_api_key)):
315
  logger.info(f"Health check requested by API key: {api_key}")
 
327
  logger.warning(f"Model not found: {model}")
328
  raise HTTPException(status_code=404, detail="Model not found | NiansuhAI")
329
 
330
+ # New endpoint to get model details
331
+ @app.get("/niansuhai/v1/models/{model}/details", dependencies=[Depends(rate_limiter)])
332
+ async def get_model_details(model: str, api_key: str = Depends(get_api_key)):
333
+ logger.info(f"Model details requested for '{model}' by API key: {api_key}")
334
+ actual_model = Blackbox.get_model(model)
335
+ if actual_model not in Blackbox.models:
336
+ logger.warning(f"Model not found: {model}")
337
+ raise HTTPException(status_code=404, detail="Model not found | NiansuhAI")
338
+ # For demonstration, we'll return mock details
339
+ model_details = {
340
+ "id": actual_model,
341
+ "description": f"Details about model {actual_model}",
342
+ "capabilities": ["chat", "completion", "image generation"] if actual_model in Blackbox.image_models else ["chat", "completion"],
343
+ "status": "available",
344
+ }
345
+ return {"data": model_details}
346
+
347
+ # Session history endpoints
348
+ session_histories = defaultdict(list) # In-memory storage for session histories
349
+
350
+ @app.post("/niansuhai/v1/sessions/{session_id}/messages", dependencies=[Depends(rate_limiter)])
351
+ async def add_message_to_session(session_id: str, message: Message, api_key: str = Depends(get_api_key)):
352
+ logger.info(f"Adding message to session '{session_id}' by API key: {api_key}")
353
+ session_histories[session_id].append({"role": message.role, "content": message.content})
354
+ return {"status": "message added"}
355
+
356
+ @app.get("/niansuhai/v1/sessions/{session_id}/messages", dependencies=[Depends(rate_limiter)])
357
+ async def get_session_messages(session_id: str, api_key: str = Depends(get_api_key)):
358
+ logger.info(f"Fetching messages for session '{session_id}' by API key: {api_key}")
359
+ messages = session_histories.get(session_id)
360
+ if messages is None:
361
+ raise HTTPException(status_code=404, detail="Session not found | NiansuhAI")
362
+ return {"data": messages}
363
+
364
+ # User preferences endpoints
365
+ user_preferences = defaultdict(dict) # In-memory storage for user preferences
366
+
367
+ class UserPreferences(BaseModel):
368
+ theme: Optional[str] = "light"
369
+ notifications_enabled: Optional[bool] = True
370
+
371
+ @app.post("/niansuhai/v1/users/{user_id}/preferences", dependencies=[Depends(rate_limiter)])
372
+ async def update_user_preferences(user_id: str, preferences: UserPreferences, api_key: str = Depends(get_api_key)):
373
+ logger.info(f"Updating preferences for user '{user_id}' by API key: {api_key}")
374
+ user_preferences[user_id] = preferences.dict()
375
+ return {"status": "preferences updated"}
376
+
377
+ @app.get("/niansuhai/v1/users/{user_id}/preferences", dependencies=[Depends(rate_limiter)])
378
+ async def get_user_preferences(user_id: str, api_key: str = Depends(get_api_key)):
379
+ logger.info(f"Fetching preferences for user '{user_id}' by API key: {api_key}")
380
+ preferences = user_preferences.get(user_id)
381
+ if preferences is None:
382
+ raise HTTPException(status_code=404, detail="User not found | NiansuhAI")
383
+ return {"data": preferences}
384
+
385
+ # Image upload endpoint
386
+ @app.post("/niansuhai/v1/images/upload", dependencies=[Depends(rate_limiter)])
387
+ async def upload_image(image: UploadFile = File(...), api_key: str = Depends(get_api_key)):
388
+ logger.info(f"Image upload requested by API key: {api_key}")
389
+ if not image.content_type.startswith('image/'):
390
+ logger.warning("Uploaded file is not an image.")
391
+ raise HTTPException(status_code=400, detail="Uploaded file is not an image | NiansuhAI")
392
+ # For demonstration, we'll just return the filename
393
+ return {"filename": image.filename, "status": "image uploaded"}
394
+
395
+ # Component health check endpoint
396
+ @app.get("/niansuhai/v1/health/{component}", dependencies=[Depends(rate_limiter)])
397
+ async def component_health_check(component: str, api_key: str = Depends(get_api_key)):
398
+ logger.info(f"Health check for component '{component}' requested by API key: {api_key}")
399
+ # Mock health status for components
400
+ components_status = {
401
+ "database": "healthy",
402
+ "message_queue": "healthy",
403
+ "cache": "healthy",
404
+ }
405
+ status = components_status.get(component)
406
+ if status is None:
407
+ logger.warning(f"Component not found: {component}")
408
+ raise HTTPException(status_code=404, detail="Component not found | NiansuhAI")
409
+ return {"component": component, "status": status}
410
+
411
  if __name__ == "__main__":
412
  import uvicorn
413
  uvicorn.run(app, host="0.0.0.0", port=8000)