Niansuh committed on
Commit
c6f5a0b
·
verified ·
1 Parent(s): ee7af5a

Update main.py

Browse files
Files changed (1) hide show
  1. main.py +326 -7
main.py CHANGED
@@ -1,4 +1,4 @@
1
- # app/main.py
2
 
3
  import os
4
  import re
@@ -18,9 +18,8 @@ from aiohttp import ClientSession, ClientTimeout, ClientError
18
  from fastapi import FastAPI, HTTPException, Request, Depends, Header
19
  from fastapi.responses import StreamingResponse, JSONResponse, RedirectResponse
20
  from pydantic import BaseModel
21
-
22
- from .blackbox import Blackbox, ImageResponse
23
- from .image import to_data_uri, ImageType
24
 
25
  # Configure logging
26
  logging.basicConfig(
@@ -31,6 +30,10 @@ logging.basicConfig(
31
  logger = logging.getLogger(__name__)
32
 
33
  # Load environment variables
 
 
 
 
34
  API_KEYS = os.getenv('API_KEYS', '').split(',') # Comma-separated API keys
35
  RATE_LIMIT = int(os.getenv('RATE_LIMIT', '60')) # Requests per minute
36
  AVAILABLE_MODELS = os.getenv('AVAILABLE_MODELS', '') # Comma-separated available models
@@ -101,6 +104,322 @@ class ModelNotWorkingException(Exception):
101
  self.message = f"The model '{model}' is currently not working. Please try another model or wait for it to be fixed."
102
  super().__init__(self.message)
103
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
104
  # FastAPI app setup
105
  app = FastAPI()
106
 
@@ -227,7 +546,7 @@ async def chat_completions(request: ChatRequest, req: Request, api_key: str = De
227
  try:
228
  assistant_content = ""
229
  async for chunk in async_generator:
230
- if isinstance(chunk, ImageResponse):
231
  # Handle image responses if necessary
232
  image_markdown = f"![image]({chunk.url})\n"
233
  assistant_content += image_markdown
@@ -293,7 +612,7 @@ async def chat_completions(request: ChatRequest, req: Request, api_key: str = De
293
  else:
294
  response_content = ""
295
  async for chunk in async_generator:
296
- if isinstance(chunk, ImageResponse):
297
  response_content += f"![image]({chunk.url})\n"
298
  else:
299
  response_content += chunk
@@ -401,4 +720,4 @@ async def http_exception_handler(request: Request, exc: HTTPException):
401
  # Run the application
402
  if __name__ == "__main__":
403
  import uvicorn
404
- uvicorn.run("app.main:app", host="0.0.0.0", port=8000, reload=True)
 
1
+ # main.py
2
 
3
  import os
4
  import re
 
18
  from fastapi import FastAPI, HTTPException, Request, Depends, Header
19
  from fastapi.responses import StreamingResponse, JSONResponse, RedirectResponse
20
  from pydantic import BaseModel
21
+ from io import BytesIO
22
+ import base64
 
23
 
24
  # Configure logging
25
  logging.basicConfig(
 
30
  logger = logging.getLogger(__name__)
31
 
32
  # Load environment variables
33
+ from dotenv import load_dotenv
34
+
35
+ load_dotenv()
36
+
37
  API_KEYS = os.getenv('API_KEYS', '').split(',') # Comma-separated API keys
38
  RATE_LIMIT = int(os.getenv('RATE_LIMIT', '60')) # Requests per minute
39
  AVAILABLE_MODELS = os.getenv('AVAILABLE_MODELS', '') # Comma-separated available models
 
104
  self.message = f"The model '{model}' is currently not working. Please try another model or wait for it to be fixed."
105
  super().__init__(self.message)
106
 
107
# Image Handling Functions
ALLOWED_EXTENSIONS = {'png', 'jpg', 'jpeg', 'gif', 'webp', 'svg'}

def is_allowed_extension(filename: str) -> bool:
    """
    Return True when *filename* has an allowed image extension.

    The check is case-insensitive and requires at least one dot in the name.
    """
    return '.' in filename and \
        filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS

def is_data_uri_an_image(data_uri: str) -> bool:
    """
    Validate that *data_uri* is a base64 image data URI with an allowed format.

    Returns True on success; raises ValueError when the URI is malformed or
    the declared image format is not allowed.
    """
    # Bug fix: a MIME subtype may contain '+' (e.g. "svg+xml"); the previous
    # pattern r'data:image/(\w+);base64,' could never match "svg+xml", which
    # made the svg+xml allowance below unreachable.
    match = re.match(r'data:image/([\w.+-]+);base64,', data_uri)
    if not match:
        raise ValueError("Invalid data URI image.")
    image_format = match.group(1).lower()
    if image_format not in ALLOWED_EXTENSIONS and image_format != "svg+xml":
        raise ValueError("Invalid image format (from MIME type).")
    return True
128
+
129
def extract_data_uri(data_uri: str) -> bytes:
    """Decode and return the binary payload of a base64 data URI."""
    # A data URI has the form "data:<mime>;base64,<payload>"; the payload
    # is the segment right after the first comma.
    segments = data_uri.split(",")
    return base64.b64decode(segments[1])
134
+
135
def to_data_uri(image: str) -> str:
    """
    Return *image* unchanged after validating it as an image data URI.

    Propagates ValueError from is_data_uri_an_image for invalid input.
    """
    is_data_uri_an_image(image)
    return image
141
+
142
class ImageResponseCustom:
    """Simple container for a generated image result: its URL and alt text."""

    def __init__(self, url: str, alt: str):
        # url: location of the generated image; alt: descriptive text shown
        # when rendering the image as markdown.
        self.url = url
        self.alt = alt

    def __repr__(self) -> str:
        # Added for debuggability when image chunks appear in logs/streams.
        return f"{type(self).__name__}(url={self.url!r}, alt={self.alt!r})"
146
+
147
# Blackbox AI Integration
class Blackbox:
    """
    Async client for the Blackbox AI chat endpoint.

    Holds the provider's capability flags, the model catalogue (chat, agent
    and image models) and per-model request metadata (agent modes, prefixes,
    referers, aliases). `create_async_generator` streams response chunks for
    chat models or yields an ImageResponseCustom for image models.
    """
    url = "https://www.blackbox.ai"
    api_endpoint = "https://www.blackbox.ai/api/chat"
    working = True
    supports_stream = True
    supports_system_message = True
    supports_message_history = True

    default_model = 'blackboxai'
    image_models = ['ImageGeneration']
    models = [
        default_model,
        'blackboxai-pro',
        *image_models,
        "llama-3.1-8b",
        'llama-3.1-70b',
        'llama-3.1-405b',
        'gpt-4o',
        'gemini-pro',
        'gemini-1.5-flash',
        'claude-sonnet-3.5',
        'PythonAgent',
        'JavaAgent',
        'JavaScriptAgent',
        'HTMLAgent',
        'GoogleCloudAgent',
        'AndroidDeveloper',
        'SwiftDeveloper',
        'Next.jsAgent',
        'MongoDBAgent',
        'PyTorchAgent',
        'ReactAgent',
        'XcodeAgent',
        'AngularJSAgent',
    ]

    # Payload fragments the Blackbox API expects for special "agent" models.
    agentMode = {
        'ImageGeneration': {'mode': True, 'id': "ImageGenerationLV45LJp", 'name': "Image Generation"},
        # NOTE(review): 'Niansuh' is not listed in `models`, so this entry is
        # currently unreachable through get_model — confirm intent.
        'Niansuh': {'mode': True, 'id': "NiansuhAIk1HgESy", 'name': "Niansuh"},
    }

    trendingAgentMode = {
        "blackboxai": {},
        "gemini-1.5-flash": {'mode': True, 'id': 'Gemini'},
        "llama-3.1-8b": {'mode': True, 'id': "llama-3.1-8b"},
        'llama-3.1-70b': {'mode': True, 'id': "llama-3.1-70b"},
        'llama-3.1-405b': {'mode': True, 'id': "llama-3.1-405b"},
        'blackboxai-pro': {'mode': True, 'id': "BLACKBOXAI-PRO"},
        'PythonAgent': {'mode': True, 'id': "Python Agent"},
        'JavaAgent': {'mode': True, 'id': "Java Agent"},
        'JavaScriptAgent': {'mode': True, 'id': "JavaScript Agent"},
        'HTMLAgent': {'mode': True, 'id': "HTML Agent"},
        'GoogleCloudAgent': {'mode': True, 'id': "Google Cloud Agent"},
        'AndroidDeveloper': {'mode': True, 'id': "Android Developer"},
        'SwiftDeveloper': {'mode': True, 'id': "Swift Developer"},
        'Next.jsAgent': {'mode': True, 'id': "Next.js Agent"},
        'MongoDBAgent': {'mode': True, 'id': "MongoDB Agent"},
        'PyTorchAgent': {'mode': True, 'id': "PyTorch Agent"},
        'ReactAgent': {'mode': True, 'id': "React Agent"},
        'XcodeAgent': {'mode': True, 'id': "Xcode Agent"},
        'AngularJSAgent': {'mode': True, 'id': "AngularJS Agent"},
    }

    # Models the API selects through the "userSelectedModel" field.
    userSelectedModel = {
        "gpt-4o": "gpt-4o",
        "gemini-pro": "gemini-pro",
        'claude-sonnet-3.5': "claude-sonnet-3.5",
    }

    # Prefix injected into the first message to steer the provider.
    model_prefixes = {
        'gpt-4o': '@GPT-4o',
        'gemini-pro': '@Gemini-PRO',
        'claude-sonnet-3.5': '@Claude-Sonnet-3.5',
        'PythonAgent': '@Python Agent',
        'JavaAgent': '@Java Agent',
        'JavaScriptAgent': '@JavaScript Agent',
        'HTMLAgent': '@HTML Agent',
        'GoogleCloudAgent': '@Google Cloud Agent',
        'AndroidDeveloper': '@Android Developer',
        'SwiftDeveloper': '@Swift Developer',
        'Next.jsAgent': '@Next.js Agent',
        'MongoDBAgent': '@MongoDB Agent',
        'PyTorchAgent': '@PyTorch Agent',
        'ReactAgent': '@React Agent',
        'XcodeAgent': '@Xcode Agent',
        'AngularJSAgent': '@AngularJS Agent',
        'blackboxai-pro': '@BLACKBOXAI-PRO',
        'ImageGeneration': '@Image Generation',
        'Niansuh': '@Niansuh',
    }

    # Referer header value per model (falls back to the site root).
    model_referers = {
        "blackboxai": f"{url}/?model=blackboxai",
        "gpt-4o": f"{url}/?model=gpt-4o",
        "gemini-pro": f"{url}/?model=gemini-pro",
        "claude-sonnet-3.5": f"{url}/?model=claude-sonnet-3.5"
    }

    # Friendly aliases accepted from callers.
    model_aliases = {
        "gemini-flash": "gemini-1.5-flash",
        "claude-3.5-sonnet": "claude-sonnet-3.5",
        "flux": "ImageGeneration",
        "niansuh": "Niansuh",
    }

    @classmethod
    def get_model(cls, model: str) -> Optional[str]:
        """
        Resolve a requested model name to a supported model.

        Resolution order: exact match, userSelectedModel mapping, alias
        mapping, then the default model; returns None only when even the
        default model is missing from `models`.
        """
        if model in cls.models:
            return model
        elif model in cls.userSelectedModel and cls.userSelectedModel[model] in cls.models:
            return cls.userSelectedModel[model]
        elif model in cls.model_aliases and cls.model_aliases[model] in cls.models:
            return cls.model_aliases[model]
        else:
            return cls.default_model if cls.default_model in cls.models else None

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: List[Dict[str, str]],
        proxy: Optional[str] = None,
        image: Optional[str] = None,
        image_name: Optional[str] = None,
        webSearchMode: bool = False,
        **kwargs
    ) -> AsyncGenerator[Union[str, ImageResponseCustom], None]:
        """
        Stream a chat completion (or an image result) from Blackbox AI.

        Yields decoded text chunks for chat models, a single
        ImageResponseCustom for image models, and a formatted "Sources"
        block when webSearchMode returns search results.

        Raises ModelNotWorkingException for unknown/unsupported models and
        HTTPException (502/504/500) when all retry attempts fail.
        """
        # Bug fix: keep the caller's original model name — `model` is
        # overwritten by get_model() below, so logging/raising with it after
        # a failed resolution would report "None" instead of the request.
        requested_model = model
        model = cls.get_model(model)
        if model is None:
            logger.error(f"Model {requested_model} is not available.")
            raise ModelNotWorkingException(requested_model)

        logger.info(f"Selected model: {model}")

        if not cls.working or model not in cls.models:
            logger.error(f"Model {model} is not working or not supported.")
            raise ModelNotWorkingException(model)

        # Browser-like headers; the Referer varies per model.
        headers = {
            "accept": "*/*",
            "accept-language": "en-US,en;q=0.9",
            "cache-control": "no-cache",
            "content-type": "application/json",
            "origin": cls.url,
            "pragma": "no-cache",
            "priority": "u=1, i",
            "referer": cls.model_referers.get(model, cls.url),
            "sec-ch-ua": '"Chromium";v="129", "Not=A?Brand";v="8"',
            "sec-ch-ua-mobile": "?0",
            "sec-ch-ua-platform": '"Linux"',
            "sec-fetch-dest": "empty",
            "sec-fetch-mode": "cors",
            "sec-fetch-site": "same-origin",
            "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/129.0.0.0 Safari/537.36",
        }

        # Steer the provider by prefixing the first message (mutates the
        # caller's `messages` list in place, as before).
        if model in cls.model_prefixes:
            prefix = cls.model_prefixes[model]
            if not messages[0]['content'].startswith(prefix):
                logger.debug(f"Adding prefix '{prefix}' to the first message.")
                messages[0]['content'] = f"{prefix} {messages[0]['content']}"

        random_id = ''.join(random.choices(string.ascii_letters + string.digits, k=7))
        messages[-1]['id'] = random_id
        messages[-1]['role'] = 'user'

        # Don't log the full message content for privacy
        logger.debug(f"Generated message ID: {random_id} for model: {model}")

        if image is not None:
            # Attach the image as base64 plus the provider's FILE marker.
            messages[-1]['data'] = {
                'fileText': '',
                'imageBase64': image,
                'title': image_name
            }
            messages[-1]['content'] = 'FILE:BB\n$#$\n\n$#$\n' + messages[-1]['content']
            logger.debug("Image data added to the message.")

        data = {
            "messages": messages,
            "id": random_id,
            "previewToken": None,
            "userId": None,
            "codeModelMode": True,
            "agentMode": {},
            "trendingAgentMode": {},
            "isMicMode": False,
            "userSystemPrompt": None,
            "maxTokens": 99999999,
            "playgroundTopP": 0.9,
            "playgroundTemperature": 0.5,
            "isChromeExt": False,
            "githubToken": None,
            "clickedAnswer2": False,
            "clickedAnswer3": False,
            "clickedForceWebSearch": False,
            "visitFromDelta": False,
            "mobileClient": False,
            "userSelectedModel": None,
            "webSearchMode": webSearchMode,
        }

        # Exactly one of the mode fields is populated per model.
        if model in cls.agentMode:
            data["agentMode"] = cls.agentMode[model]
        elif model in cls.trendingAgentMode:
            data["trendingAgentMode"] = cls.trendingAgentMode[model]
        elif model in cls.userSelectedModel:
            data["userSelectedModel"] = cls.userSelectedModel[model]
        logger.info(f"Sending request to {cls.api_endpoint} with data (excluding messages).")

        timeout = ClientTimeout(total=60)  # Set an appropriate timeout
        retry_attempts = 10  # Set the number of retry attempts

        # NOTE(review): a retry re-POSTs the whole request; if a failure
        # happens after some chunks were already yielded, the consumer may
        # see duplicated content — confirm this is acceptable.
        for attempt in range(retry_attempts):
            try:
                async with ClientSession(headers=headers, timeout=timeout) as session:
                    async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
                        response.raise_for_status()
                        logger.info(f"Received response with status {response.status}")
                        if model in cls.image_models:
                            response_text = await response.text()
                            # Extract image URL from the response
                            url_match = re.search(r'https://storage\.googleapis\.com/[^\s\)]+', response_text)
                            if url_match:
                                image_url = url_match.group(0)
                                logger.info(f"Image URL found: {image_url}")
                                yield ImageResponseCustom(url=image_url, alt=messages[-1]['content'])
                            else:
                                logger.error("Image URL not found in the response.")
                                raise Exception("Image URL not found in the response")
                        else:
                            full_response = ""
                            search_results_json = ""
                            try:
                                async for chunk, _ in response.content.iter_chunks():
                                    if chunk:
                                        decoded_chunk = chunk.decode(errors='ignore')
                                        # Strip the provider's version marker.
                                        decoded_chunk = re.sub(r'\$@\$v=[^$]+\$@\$', '', decoded_chunk)
                                        if decoded_chunk.strip():
                                            # '$~~~$' delimits embedded search-result JSON.
                                            if '$~~~$' in decoded_chunk:
                                                search_results_json += decoded_chunk
                                            else:
                                                full_response += decoded_chunk
                                                yield decoded_chunk
                                logger.info("Finished streaming response chunks.")
                            except Exception as e:
                                logger.exception("Error while iterating over response chunks.")
                                raise e
                            if data["webSearchMode"] and search_results_json:
                                match = re.search(r'\$~~~\$(.*?)\$~~~\$', search_results_json, re.DOTALL)
                                if match:
                                    try:
                                        search_results = json.loads(match.group(1))
                                        formatted_results = "\n\n**Sources:**\n"
                                        for i, result in enumerate(search_results[:5], 1):
                                            formatted_results += f"{i}. [{result['title']}]({result['link']})\n"
                                        logger.info("Formatted search results.")
                                        yield formatted_results
                                    except json.JSONDecodeError as je:
                                        logger.error("Failed to parse search results JSON.")
                                        raise je
                break  # Exit the retry loop if successful
            except ClientError as ce:
                logger.error(f"Client error occurred: {ce}. Retrying attempt {attempt + 1}/{retry_attempts}")
                if attempt == retry_attempts - 1:
                    raise HTTPException(status_code=502, detail="Error communicating with the external API.")
            except asyncio.TimeoutError:
                logger.error(f"Request timed out. Retrying attempt {attempt + 1}/{retry_attempts}")
                if attempt == retry_attempts - 1:
                    raise HTTPException(status_code=504, detail="External API request timed out.")
            except Exception as e:
                logger.error(f"Unexpected error: {e}. Retrying attempt {attempt + 1}/{retry_attempts}")
                if attempt == retry_attempts - 1:
                    raise HTTPException(status_code=500, detail=str(e))
422
+
423
  # FastAPI app setup
424
  app = FastAPI()
425
 
 
546
  try:
547
  assistant_content = ""
548
  async for chunk in async_generator:
549
+ if isinstance(chunk, ImageResponseCustom):
550
  # Handle image responses if necessary
551
  image_markdown = f"![image]({chunk.url})\n"
552
  assistant_content += image_markdown
 
612
  else:
613
  response_content = ""
614
  async for chunk in async_generator:
615
+ if isinstance(chunk, ImageResponseCustom):
616
  response_content += f"![image]({chunk.url})\n"
617
  else:
618
  response_content += chunk
 
720
  # Run the application
721
  if __name__ == "__main__":
722
  import uvicorn
723
+ uvicorn.run("main:app", host="0.0.0.0", port=8000, reload=True)