Niansuh committed
Commit 4b6f3a0 · verified · 1 Parent(s): 11cade3

Update main.py

Files changed (1)
  1. main.py +28 -65
main.py CHANGED
@@ -8,17 +8,10 @@ import json
 from aiohttp import ClientSession
 from fastapi import FastAPI, HTTPException
 from pydantic import BaseModel
-from typing import List, Dict, Any, Optional
+from typing import List, Dict, Any, Optional, AsyncGenerator
 from datetime import datetime
 from fastapi.responses import StreamingResponse
 
-# Custom exception for model not working
-class ModelNotWorkingException(Exception):
-    def __init__(self, model: str):
-        self.model = model
-        self.message = f"The model '{model}' is currently not working. Please wait for NiansuhAI to fix this. Thank you for your patience."
-        super().__init__(self.message)
-
 # Mock implementations for ImageResponse and to_data_uri
 class ImageResponse:
     def __init__(self, url: str, alt: str):
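For context, `AsyncGenerator` joins the imports here because the return annotation of `create_async_generator` changes from `-> Any` to `-> AsyncGenerator` in a later hunk. A minimal sketch of the pattern (names here are illustrative, not from main.py):

```python
from typing import AsyncGenerator

async def stream_words() -> AsyncGenerator[str, None]:
    # An async generator yields values over time; the second
    # type parameter is the send type (None here).
    for word in ("hello", "world"):
        yield word
```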
@@ -26,6 +19,7 @@ class ImageResponse:
         self.alt = alt
 
 def to_data_uri(image: Any) -> str:
+    # Placeholder for actual image encoding
     return "data:image/png;base64,..."  # Replace with actual base64 data
 
 class AsyncGeneratorProvider:
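`to_data_uri` remains a stub returning placeholder base64 data. A real implementation would presumably base64-encode the image bytes; a minimal sketch, assuming the input is raw PNG `bytes` (this diff does not pin down the input type):

```python
import base64

def to_data_uri(image: bytes) -> str:
    # Assumes `image` holds raw PNG bytes; adjust the MIME type for other formats.
    encoded = base64.b64encode(image).decode("ascii")
    return f"data:image/png;base64,{encoded}"
```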
@@ -47,7 +41,7 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
         'blackbox',
         'gemini-1.5-flash',
         "llama-3.1-8b",
-        'llama-3.1-70b',  # Example of a non-working model
+        'llama-3.1-70b',
         'llama-3.1-405b',
         'ImageGenerationLV45LJp',
         'gpt-4o',
@@ -55,19 +49,6 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
         'claude-sonnet-3.5',
     ]
 
-    # Define the working status of models
-    model_status = {
-        'blackbox': True,
-        'gemini-1.5-flash': True,
-        'llama-3.1-8b': True,
-        'llama-3.1-70b': False,  # Non-working model
-        'llama-3.1-405b': True,
-        'ImageGenerationLV45LJp': True,
-        'gpt-4o': True,
-        'gemini-pro': True,
-        'claude-sonnet-3.5': True,
-    }
-
     agentMode = {
         'ImageGenerationLV45LJp': {'mode': True, 'id': "ImageGenerationLV45LJp", 'name': "Image Generation"},
     }
@@ -111,12 +92,11 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
         image: Optional[Any] = None,
         image_name: Optional[str] = None,
         **kwargs
-    ) -> Any:
-        model = cls.get_model(model)
+    ) -> AsyncGenerator:
+        if not messages:
+            raise ValueError("Messages cannot be empty")
 
-        # Check if the model is working
-        if not cls.model_status.get(model, False):
-            raise ModelNotWorkingException(model)
+        model = cls.get_model(model)
 
         headers = {
             "accept": "*/*",
@@ -139,14 +119,14 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
             prefix = f"@{cls.userSelectedModel[model]}"
             if not messages[0]['content'].startswith(prefix):
                 messages[0]['content'] = f"{prefix} {messages[0]['content']}"
-
+
         async with ClientSession(headers=headers) as session:
             if image is not None:
                 messages[-1]["data"] = {
                     "fileText": image_name,
                     "imageBase64": to_data_uri(image)
                 }
-
+
             random_id = ''.join(random.choices(string.ascii_letters + string.digits, k=7))
 
             data = {
@@ -173,6 +153,13 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
                 "webSearchMode": False,
             }
 
+            if model in cls.agentMode:
+                data["agentMode"] = cls.agentMode[model]
+            elif model in cls.trendingAgentMode:
+                data["trendingAgentMode"] = cls.trendingAgentMode[model]
+            elif model in cls.userSelectedModel:
+                data["userSelectedModel"] = cls.userSelectedModel[model]
+
             async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
                 response.raise_for_status()
                 if model == 'ImageGenerationLV45LJp':
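The restored mode-selection block decides which of the three mode fields the request body carries. For the image model, for example, the payload would gain roughly this entry, taken from the `agentMode` mapping shown earlier in the file (other fields omitted):

```python
data = {
    # ... prompt, id, and the other request fields ...
    "agentMode": {"mode": True, "id": "ImageGenerationLV45LJp", "name": "Image Generation"},
}
```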
@@ -184,19 +171,12 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
                     else:
                         raise Exception("Image URL not found in the response")
                 else:
-                    response_content = ""
                     async for chunk in response.content.iter_any():
                         if chunk:
-                            decoded_chunk = chunk.decode(errors='ignore')
+                            decoded_chunk = chunk.decode(errors='ignore')  # Handle decoding errors
                             decoded_chunk = re.sub(r'\$@\$v=[^$]+\$@\$', '', decoded_chunk)
                             if decoded_chunk.strip():
-                                response_content += decoded_chunk
-
-                    # Check if the response content is empty
-                    if not response_content.strip():
-                        raise ModelNotWorkingException(model)
-
-                    yield response_content
+                                yield decoded_chunk
 
 # FastAPI app setup
 app = FastAPI()
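With the aggregation removed, `create_async_generator` now yields each cleaned chunk as it arrives instead of one concatenated string, and no longer raises `ModelNotWorkingException` on empty output. A hedged consumption sketch (assumes network access to the Blackbox endpoint):

```python
import asyncio

async def demo():
    messages = [{"role": "user", "content": "Hello"}]
    # Each yielded chunk is a decoded, regex-cleaned text fragment.
    async for chunk in Blackbox.create_async_generator(model="blackbox", messages=messages):
        print(chunk, end="", flush=True)

# asyncio.run(demo())  # uncomment to run against the live endpoint
```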
@@ -228,32 +208,24 @@ def create_response(content: str, model: str, finish_reason: Optional[str] = Non
 
 @app.post("/niansuhai/v1/chat/completions")
 async def chat_completions(request: ChatRequest):
-    # Validate the model
-    valid_models = Blackbox.models + list(Blackbox.userSelectedModel.keys()) + list(Blackbox.model_aliases.keys())
-    if request.model not in valid_models:
-        raise HTTPException(status_code=400, detail=f"Invalid model name: {request.model}. Valid models are: {valid_models}")
-
     messages = [{"role": msg.role, "content": msg.content} for msg in request.messages]
 
-    try:
-        async_generator = Blackbox.create_async_generator(
-            model=request.model,
-            messages=messages,
-            image=None,  # Pass the image if required
-            image_name=None  # Pass image name if required
-        )
-    except ModelNotWorkingException as e:
-        raise HTTPException(status_code=503, detail=str(e))
+    async_generator = Blackbox.create_async_generator(
+        model=request.model,
+        messages=messages,
+        image=None,  # Pass the image if required
+        image_name=None  # Pass image name if required
+    )
 
     if request.stream:
         async def generate():
             async for chunk in async_generator:
                 if isinstance(chunk, ImageResponse):
+                    # Format the response as a Markdown image
                     image_markdown = f"![image]({chunk.url})"
                     yield f"data: {json.dumps(create_response(image_markdown, request.model))}\n\n"
                 else:
-                    response_with_footer = f"{chunk}\nNiansuhAI"  # Concatenating here
-                    yield f"data: {json.dumps(create_response(response_with_footer, request.model))}\n\n"
+                    yield f"data: {json.dumps(create_response(chunk, request.model))}\n\n"
             yield "data: [DONE]\n\n"
 
         return StreamingResponse(generate(), media_type="text/event-stream")
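Because the streaming branch no longer appends the "\nNiansuhAI" footer, each SSE event now carries the chunk as-is. A client sketch using aiohttp; the host/port are assumptions, and the exact chunk shape depends on `create_response`, whose body is not part of this diff:

```python
import asyncio
import json
from aiohttp import ClientSession

async def stream_chat():
    payload = {
        "model": "blackbox",
        "messages": [{"role": "user", "content": "Hello"}],
        "stream": True,
    }
    async with ClientSession() as session:
        async with session.post(
            "http://localhost:8000/niansuhai/v1/chat/completions",  # host/port assumed
            json=payload,
        ) as resp:
            resp.raise_for_status()
            async for raw_line in resp.content:  # aiohttp streams line by line
                line = raw_line.decode("utf-8", errors="ignore").strip()
                if not line.startswith("data: "):
                    continue
                body = line[len("data: "):]
                if body == "[DONE]":
                    break
                print(json.loads(body))  # shape defined by create_response()

asyncio.run(stream_chat())
```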
@@ -261,12 +233,10 @@ async def chat_completions(request: ChatRequest):
     response_content = ""
     async for chunk in async_generator:
         if isinstance(chunk, ImageResponse):
+            # Add Markdown image to the response
            response_content += f"![image]({chunk.url})\n"
         else:
-            response_content += chunk
-
-    # Append "\nNiansuhAI" to the final response content
-    response_content += "\nNiansuhAI"
+            response_content += chunk  # Concatenate text responses
 
     return {
         "id": f"chatcmpl-{uuid.uuid4()}",
@@ -285,10 +255,3 @@ async def chat_completions(request: ChatRequest):
         ],
         "usage": None,
     }
-
-
-
-@app.get("/niansuhai/v1/models")
-async def get_models():
-    return {"models": Blackbox.models}
-
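Note that the `/niansuhai/v1/models` route is removed in this commit, so model discovery now has to happen out of band. For completeness, a non-streaming call against the remaining endpoint would look roughly like this (same assumed host/port as above):

```python
import asyncio
from aiohttp import ClientSession

async def complete_once():
    payload = {
        "model": "blackbox",
        "messages": [{"role": "user", "content": "Hello"}],
        "stream": False,
    }
    async with ClientSession() as session:
        async with session.post(
            "http://localhost:8000/niansuhai/v1/chat/completions",  # host/port assumed
            json=payload,
        ) as resp:
            resp.raise_for_status()
            result = await resp.json()
            # Of the response fields, this diff only shows "id", "choices", and "usage".
            print(result["choices"])

asyncio.run(complete_once())
```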