Update main.py
main.py (CHANGED)
import re
import random
import string
import uuid
import json
import logging
import asyncio
from aiohttp import ClientSession, ClientTimeout, ClientError
from fastapi import FastAPI, HTTPException, Request
from pydantic import BaseModel
from typing import List, Dict, Any, Optional, AsyncGenerator
from datetime import datetime
from fastapi.responses import StreamingResponse

# Configure logging
logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s [%(levelname)s] %(name)s: %(message)s",
    handlers=[
        logging.StreamHandler()
    ]
)
logger = logging.getLogger(__name__)

# Custom exception for model not working
class ModelNotWorkingException(Exception):
    def __init__(self, model: str):
        self.model = model
        self.message = f"The model '{model}' is currently not working. Please try another model or wait for it to be fixed."
        super().__init__(self.message)

# Mock implementations for ImageResponse and to_data_uri
# ... the ImageResponse class is collapsed in the diff view ...
def to_data_uri(image: Any) -> str:
    return "data:image/png;base64,..."  # Replace with actual base64 data

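The ImageResponse class itself is collapsed in this view, and to_data_uri is only a stub. As a hedged sketch of what working versions might look like (the `images` and `alt` attribute names are assumptions inferred from how the class is constructed and read elsewhere in this file):

    import base64

    # Hypothetical sketch, not the actual collapsed code: a minimal ImageResponse
    # matching the ImageResponse(image_url, alt=...) construction and the
    # chunk.images / chunk.alt reads used further down in this file.
    class ImageResponse:
        def __init__(self, images: str, alt: str = ""):
            self.images = images  # URL of the generated image
            self.alt = alt        # alt text; here, the prompt that produced it

    # A working to_data_uri would base64-encode the raw image bytes rather than
    # return a placeholder string.
    def to_data_uri(image: bytes, mime: str = "image/png") -> str:
        return f"data:{mime};base64,{base64.b64encode(image).decode()}"
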
class Blackbox:
    url = "https://www.blackbox.ai"
    api_endpoint = "https://www.blackbox.ai/api/chat"
    working = True
    supports_stream = True
    supports_system_message = True
    supports_message_history = True

    default_model = 'blackboxai'
    image_models = ['ImageGeneration']
    models = [
        default_model,
        'blackboxai-pro',
        "llama-3.1-8b",
        'llama-3.1-70b',
        'llama-3.1-405b',
        'gpt-4o',
        'gemini-pro',
        'gemini-1.5-flash',
        'claude-sonnet-3.5',
        'PythonAgent',
        'JavaAgent',
        'JavaScriptAgent',
        'HTMLAgent',
        'GoogleCloudAgent',
        'AndroidDeveloper',
        'SwiftDeveloper',
        'Next.jsAgent',
        'MongoDBAgent',
        'PyTorchAgent',
        'ReactAgent',
        'XcodeAgent',
        'AngularJSAgent',
        *image_models,
        'Niansuh',
    ]

    agentMode = {
        'ImageGeneration': {'mode': True, 'id': "ImageGenerationLV45LJp", 'name': "Image Generation"},
        'Niansuh': {'mode': True, 'id': "NiansuhAIk1HgESy", 'name': "Niansuh"},
    }
    trendingAgentMode = {
        "blackboxai": {},
        "gemini-1.5-flash": {'mode': True, 'id': 'Gemini'},
        "llama-3.1-8b": {'mode': True, 'id': "llama-3.1-8b"},
        'llama-3.1-70b': {'mode': True, 'id': "llama-3.1-70b"},
        'llama-3.1-405b': {'mode': True, 'id': "llama-3.1-405b"},
        'blackboxai-pro': {'mode': True, 'id': "BLACKBOXAI-PRO"},
        'PythonAgent': {'mode': True, 'id': "Python Agent"},
        'JavaAgent': {'mode': True, 'id': "Java Agent"},
        'JavaScriptAgent': {'mode': True, 'id': "JavaScript Agent"},
        'HTMLAgent': {'mode': True, 'id': "HTML Agent"},
        'GoogleCloudAgent': {'mode': True, 'id': "Google Cloud Agent"},
        'AndroidDeveloper': {'mode': True, 'id': "Android Developer"},
        'SwiftDeveloper': {'mode': True, 'id': "Swift Developer"},
        'Next.jsAgent': {'mode': True, 'id': "Next.js Agent"},
        'MongoDBAgent': {'mode': True, 'id': "MongoDB Agent"},
        'PyTorchAgent': {'mode': True, 'id': "PyTorch Agent"},
        'ReactAgent': {'mode': True, 'id': "React Agent"},
        'XcodeAgent': {'mode': True, 'id': "Xcode Agent"},
        'AngularJSAgent': {'mode': True, 'id': "AngularJS Agent"},
    }

    userSelectedModel = {
        # ... collapsed in the diff view ...
        'claude-sonnet-3.5': "claude-sonnet-3.5",
    }

    model_prefixes = {
        'gpt-4o': '@GPT-4o',
        'gemini-pro': '@Gemini-PRO',
        'claude-sonnet-3.5': '@Claude-Sonnet-3.5',
        'PythonAgent': '@Python Agent',
        'JavaAgent': '@Java Agent',
        'JavaScriptAgent': '@JavaScript Agent',
        'HTMLAgent': '@HTML Agent',
        'GoogleCloudAgent': '@Google Cloud Agent',
        'AndroidDeveloper': '@Android Developer',
        'SwiftDeveloper': '@Swift Developer',
        'Next.jsAgent': '@Next.js Agent',
        'MongoDBAgent': '@MongoDB Agent',
        'PyTorchAgent': '@PyTorch Agent',
        'ReactAgent': '@React Agent',
        'XcodeAgent': '@Xcode Agent',
        'AngularJSAgent': '@AngularJS Agent',
        'blackboxai-pro': '@BLACKBOXAI-PRO',
        'ImageGeneration': '@Image Generation',
        'Niansuh': '@Niansuh',
    }

    model_referers = {
        "blackboxai": f"{url}/?model=blackboxai",
        "gpt-4o": f"{url}/?model=gpt-4o",
        "gemini-pro": f"{url}/?model=gemini-pro",
        "claude-sonnet-3.5": f"{url}/?model=claude-sonnet-3.5"
    }

    model_aliases = {
        "gemini-flash": "gemini-1.5-flash",
        "claude-3.5-sonnet": "claude-sonnet-3.5",
        "flux": "ImageGeneration",
        "niansuh": "Niansuh",
    }

    # ... get_model() is collapsed in the diff view ...

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: List[Dict[str, str]],
        proxy: Optional[str] = None,
        image: Any = None,
        image_name: Optional[str] = None,
        webSearchMode: bool = False,
        **kwargs
    ) -> AsyncGenerator[Any, None]:
        model = cls.get_model(model)
        logger.info(f"Selected model: {model}")

        if not cls.working or model not in cls.models:
            logger.error(f"Model {model} is not working or not supported.")
            raise ModelNotWorkingException(model)

        headers = {
            "accept": "*/*",
            "accept-language": "en-US,en;q=0.9",
            # ... one header line collapsed in the diff view ...
            "content-type": "application/json",
            "origin": cls.url,
            "pragma": "no-cache",
            "priority": "u=1, i",
            "referer": cls.model_referers.get(model, cls.url),
            "sec-ch-ua": '"Chromium";v="129", "Not=A?Brand";v="8"',
            "sec-ch-ua-mobile": "?0",
            "sec-ch-ua-platform": '"Linux"',
            "sec-fetch-dest": "empty",
            "sec-fetch-mode": "cors",
            "sec-fetch-site": "same-origin",
            "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/129.0.0.0 Safari/537.36",
        }

        if model in cls.model_prefixes:
            prefix = cls.model_prefixes[model]
            if not messages[0]['content'].startswith(prefix):
                logger.debug(f"Adding prefix '{prefix}' to the first message.")
                messages[0]['content'] = f"{prefix} {messages[0]['content']}"

        random_id = ''.join(random.choices(string.ascii_letters + string.digits, k=7))
        messages[-1]['id'] = random_id
        messages[-1]['role'] = 'user'
        if image is not None:
            messages[-1]['data'] = {
                'fileText': '',
                'imageBase64': to_data_uri(image),
                'title': image_name
            }
            messages[-1]['content'] = 'FILE:BB\n$#$\n\n$#$\n' + messages[-1]['content']
            logger.debug("Image data added to the message.")

        data = {
            "messages": messages,
            "id": random_id,
            "previewToken": None,
            "userId": None,
            "codeModelMode": True,
            "agentMode": {},
            "trendingAgentMode": {},
            "isMicMode": False,
            "userSystemPrompt": None,
            "maxTokens": 99999999,
            "playgroundTopP": 0.9,
            "playgroundTemperature": 0.5,
            "isChromeExt": False,
            "githubToken": None,
            "clickedAnswer2": False,
            "clickedAnswer3": False,
            "clickedForceWebSearch": False,
            "visitFromDelta": False,
            "mobileClient": False,
            "userSelectedModel": None,
            "webSearchMode": webSearchMode,
        }

        if model in cls.agentMode:
            data["agentMode"] = cls.agentMode[model]
        elif model in cls.trendingAgentMode:
            data["trendingAgentMode"] = cls.trendingAgentMode[model]
        elif model in cls.userSelectedModel:
            data["userSelectedModel"] = cls.userSelectedModel[model]
        logger.info(f"Sending request to {cls.api_endpoint} with data: {data}")

        timeout = ClientTimeout(total=60)  # Set an appropriate timeout
        retry_attempts = 10  # Set the number of retry attempts

        for attempt in range(retry_attempts):
            try:
                async with ClientSession(headers=headers, timeout=timeout) as session:
                    async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
                        response.raise_for_status()
                        logger.info(f"Received response with status {response.status}")
                        if model == 'ImageGeneration':
                            response_text = await response.text()
                            url_match = re.search(r'https://storage\.googleapis\.com/[^\s\)]+', response_text)
                            if url_match:
                                image_url = url_match.group(0)
                                logger.info(f"Image URL found: {image_url}")
                                yield ImageResponse(image_url, alt=messages[-1]['content'])
                            else:
                                logger.error("Image URL not found in the response.")
                                raise Exception("Image URL not found in the response")
                        else:
                            full_response = ""
                            search_results_json = ""
                            try:
                                async for chunk, _ in response.content.iter_chunks():
                                    if chunk:
                                        decoded_chunk = chunk.decode(errors='ignore')
                                        decoded_chunk = re.sub(r'\$@\$v=[^$]+\$@\$', '', decoded_chunk)
                                        if decoded_chunk.strip():
                                            if '$~~~$' in decoded_chunk:
                                                search_results_json += decoded_chunk
                                            else:
                                                full_response += decoded_chunk
                                                yield decoded_chunk
                                logger.info("Finished streaming response chunks.")
                            except Exception as e:
                                logger.exception("Error while iterating over response chunks.")
                                raise e
                            if data["webSearchMode"] and search_results_json:
                                match = re.search(r'\$~~~\$(.*?)\$~~~\$', search_results_json, re.DOTALL)
                                if match:
                                    try:
                                        search_results = json.loads(match.group(1))
                                        formatted_results = "\n\n**Sources:**\n"
                                        for i, result in enumerate(search_results[:5], 1):
                                            formatted_results += f"{i}. [{result['title']}]({result['link']})\n"
                                        logger.info("Formatted search results.")
                                        yield formatted_results
                                    except json.JSONDecodeError as je:
                                        logger.error("Failed to parse search results JSON.")
                                        raise je
                break  # Exit the retry loop if successful
            except ClientError as ce:
                logger.error(f"Client error occurred: {ce}. Retrying attempt {attempt + 1}/{retry_attempts}")
                if attempt == retry_attempts - 1:
                    raise HTTPException(status_code=502, detail="Error communicating with the external API. | NiansuhAI")
            except asyncio.TimeoutError:
                logger.error(f"Request timed out. Retrying attempt {attempt + 1}/{retry_attempts}")
                if attempt == retry_attempts - 1:
                    raise HTTPException(status_code=504, detail="External API request timed out. | NiansuhAI")
            except Exception as e:
                logger.error(f"Unexpected error: {e}. Retrying attempt {attempt + 1}/{retry_attempts}")
                if attempt == retry_attempts - 1:
                    raise HTTPException(status_code=500, detail=str(e))

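get_model is collapsed in the diff view above. Given default_model, models, and model_aliases, a plausible minimal resolution logic could look like the following sketch (illustrative only; the actual body in the file may differ):

    # Hypothetical reconstruction of the collapsed get_model classmethod.
    class BlackboxSketch:
        """Illustrative only: how Blackbox.get_model plausibly resolves names."""
        default_model = 'blackboxai'
        models = ['blackboxai', 'gemini-1.5-flash']            # abbreviated
        model_aliases = {"gemini-flash": "gemini-1.5-flash"}   # abbreviated

        @classmethod
        def get_model(cls, model: str) -> str:
            if not model:
                return cls.default_model   # empty input falls back to the default
            if model in cls.models:
                return model               # already a canonical model name
            return cls.model_aliases.get(model, cls.default_model)  # resolve aliases
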
# FastAPI app setup
app = FastAPI()

# ... the Message model is collapsed in the diff view ...

class ChatRequest(BaseModel):
    model: str
    messages: List[Message]
    stream: Optional[bool] = False
    webSearchMode: Optional[bool] = False

def create_response(content: str, model: str, finish_reason: Optional[str] = None) -> Dict[str, Any]:
    return {
        # ... collapsed in the diff view ...
        "usage": None,
    }
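Most of create_response's body is also collapsed; only the trailing "usage": None is visible. Judging from how its result is serialized into `data:` lines by the streaming handler below, it presumably builds an OpenAI-style chat.completion.chunk dict. A hedged reconstruction for reference (field layout is an assumption, not the actual code):

    import uuid
    from datetime import datetime
    from typing import Any, Dict, Optional

    # Hypothetical reconstruction of the collapsed body.
    def create_response_sketch(content: str, model: str,
                               finish_reason: Optional[str] = None) -> Dict[str, Any]:
        return {
            "id": f"chatcmpl-{uuid.uuid4()}",
            "object": "chat.completion.chunk",
            "created": int(datetime.now().timestamp()),
            "model": model,
            "choices": [{
                "index": 0,
                "delta": {"role": "assistant", "content": content},
                "finish_reason": finish_reason,
            }],
            "usage": None,
        }
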

@app.post("/v1/chat/completions")
async def chat_completions(request: ChatRequest, req: Request):
    logger.info(f"Received chat completions request: {request}")
    try:
        messages = [{"role": msg.role, "content": msg.content} for msg in request.messages]

        async_generator = Blackbox.create_async_generator(
            model=request.model,
            messages=messages,
            image=None,
            image_name=None,
            webSearchMode=request.webSearchMode
        )

        if request.stream:
            async def generate():
                try:
                    async for chunk in async_generator:
                        if isinstance(chunk, ImageResponse):
                            image_markdown = f"![{chunk.alt}]({chunk.images})"
                            response_chunk = create_response(image_markdown, request.model)
                        else:
                            response_chunk = create_response(chunk, request.model)

                        # Yield each chunk in SSE format
                        yield f"data: {json.dumps(response_chunk)}\n\n"

                    # Signal the end of the stream
                    yield "data: [DONE]\n\n"
                except HTTPException as he:
                    error_response = {"error": he.detail}
                    yield f"data: {json.dumps(error_response)}\n\n"
                except Exception as e:
                    logger.exception("Error during streaming response generation.")
                    error_response = {"error": str(e)}
                    yield f"data: {json.dumps(error_response)}\n\n"

            return StreamingResponse(generate(), media_type="text/event-stream")
        else:
            response_content = ""
            async for chunk in async_generator:
                if isinstance(chunk, ImageResponse):
                    response_content += f"![{chunk.alt}]({chunk.images})\n"
                else:
                    response_content += chunk

            logger.info("Completed non-streaming response generation.")
            return {
                "id": f"chatcmpl-{uuid.uuid4()}",
                "object": "chat.completion",
                "created": int(datetime.now().timestamp()),
                "model": request.model,
                "choices": [
                    {
                        "message": {
                            "role": "assistant",
                            "content": response_content
                        },
                        "finish_reason": "stop",
                        "index": 0
                    }
                ],
                "usage": {
                    "prompt_tokens": sum(len(msg['content'].split()) for msg in messages),
                    "completion_tokens": len(response_content.split()),
                    "total_tokens": sum(len(msg['content'].split()) for msg in messages) + len(response_content.split())
                },
            }
    except ModelNotWorkingException as e:
        logger.warning(f"Model not working: {e}")
        raise HTTPException(status_code=503, detail=str(e))
    except HTTPException as he:
        logger.warning(f"HTTPException: {he.detail}")
        raise he
    except Exception as e:
        logger.exception("An unexpected error occurred while processing the chat completions request.")
        raise HTTPException(status_code=500, detail=str(e))

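For a quick end-to-end check of the endpoint above, a client sketch like this can consume the SSE stream. Illustrative only: it assumes the server runs locally on port 8000, as in the __main__ block at the end of the file, and that the requests package is installed.

    import json
    import requests

    payload = {
        "model": "llama-3.1-70b",
        "messages": [{"role": "user", "content": "Say hello"}],
        "stream": True,
    }
    with requests.post("http://localhost:8000/v1/chat/completions",
                       json=payload, stream=True) as resp:
        for line in resp.iter_lines():
            if not line or not line.startswith(b"data: "):
                continue
            body = line[len(b"data: "):]
            if body == b"[DONE]":
                break
            chunk = json.loads(body)
            # Exact chunk layout depends on create_response (partly collapsed above).
            print(chunk)
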
@app.get("/v1/models")
async def get_models():
    logger.info("Fetching available models.")
    return {"data": [{"id": model} for model in Blackbox.models]}

# Additional endpoints for better functionality
@app.get("/v1/health")
async def health_check():
    """Health check endpoint to verify the service is running."""
    return {"status": "ok"}

@app.get("/v1/models/{model}/status")
async def model_status(model: str):
    """Check if a specific model is available."""
    if model in Blackbox.models:
        return {"model": model, "status": "available"}
    elif model in Blackbox.model_aliases:
        actual_model = Blackbox.model_aliases[model]
        return {"model": actual_model, "status": "available via alias"}
    else:
        raise HTTPException(status_code=404, detail="Model not found")

if __name__ == "__main__":
    import uvicorn
    uvicorn.run(app, host="0.0.0.0", port=8000)
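The auxiliary endpoints can be exercised the same way (again an illustrative sketch, assuming a local instance on port 8000):

    import requests

    print(requests.get("http://localhost:8000/v1/health").json())   # {'status': 'ok'}
    print(requests.get("http://localhost:8000/v1/models").json())   # {'data': [{'id': 'blackboxai'}, ...]}
    print(requests.get("http://localhost:8000/v1/models/flux/status").json())
    # 'flux' resolves through model_aliases:
    # {'model': 'ImageGeneration', 'status': 'available via alias'}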