Update main.py
main.py
CHANGED
@@ -12,7 +12,7 @@ from typing import List, Dict, Any, Optional, AsyncGenerator, Union

from datetime import datetime

-from aiohttp import ClientSession, ClientTimeout, ClientError
from fastapi import FastAPI, HTTPException, Request, Depends, Header
from fastapi.responses import StreamingResponse, JSONResponse, RedirectResponse
from pydantic import BaseModel
@@ -98,20 +98,17 @@ class ModelNotWorkingException(Exception):

# Mock implementations for ImageResponse and to_data_uri
class ImageResponse:
-    def __init__(self,
-        self.
        self.alt = alt

def to_data_uri(image: Any) -> str:
    return "data:image/png;base64,..."  # Replace with actual base64 data

-# New Blackbox Class Integration
class Blackbox:
-    label = "Blackbox AI"
    url = "https://www.blackbox.ai"
    api_endpoint = "https://www.blackbox.ai/api/chat"
    working = True
-    supports_gpt_4 = True
    supports_stream = True
    supports_system_message = True
    supports_message_history = True
@@ -121,7 +118,6 @@ class Blackbox:
    models = [
        default_model,
        'blackboxai-pro',
-        *image_models,
        "llama-3.1-8b",
        'llama-3.1-70b',
        'llama-3.1-405b',
@@ -142,12 +138,18 @@ class Blackbox:
        'ReactAgent',
        'XcodeAgent',
        'AngularJSAgent',
    ]

    agentMode = {
        'ImageGeneration': {'mode': True, 'id': "ImageGenerationLV45LJp", 'name': "Image Generation"},
    }
-
    trendingAgentMode = {
        "blackboxai": {},
        "gemini-1.5-flash": {'mode': True, 'id': 'Gemini'},
@@ -195,65 +197,33 @@ class Blackbox:
        'AngularJSAgent': '@AngularJS Agent',
        'blackboxai-pro': '@BLACKBOXAI-PRO',
        'ImageGeneration': '@Image Generation',
    }

    model_referers = {
-        "blackboxai": "/?model=blackboxai",
-        "gpt-4o": "/?model=gpt-4o",
-        "gemini-pro": "/?model=gemini-pro",
-        "claude-sonnet-3.5": "/?model=claude-sonnet-3.5"
    }

    model_aliases = {
        "gemini-flash": "gemini-1.5-flash",
        "claude-3.5-sonnet": "claude-sonnet-3.5",
        "flux": "ImageGeneration",
    }

    @classmethod
-    def get_model(cls, model: str) -> str:
        if model in cls.models:
            return model
-        elif model in cls.
            return cls.model_aliases[model]
        else:
-            return cls.default_model
-
-    @staticmethod
-    def generate_random_string(length: int = 7) -> str:
-        characters = string.ascii_letters + string.digits
-        return ''.join(random.choices(characters, k=length))
-
-    @staticmethod
-    def generate_next_action() -> str:
-        return uuid.uuid4().hex
-
-    @staticmethod
-    def generate_next_router_state_tree() -> str:
-        router_state = [
-            "",
-            {
-                "children": [
-                    "(chat)",
-                    {
-                        "children": [
-                            "__PAGE__",
-                            {}
-                        ]
-                    }
-                ]
-            },
-            None,
-            None,
-            True
-        ]
-        return json.dumps(router_state)
-
-    @staticmethod
-    def clean_response(text: str) -> str:
-        pattern = r'^\$\@\$v=undefined-rv1\$\@\$'
-        cleaned_text = re.sub(pattern, '', text)
-        return cleaned_text

    @classmethod
    async def create_async_generator(
@@ -265,87 +235,69 @@ class Blackbox:
        image_name: Optional[str] = None,
        webSearchMode: bool = False,
        **kwargs
-    ) -> AsyncGenerator[
-        """
-        Creates an asynchronous generator for streaming responses from Blackbox AI.
-
-        Parameters:
-            model (str): Model to use for generating responses.
-            messages (List[Dict[str, str]]): Message history.
-            proxy (Optional[str]): Proxy URL, if needed.
-            image (Any): Image data, if any.
-            image_name (Optional[str]): Name of the image, if any.
-            webSearchMode (bool): Enables or disables web search mode.
-            **kwargs: Additional keyword arguments.
-
-        Yields:
-            Union[str, ImageResponse]: Segments of the generated response or ImageResponse objects.
-        """
        model = cls.get_model(model)

-
-        next_action = cls.generate_next_action()
-        next_router_state_tree = cls.generate_next_router_state_tree()

-
-
-
-        prefix = cls.model_prefixes.get(model, "")
-
-        formatted_prompt = ""
-        for message in messages:
-            role = message.get('role', '').capitalize()
-            content = message.get('content', '')
-            if role and content:
-                formatted_prompt += f"{role}: {content}\n"

-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-            'sec-fetch-dest': 'empty',
-            'sec-fetch-mode': 'cors',
-            'sec-fetch-site': 'same-origin',
-            'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) '
-                          'AppleWebKit/537.36 (KHTML, like Gecko) '
-                          'Chrome/129.0.0.0 Safari/537.36'
        }

-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
            "previewToken": None,
            "userId": None,
            "codeModelMode": True,
-            "agentMode":
-            "trendingAgentMode":
            "isMicMode": False,
            "userSystemPrompt": None,
-            "maxTokens":
            "playgroundTopP": 0.9,
            "playgroundTemperature": 0.5,
            "isChromeExt": False,
@@ -355,101 +307,81 @@ class Blackbox:
            "clickedForceWebSearch": False,
            "visitFromDelta": False,
            "mobileClient": False,
            "webSearchMode": webSearchMode,
-            "userSelectedModel": cls.userSelectedModel.get(model, model)
        }

-
-
-
-
-
-
-
-        }
-        headers_chat_combined = {**common_headers, **headers_chat}

-

-
        try:
-            async with session
-                cls.api_endpoint,
-
-
-
-
-
-
-
-
-
-                    match = re.search(r'!\[.*?\]\((https?://[^\)]+)\)', cleaned_response)
-                    if match:
-                        image_url = match.group(1)
-                        image_response = ImageResponse(images=image_url, alt="Generated Image")
-                        yield image_response
-                    else:
-                        yield cleaned_response
-                else:
-                    if webSearchMode:
-                        match = re.search(r'\$~~~\$(.*?)\$~~~\$', cleaned_response, re.DOTALL)
-                        if match:
-                            source_part = match.group(1).strip()
-                            answer_part = cleaned_response[match.end():].strip()
-                            try:
-                                sources = json.loads(source_part)
-                                source_formatted = "**Sources:**\n"
-                                for item in sources[:5]:
-                                    title = item.get('title', 'No Title')
-                                    link = item.get('link', '#')
-                                    source_formatted += f"- [{title}]({link})\n"
-                                final_response = f"{answer_part}\n\n{source_formatted}"
-                            except json.JSONDecodeError:
-                                final_response = f"{answer_part}\n\nSource information is unavailable."
                        else:
-
                    else:
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
        except Exception as e:
-

# FastAPI app setup
app = FastAPI()
@@ -564,7 +496,7 @@ async def chat_completions(request: ChatRequest, req: Request, api_key: str = De
        async for chunk in async_generator:
            if isinstance(chunk, ImageResponse):
                # Handle image responses if necessary
-                image_markdown = f"
                assistant_content += image_markdown
                response_chunk = create_response(image_markdown, request.model, finish_reason=None)
            else:
@@ -587,7 +519,9 @@ async def chat_completions(request: ChatRequest, req: Request, api_key: str = De
            yield f"data: {json.dumps(response_chunk)}\n\n"

        # After all chunks are sent, send the final message with finish_reason
-
        completion_tokens = len(assistant_content.split())
        total_tokens = prompt_tokens + completion_tokens
        estimated_cost = calculate_estimated_cost(prompt_tokens, completion_tokens)
@@ -629,7 +563,7 @@ async def chat_completions(request: ChatRequest, req: Request, api_key: str = De
        response_content = ""
        async for chunk in async_generator:
            if isinstance(chunk, ImageResponse):
-                response_content += f"
+        self.url = url
        self.alt = alt

def to_data_uri(image: Any) -> str:
    return "data:image/png;base64,..."  # Replace with actual base64 data

class Blackbox:
    url = "https://www.blackbox.ai"
    api_endpoint = "https://www.blackbox.ai/api/chat"
    working = True

    supports_stream = True
    supports_system_message = True
    supports_message_history = True

    models = [
        default_model,
        'blackboxai-pro',
        "llama-3.1-8b",
        'llama-3.1-70b',
        'llama-3.1-405b',

        'ReactAgent',
        'XcodeAgent',
        'AngularJSAgent',
+        *image_models,
+        'Niansuh',
    ]

+    # Filter models based on AVAILABLE_MODELS
+    if AVAILABLE_MODELS:
+        models = [model for model in models if model in AVAILABLE_MODELS]
+
    agentMode = {
        'ImageGeneration': {'mode': True, 'id': "ImageGenerationLV45LJp", 'name': "Image Generation"},
+        'Niansuh': {'mode': True, 'id': "NiansuhAIk1HgESy", 'name': "Niansuh"},
    }
    trendingAgentMode = {
        "blackboxai": {},
        "gemini-1.5-flash": {'mode': True, 'id': 'Gemini'},

        'AngularJSAgent': '@AngularJS Agent',
        'blackboxai-pro': '@BLACKBOXAI-PRO',
        'ImageGeneration': '@Image Generation',
+        'Niansuh': '@Niansuh',
    }

    model_referers = {
+        "blackboxai": f"{url}/?model=blackboxai",
+        "gpt-4o": f"{url}/?model=gpt-4o",
+        "gemini-pro": f"{url}/?model=gemini-pro",
+        "claude-sonnet-3.5": f"{url}/?model=claude-sonnet-3.5"
    }

    model_aliases = {
        "gemini-flash": "gemini-1.5-flash",
        "claude-3.5-sonnet": "claude-sonnet-3.5",
        "flux": "ImageGeneration",
+        "niansuh": "Niansuh",
    }

    @classmethod
+    def get_model(cls, model: str) -> Optional[str]:
        if model in cls.models:
            return model
+        elif model in cls.userSelectedModel and cls.userSelectedModel[model] in cls.models:
+            return cls.userSelectedModel[model]
+        elif model in cls.model_aliases and cls.model_aliases[model] in cls.models:
            return cls.model_aliases[model]
        else:
+            return cls.default_model if cls.default_model in cls.models else None
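# Illustrative sketch, not part of the diff: how the new get_model resolution above is
# expected to behave, assuming the class attributes shown in this file. An alias only
# resolves when its target survives the AVAILABLE_MODELS filter, and an unknown name
# falls back to default_model, or to None when that too has been filtered out.
Blackbox.get_model('llama-3.1-70b')   # 'llama-3.1-70b' (listed directly in models)
Blackbox.get_model('niansuh')         # 'Niansuh' via model_aliases, if still available
Blackbox.get_model('no-such-model')   # default_model, or None if it is unavailable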

    @classmethod
    async def create_async_generator(

        image_name: Optional[str] = None,
        webSearchMode: bool = False,
        **kwargs
+    ) -> AsyncGenerator[Any, None]:
        model = cls.get_model(model)
+        if model is None:
+            logger.error(f"Model {model} is not available.")
+            raise ModelNotWorkingException(model)

+        logger.info(f"Selected model: {model}")

+        if not cls.working or model not in cls.models:
+            logger.error(f"Model {model} is not working or not supported.")
+            raise ModelNotWorkingException(model)

+        headers = {
+            "accept": "*/*",
+            "accept-language": "en-US,en;q=0.9",
+            "cache-control": "no-cache",
+            "content-type": "application/json",
+            "origin": cls.url,
+            "pragma": "no-cache",
+            "priority": "u=1, i",
+            "referer": cls.model_referers.get(model, cls.url),
+            "sec-ch-ua": '"Chromium";v="129", "Not=A?Brand";v="8"',
+            "sec-ch-ua-mobile": "?0",
+            "sec-ch-ua-platform": '"Linux"',
+            "sec-fetch-dest": "empty",
+            "sec-fetch-mode": "cors",
+            "sec-fetch-site": "same-origin",
+            "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/129.0.0.0 Safari/537.36",
        }

+        if model in cls.model_prefixes:
+            prefix = cls.model_prefixes[model]
+            if not messages[0]['content'].startswith(prefix):
+                logger.debug(f"Adding prefix '{prefix}' to the first message.")
+                messages[0]['content'] = f"{prefix} {messages[0]['content']}"
+
+        random_id = ''.join(random.choices(string.ascii_letters + string.digits, k=7))
+        messages[-1]['id'] = random_id
+        messages[-1]['role'] = 'user'
+
+        # Don't log the full message content for privacy
+        logger.debug(f"Generated message ID: {random_id} for model: {model}")
+
+        if image is not None:
+            messages[-1]['data'] = {
+                'fileText': '',
+                'imageBase64': to_data_uri(image),
+                'title': image_name
+            }
+            messages[-1]['content'] = 'FILE:BB\n$#$\n\n$#$\n' + messages[-1]['content']
+            logger.debug("Image data added to the message.")
+
+        data = {
+            "messages": messages,
+            "id": random_id,
            "previewToken": None,
            "userId": None,
            "codeModelMode": True,
+            "agentMode": {},
+            "trendingAgentMode": {},
            "isMicMode": False,
            "userSystemPrompt": None,
+            "maxTokens": 99999999,
            "playgroundTopP": 0.9,
            "playgroundTemperature": 0.5,
            "isChromeExt": False,

            "clickedForceWebSearch": False,
            "visitFromDelta": False,
            "mobileClient": False,
+            "userSelectedModel": None,
            "webSearchMode": webSearchMode,
        }

+        if model in cls.agentMode:
+            data["agentMode"] = cls.agentMode[model]
+        elif model in cls.trendingAgentMode:
+            data["trendingAgentMode"] = cls.trendingAgentMode[model]
+        elif model in cls.userSelectedModel:
+            data["userSelectedModel"] = cls.userSelectedModel[model]
+        logger.info(f"Sending request to {cls.api_endpoint} with data (excluding messages).")

+        timeout = ClientTimeout(total=60)  # Set an appropriate timeout
+        retry_attempts = 10  # Set the number of retry attempts

+        for attempt in range(retry_attempts):
            try:
+                async with ClientSession(headers=headers, timeout=timeout) as session:
+                    async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
+                        response.raise_for_status()
+                        logger.info(f"Received response with status {response.status}")
+                        if model == 'ImageGeneration':
+                            response_text = await response.text()
+                            url_match = re.search(r'https://storage\.googleapis\.com/[^\s\)]+', response_text)
+                            if url_match:
+                                image_url = url_match.group(0)
+                                logger.info(f"Image URL found.")
+                                yield ImageResponse(image_url, alt=messages[-1]['content'])
                            else:
+                                logger.error("Image URL not found in the response.")
+                                raise Exception("Image URL not found in the response")
                        else:
+                            full_response = ""
+                            search_results_json = ""
+                            try:
+                                async for chunk, _ in response.content.iter_chunks():
+                                    if chunk:
+                                        decoded_chunk = chunk.decode(errors='ignore')
+                                        decoded_chunk = re.sub(r'\$@\$v=[^$]+\$@\$', '', decoded_chunk)
+                                        if decoded_chunk.strip():
+                                            if '$~~~$' in decoded_chunk:
+                                                search_results_json += decoded_chunk
+                                            else:
+                                                full_response += decoded_chunk
+                                                yield decoded_chunk
+                                logger.info("Finished streaming response chunks.")
+                            except Exception as e:
+                                logger.exception("Error while iterating over response chunks.")
+                                raise e
+                            if data["webSearchMode"] and search_results_json:
+                                match = re.search(r'\$~~~\$(.*?)\$~~~\$', search_results_json, re.DOTALL)
+                                if match:
+                                    try:
+                                        search_results = json.loads(match.group(1))
+                                        formatted_results = "\n\n**Sources:**\n"
+                                        for i, result in enumerate(search_results[:5], 1):
+                                            formatted_results += f"{i}. [{result['title']}]({result['link']})\n"
+                                        logger.info("Formatted search results.")
+                                        yield formatted_results
+                                    except json.JSONDecodeError as je:
+                                        logger.error("Failed to parse search results JSON.")
+                                        raise je
+                break  # Exit the retry loop if successful
+            except ClientError as ce:
+                logger.error(f"Client error occurred: {ce}. Retrying attempt {attempt + 1}/{retry_attempts}")
+                if attempt == retry_attempts - 1:
+                    raise HTTPException(status_code=502, detail="Error communicating with the external API.")
+            except asyncio.TimeoutError:
+                logger.error(f"Request timed out. Retrying attempt {attempt + 1}/{retry_attempts}")
+                if attempt == retry_attempts - 1:
+                    raise HTTPException(status_code=504, detail="External API request timed out.")
            except Exception as e:
+                logger.error(f"Unexpected error: {e}. Retrying attempt {attempt + 1}/{retry_attempts}")
+                if attempt == retry_attempts - 1:
+                    raise HTTPException(status_code=500, detail=str(e))

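# Illustrative sketch, not part of the diff: one way the streaming generator above could
# be consumed on its own, assuming main.py's imports and the plain-dict message format
# used elsewhere in this file. Text chunks arrive as strings; image results arrive as
# ImageResponse objects carrying the generated image URL.
import asyncio

async def _demo() -> None:
    messages = [{"role": "user", "content": "Write a haiku about FastAPI."}]
    async for chunk in Blackbox.create_async_generator(model="blackboxai", messages=messages):
        if isinstance(chunk, ImageResponse):
            print("image:", chunk.url)
        else:
            print(chunk, end="")

# asyncio.run(_demo())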
# FastAPI app setup
app = FastAPI()

        async for chunk in async_generator:
            if isinstance(chunk, ImageResponse):
                # Handle image responses if necessary
+                image_markdown = f"
                assistant_content += image_markdown
                response_chunk = create_response(image_markdown, request.model, finish_reason=None)
            else:
            yield f"data: {json.dumps(response_chunk)}\n\n"

        # After all chunks are sent, send the final message with finish_reason
+        # *** Key Correction Starts Here ***
+        prompt_tokens = sum(len(msg.content.split()) for msg in request.messages)
+        # *** Key Correction Ends Here ***
        completion_tokens = len(assistant_content.split())
        total_tokens = prompt_tokens + completion_tokens
        estimated_cost = calculate_estimated_cost(prompt_tokens, completion_tokens)
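# Illustrative sketch, not part of the diff: the corrected accounting above is a rough
# whitespace word count, not a real tokenizer. Assuming two request messages and a
# 40-word assistant reply:
#   prompt_tokens     = len("What is FastAPI?".split()) + len("Explain briefly.".split())  # 3 + 2 = 5
#   completion_tokens = 40
#   total_tokens      = 5 + 40 = 45
# calculate_estimated_cost(prompt_tokens, completion_tokens) then prices those word counts.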
        response_content = ""
        async for chunk in async_generator:
            if isinstance(chunk, ImageResponse):
+                response_content += f"
            else:
                response_content += chunk