Update main.py
main.py (CHANGED)
@@ -16,6 +16,7 @@ from aiohttp import ClientSession, ClientResponseError
 from fastapi import FastAPI, HTTPException, Request, Depends, Header
 from fastapi.responses import JSONResponse, StreamingResponse
 from pydantic import BaseModel
+from datetime import datetime
 
 # Configure logging
 logging.basicConfig(
@@ -316,9 +317,18 @@ class Blackbox:
                     proxy=proxy
                 ) as response_api_chat:
                     response_api_chat.raise_for_status()
+                    # Simulate streaming by breaking the response into chunks
+                    # Since the external API may not support streaming, we'll simulate it
                     text = await response_api_chat.text()
                     cleaned_response = cls.clean_response(text)
 
+                    # For demonstration, we'll split the response into words and yield them with delays
+                    words = cleaned_response.split()
+                    for word in words:
+                        await asyncio.sleep(0.1)  # Simulate delay
+                        yield word + ' '
+
+                    # If the model is an image model, handle accordingly
                     if model in cls.image_models:
                         match = re.search(r'!\[.*?\]\((https?://[^\)]+)\)', cleaned_response)
                         if match:
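Note on the simulated-streaming hunk above: the new loop turns one buffered upstream reply into word-sized chunks so the client sees incremental output. A minimal, self-contained sketch of the same pattern wired to FastAPI's StreamingResponse (the /demo-stream route and the fake_upstream_reply helper are illustrative stand-ins, not part of this file):

import asyncio

from fastapi import FastAPI
from fastapi.responses import StreamingResponse

app = FastAPI()

async def fake_upstream_reply() -> str:
    # Hypothetical stand-in for the buffered /api/chat reply
    await asyncio.sleep(0.2)
    return "The external API returned this whole answer at once."

async def simulated_stream():
    text = await fake_upstream_reply()
    # Same technique as the diff: split into words, pace them out with a delay
    for word in text.split():
        await asyncio.sleep(0.1)
        yield word + ' '

@app.get("/demo-stream")
async def demo_stream():
    # text/plain lets curl -N display chunks as they are flushed
    return StreamingResponse(simulated_stream(), media_type="text/plain")

One behavioral detail of the hunk above: the word loop runs before the image-model branch, so callers receive the word stream and then a second yield (image_response or cleaned_response) for the same reply.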
@@ -327,32 +337,6 @@ class Blackbox:
                             yield image_response
                         else:
                             yield cleaned_response
-                    else:
-                        if websearch:
-                            match = re.search(r'\$~~~\$(.*?)\$~~~\$', cleaned_response, re.DOTALL)
-                            if match:
-                                source_part = match.group(1).strip()
-                                answer_part = cleaned_response[match.end():].strip()
-                                try:
-                                    sources = json.loads(source_part)
-                                    source_formatted = "**Source:**\n"
-                                    for item in sources:
-                                        title = item.get('title', 'No Title')
-                                        link = item.get('link', '#')
-                                        position = item.get('position', '')
-                                        source_formatted += f"{position}. [{title}]({link})\n"
-                                    final_response = f"{answer_part}\n\n{source_formatted}"
-                                except json.JSONDecodeError:
-                                    final_response = f"{answer_part}\n\nSource information is unavailable."
-                            else:
-                                final_response = cleaned_response
-                        else:
-                            if '$~~~$' in cleaned_response:
-                                final_response = cleaned_response.split('$~~~$')[0].strip()
-                            else:
-                                final_response = cleaned_response
-
-                        yield final_response
             except ClientResponseError as e:
                 error_text = f"Error {e.status}: {e.message}"
                 try:
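For reference, the removed websearch branch expected the upstream reply to embed a JSON source list between $~~~$ markers and rendered it as a Markdown link list after the answer text. A standalone sketch of that parsing; the sample payload is invented for illustration:

import json
import re

# Invented sample in the shape the removed code expected
raw = ('$~~~$[{"title": "Example", "link": "https://example.com", "position": 1}]$~~~$'
       ' The answer text follows the source block.')

match = re.search(r'\$~~~\$(.*?)\$~~~\$', raw, re.DOTALL)
if match:
    sources = json.loads(match.group(1).strip())
    answer = raw[match.end():].strip()
    source_formatted = "**Source:**\n"
    for item in sources:
        source_formatted += (f"{item.get('position', '')}. "
                             f"[{item.get('title', 'No Title')}]({item.get('link', '#')})\n")
    print(f"{answer}\n\n{source_formatted}")
    # Prints:
    # The answer text follows the source block.
    #
    # **Source:**
    # 1. [Example](https://example.com)

After this change, any $~~~$ block in the upstream text reaches the client unparsed, unless cls.clean_response already strips it.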
@@ -365,6 +349,7 @@ class Blackbox:
             except Exception as e:
                 yield f"Unexpected error during /api/chat request: {str(e)}"
 
+            # Simulate the second API call to /chat/{chat_id}
             chat_url = f'{cls.url}/chat/{chat_id}?model={model}'
 
             try:
@@ -375,7 +360,7 @@ class Blackbox:
                     proxy=proxy
                 ) as response_chat:
                     response_chat.raise_for_status()
-
+                    # Assuming no streaming from this endpoint
             except ClientResponseError as e:
                 error_text = f"Error {e.status}: {e.message}"
                 try:
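To observe the simulated stream from the client side, read the response incrementally rather than buffering it. A small sketch using httpx against the illustrative /demo-stream route from the earlier note (host and port are assumptions):

import asyncio

import httpx

async def main():
    async with httpx.AsyncClient() as client:
        # Stream the body instead of waiting for the full response
        async with client.stream("GET", "http://127.0.0.1:8000/demo-stream") as resp:
            async for chunk in resp.aiter_text():
                print(chunk, end="", flush=True)  # words arrive roughly 0.1 s apart

asyncio.run(main())

Equivalently, curl -N http://127.0.0.1:8000/demo-stream prints the chunks as they are flushed.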