Update api/utils.py
api/utils.py (+4 -0)
```diff
@@ -53,6 +53,7 @@ def strip_model_prefix(content: str, model_prefix: Optional[str] = None) -> str:
         return content[len(model_prefix):].strip()
     return content
 
+# Process streaming response with headers from config.py
 async def process_streaming_response(request: ChatRequest):
     request_id = f"chatcmpl-{uuid.uuid4()}"
     logger.info(f"Processing request with ID: {request_id} - Model: {request.model}")
@@ -63,6 +64,7 @@ async def process_streaming_response(request: ChatRequest):
 
     headers_api_chat = get_headers_api_chat(BASE_URL)
 
+    # Delay for 'o1-preview' model if necessary
     if request.model == 'o1-preview':
         delay_seconds = random.randint(1, 60)
         logger.info(f"Introducing a delay of {delay_seconds} seconds for model 'o1-preview' (Request ID: {request_id})")
@@ -145,6 +147,7 @@ async def process_streaming_response(request: ChatRequest):
         logger.error(f"Request error occurred for Request ID {request_id}: {e}")
         raise HTTPException(status_code=500, detail=str(e))
 
+# Process non-streaming response with headers from config.py
 async def process_non_streaming_response(request: ChatRequest):
     request_id = f"chatcmpl-{uuid.uuid4()}"
     logger.info(f"Processing request with ID: {request_id} - Model: {request.model}")
@@ -155,6 +158,7 @@ async def process_non_streaming_response(request: ChatRequest):
 
     headers_api_chat = get_headers_api_chat(BASE_URL)
 
+    # Delay for 'o1-preview' model if necessary
     if request.model == 'o1-preview':
         delay_seconds = random.randint(20, 60)
         logger.info(f"Introducing a delay of {delay_seconds} seconds for model 'o1-preview' (Request ID: {request_id})")
```
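The first hunk only shows the tail of `strip_model_prefix`; the prefix check itself sits above the diff context. A minimal sketch of what the full helper plausibly looks like, reconstructed around the two visible `return` statements (the `startswith` guard is an assumption, not part of this change):

```python
from typing import Optional


def strip_model_prefix(content: str, model_prefix: Optional[str] = None) -> str:
    """Strip a leading model prefix from the content, if present."""
    # Assumption: the visible returns imply a startswith() guard along these lines.
    if model_prefix and content.startswith(model_prefix):
        return content[len(model_prefix):].strip()
    return content


# Example: strip_model_prefix("gpt-4: hello", "gpt-4:") -> "hello"
```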
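The second and fourth hunks annotate the 'o1-preview' delay. They show the delay being computed and logged, while the actual wait falls outside the diff context. A minimal sketch of how such a delay is typically applied in an async handler, assuming `asyncio.sleep` and a simplified `ChatRequest` stand-in (both assumptions, not taken from this diff):

```python
import asyncio
import logging
import random
import uuid
from dataclasses import dataclass

logger = logging.getLogger(__name__)


@dataclass
class ChatRequest:
    """Simplified stand-in for the project's ChatRequest model."""
    model: str


async def process_streaming_response_sketch(request: ChatRequest) -> None:
    request_id = f"chatcmpl-{uuid.uuid4()}"
    logger.info(f"Processing request with ID: {request_id} - Model: {request.model}")

    # Delay for 'o1-preview' model if necessary (1-60 s here; the non-streaming
    # path in the last hunk uses 20-60 s).
    if request.model == 'o1-preview':
        delay_seconds = random.randint(1, 60)
        logger.info(
            f"Introducing a delay of {delay_seconds} seconds for model 'o1-preview' "
            f"(Request ID: {request_id})"
        )
        # Assumption: the real handler awaits the delay so the event loop is not blocked.
        await asyncio.sleep(delay_seconds)
```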
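The third hunk's new comment lands just below the streaming handler's error branch, which logs the failure and converts it into a 500. A minimal sketch of that pattern, assuming an httpx AsyncClient and FastAPI's HTTPException (the try block and the HTTP client are not shown in the diff):

```python
import logging
import uuid

import httpx
from fastapi import HTTPException

logger = logging.getLogger(__name__)


async def post_chat_sketch(url: str, payload: dict) -> dict:
    """Hypothetical wrapper showing the error-handling pattern around the upstream call."""
    request_id = f"chatcmpl-{uuid.uuid4()}"
    try:
        async with httpx.AsyncClient() as client:
            response = await client.post(url, json=payload)
            return response.json()
    except httpx.RequestError as e:
        # Mirrors the visible branch: log with the request ID, then surface a 500.
        logger.error(f"Request error occurred for Request ID {request_id}: {e}")
        raise HTTPException(status_code=500, detail=str(e))
```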