Commit 4d49c3c (verified), committed by isididiidid
1 Parent(s): f4084da

Update app.py

Files changed (1):
  1. app.py +121 -85
app.py CHANGED
@@ -30,10 +30,6 @@ app = FastAPI(
     version="1.0.0"
 )
 
-# Raise the log level when debugging
-if os.getenv("DEBUG", "false").lower() == "true":
-    logging.getLogger("openai-proxy").setLevel(logging.DEBUG)
-
 # Add CORS middleware
 app.add_middleware(
     CORSMiddleware,
@@ -44,7 +40,7 @@ app.add_middleware(
 )
 
 # Configuration
-DEEPSIDER_API_BASE = "https://api.chargpt.ai/api/v1"
+DEEPSIDER_API_BASE = "https://api.chargpt.ai/api/v2"
 TOKEN_INDEX = 0
 
 # Model mapping table
@@ -60,6 +56,7 @@ MODEL_MAPPING = {
     "claude-3.7-sonnet": "anthropic/claude-3.7-sonnet",
 }
 
+# Request headers
 def get_headers(api_key):
     global TOKEN_INDEX
     # Check whether the key contains multiple tokens (comma-separated)
@@ -73,11 +70,21 @@ def get_headers(api_key):
         current_token = api_key
 
     return {
-        "accept": "application/json",
+        "accept": "*/*",
+        "accept-encoding": "gzip, deflate, br, zstd",
+        "accept-language": "en-US,en;q=0.9,zh-CN;q=0.8,zh;q=0.7",
         "content-type": "application/json",
+        "origin": "chrome-extension://client",
+        "i-lang": "zh-CN",
+        "i-version": "1.1.64",
+        "sec-ch-ua": '"Chromium";v="134", "Not:A-Brand";v="24"',
+        "sec-ch-ua-mobile": "?0",
+        "sec-ch-ua-platform": "Windows",
+        "sec-fetch-dest": "empty",
+        "sec-fetch-mode": "cors",
+        "sec-fetch-site": "cross-site",
         "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/134.0.0.0 Safari/537.36",
-        "authorization": f"Bearer {current_token.strip()}",
-        "i-version": "1.1.64"
+        "authorization": f"Bearer {current_token.strip()}"
     }
 
 # OpenAI API request models
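Note on the multi-token rotation used by get_headers(): when the Authorization value contains several comma-separated tokens, TOKEN_INDEX advances round-robin on each request. A minimal standalone sketch of that behaviour (the token values and the helper name pick_token are made up; the real selection happens inside get_headers, whose splitting logic sits outside this hunk):

TOKEN_INDEX = 0

def pick_token(api_key: str) -> str:
    """Round-robin over comma-separated tokens, mirroring get_headers()."""
    global TOKEN_INDEX
    tokens = [t.strip() for t in api_key.split(',') if t.strip()]
    if len(tokens) > 1:
        token = tokens[TOKEN_INDEX % len(tokens)]
        TOKEN_INDEX += 1  # the next request uses the next token
        return token
    return api_key.strip()

print([pick_token("tokenA,tokenB,tokenC") for _ in range(4)])
# ['tokenA', 'tokenB', 'tokenC', 'tokenA']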
@@ -120,7 +127,7 @@ async def check_account_balance(api_key, token_index=None):
     try:
         # Fetch the account quota information
         response = requests.get(
-            f"{DEEPSIDER_API_BASE.replace('/v1', '')}/quota/retrieve",
+            f"{DEEPSIDER_API_BASE.replace('/v2', '')}/quota/retrieve",
             headers=headers
         )
 
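The .replace('/v2', '') call simply strips the version segment from the base URL before appending the quota path, so the balance check hits the unversioned endpoint:

DEEPSIDER_API_BASE = "https://api.chargpt.ai/api/v2"
print(f"{DEEPSIDER_API_BASE.replace('/v2', '')}/quota/retrieve")
# https://api.chargpt.ai/api/quota/retrieve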
@@ -162,15 +169,26 @@ def map_openai_to_deepsider_model(model: str) -> str:
 
 def format_messages_for_deepsider(messages: List[ChatMessage]) -> str:
     """Format the message list into the prompt format required by the DeepSider API"""
-    # Simply concatenate all message contents, no special formatting
-    combined_prompt = ""
+    prompt = ""
     for msg in messages:
-        if msg.role == "system":
-            combined_prompt = msg.content + "\n\n" + combined_prompt
+        role = msg.role
+        # Map OpenAI roles onto a format DeepSider understands
+        if role == "system":
+            # System messages go at the top, as guidance
+            prompt = f"{msg.content}\n\n" + prompt
+        elif role == "user":
+            prompt += f"Human: {msg.content}\n\n"
+        elif role == "assistant":
+            prompt += f"Assistant: {msg.content}\n\n"
         else:
-            combined_prompt += msg.content + "\n\n"
+            # Treat any other role as a user message
+            prompt += f"Human ({role}): {msg.content}\n\n"
 
-    return combined_prompt.strip()
+    # If the last message is not from the user, append a "Human: " prefix to cue the model to answer
+    if messages and messages[-1].role != "user":
+        prompt += "Human: "
+
+    return prompt.strip()
 
 async def generate_openai_response(full_response: str, request_id: str, model: str) -> Dict:
     """Generate a complete response in the OpenAI API response format"""
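To make the new prompt format concrete, here is roughly what format_messages_for_deepsider() produces for a short conversation (a sketch; ChatMessage below is a stand-in for the Pydantic request model defined elsewhere in app.py):

from dataclasses import dataclass

@dataclass
class ChatMessage:  # stand-in for the request model defined in app.py
    role: str
    content: str

messages = [
    ChatMessage("system", "You are a helpful assistant."),
    ChatMessage("user", "Hello!"),
    ChatMessage("assistant", "Hi, how can I help?"),
]

# format_messages_for_deepsider(messages) would return:
#
# You are a helpful assistant.
#
# Human: Hello!
#
# Assistant: Hi, how can I help?
#
# Human: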
@@ -200,16 +218,72 @@ async def generate_openai_response(full_response: str, request_id: str, model: s
 async def stream_openai_response(response, request_id: str, model: str, api_key, token_index):
     """Stream the response back in OpenAI API format"""
     timestamp = int(time.time())
+    full_response = ""
 
     try:
-        # Pass the raw response straight through
-        for chunk in response.iter_lines():
-            if chunk:
-                yield chunk.decode('utf-8') + "\n"
+        # Convert the DeepSider response stream into OpenAI streaming format
+        for line in response.iter_lines():
+            if not line:
+                continue
 
+            if line.startswith(b'data: '):
+                try:
+                    data = json.loads(line[6:].decode('utf-8'))
+
+                    if data.get('code') == 202 and data.get('data', {}).get('type') == "chat":
+                        # Extract the message content
+                        content = data.get('data', {}).get('content', '')
+                        if content:
+                            full_response += content
+
+                            # Emit a streaming chunk in OpenAI format
+                            chunk = {
+                                "id": f"chatcmpl-{request_id}",
+                                "object": "chat.completion.chunk",
+                                "created": timestamp,
+                                "model": model,
+                                "choices": [
+                                    {
+                                        "index": 0,
+                                        "delta": {
+                                            "content": content
+                                        },
+                                        "finish_reason": None
+                                    }
+                                ]
+                            }
+                            yield f"data: {json.dumps(chunk)}\n\n"
+
+                    elif data.get('code') == 203:
+                        # Emit the completion signal
+                        chunk = {
+                            "id": f"chatcmpl-{request_id}",
+                            "object": "chat.completion.chunk",
+                            "created": timestamp,
+                            "model": model,
+                            "choices": [
+                                {
+                                    "index": 0,
+                                    "delta": {},
+                                    "finish_reason": "stop"
+                                }
+                            ]
+                        }
+                        yield f"data: {json.dumps(chunk)}\n\n"
+                        yield "data: [DONE]\n\n"
+
+                except json.JSONDecodeError:
+                    logger.warning(f"无法解析响应: {line}")
+
     except Exception as e:
         logger.error(f"流式响应处理出错: {str(e)}")
 
+        # Try the next token
+        tokens = api_key.split(',')
+        if len(tokens) > 1:
+            logger.info(f"尝试使用下一个Token重试请求")
+            # We do not retry automatically here yet; only log the error
+
         # Return an error message
         error_chunk = {
             "id": f"chatcmpl-{request_id}",
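Because the proxy now re-emits OpenAI-style chat.completion.chunk events instead of passing the raw DeepSider stream through, any SSE-aware client can consume it. A minimal consumer sketch, assuming the proxy listens on 127.0.0.1:7860 (as in the startup logs) and the token is a placeholder:

import json
import requests

resp = requests.post(
    "http://127.0.0.1:7860/v1/chat/completions",
    headers={"Authorization": "Bearer YOUR_DEEPSIDER_TOKEN"},  # placeholder token
    json={
        "model": "claude-3.7-sonnet",
        "messages": [{"role": "user", "content": "Hello!"}],
        "stream": True,
    },
    stream=True,
)

for line in resp.iter_lines():
    if not line or not line.startswith(b"data: "):
        continue
    payload = line[6:].decode("utf-8")
    if payload == "[DONE]":
        break
    chunk = json.loads(payload)
    # Print each delta as it arrives
    print(chunk["choices"][0]["delta"].get("content", ""), end="", flush=True)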
@@ -232,16 +306,7 @@ async def stream_openai_response(response, request_id: str, model: str, api_key,
 # Route definitions
 @app.get("/")
 async def root():
-    """Return a simple HTML page with usage instructions"""
-    return {
-        "message": "OpenAI API Proxy服务已启动 连接至DeepSider API",
-        "usage": {
-            "模型列表": "GET /v1/models",
-            "聊天完成": "POST /v1/chat/completions",
-            "账户余额": "GET /admin/balance (需要X-Admin-Key头)"
-        },
-        "说明": "请在Authorization头中使用Bearer token格式,支持使用英文逗号分隔多个token实现轮询"
-    }
+    return {"message": "OpenAI API Proxy服务已启动 连接至DeepSider API"}
 
 @app.get("/v1/models")
 async def list_models(api_key: str = Depends(verify_api_key)):
@@ -283,7 +348,8 @@ async def create_chat_completion(
     payload = {
         "model": deepsider_model,
         "prompt": prompt,
-        "stream": chat_request.stream
+        "webAccess": "close",  # web access disabled by default
+        "timezone": "Asia/Shanghai"
     }
 
     # Build the request headers (with the selected token)
@@ -293,17 +359,12 @@
     current_token_index = (TOKEN_INDEX - 1) % len(tokens) if len(tokens) > 0 else 0
 
     try:
-        # Log the request details
-        logger.info(f"发送请求到DeepSider API - 模型: {deepsider_model}, Prompt长度: {len(prompt)}")
-        logger.debug(f"请求正文: {json.dumps(payload)}")
-
         # Send the request to the DeepSider API
         response = requests.post(
-            f"{DEEPSIDER_API_BASE}/chat/completions",
+            f"{DEEPSIDER_API_BASE}/chat/conversation",
             headers=headers,
             json=payload,
-            stream=chat_request.stream,
-            timeout=60  # 60-second timeout
+            stream=True
         )
 
         # Check the response status
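Put together, the upstream call now looks roughly like this (a sketch: the endpoint, the payload fields and stream=True come from the hunk above; the prompt text and token are illustrative, and the full header set built by get_headers() is trimmed down here):

import requests

DEEPSIDER_API_BASE = "https://api.chargpt.ai/api/v2"

payload = {
    "model": "anthropic/claude-3.7-sonnet",
    "prompt": "Human: Hello!",  # output of format_messages_for_deepsider()
    "webAccess": "close",
    "timezone": "Asia/Shanghai",
}

response = requests.post(
    f"{DEEPSIDER_API_BASE}/chat/conversation",
    headers={
        "content-type": "application/json",
        "authorization": "Bearer YOUR_DEEPSIDER_TOKEN",  # placeholder
    },
    json=payload,
    stream=True,  # the upstream reply is always read as an SSE stream now
)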
@@ -313,19 +374,10 @@
                error_data = response.json()
                error_msg += f" - {error_data.get('message', '')}"
            except:
-                try:
-                    error_msg += f" - {response.text[:200]}"
-                except:
-                    pass
+                error_msg += f" - {response.text}"
 
            logger.error(error_msg)
-            return {
-                "error": {
-                    "message": error_msg,
-                    "type": "api_error",
-                    "code": response.status_code
-                }
-            }
+            raise HTTPException(status_code=response.status_code, detail="API请求失败")
 
        # Handle streaming vs non-streaming response
        if chat_request.stream:
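Raising HTTPException here means an upstream failure now reaches the caller as a real HTTP error status with FastAPI's standard detail body, instead of a 200 response wrapping an error object. A client would check for it like this (sketch; the URL and token are placeholders):

import requests

resp = requests.post(
    "http://127.0.0.1:7860/v1/chat/completions",
    headers={"Authorization": "Bearer YOUR_DEEPSIDER_TOKEN"},
    json={"model": "claude-3.7-sonnet",
          "messages": [{"role": "user", "content": "Hi"}]},
)
if not resp.ok:
    # FastAPI serializes the raised HTTPException as {"detail": "..."}
    print(resp.status_code, resp.json().get("detail"))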
@@ -335,22 +387,26 @@
                 media_type="text/event-stream"
             )
         else:
-            try:
-                # Non-streaming request: return the response directly
-                json_response = response.json()
-                # Log the response to help with debugging
-                logger.debug(f"非流式响应: {json.dumps(json_response)}")
-                return json_response
-
-            except Exception as e:
-                logger.exception(f"非流式响应处理出错: {str(e)}")
-                return {
-                    "error": {
-                        "message": f"处理响应时出错: {str(e)}",
-                        "type": "processing_error",
-                        "code": "internal_error"
-                    }
-                }
+            # Collect the complete response
+            full_response = ""
+            for line in response.iter_lines():
+                if not line:
+                    continue
+
+                if line.startswith(b'data: '):
+                    try:
+                        data = json.loads(line[6:].decode('utf-8'))
+
+                        if data.get('code') == 202 and data.get('data', {}).get('type') == "chat":
+                            content = data.get('data', {}).get('content', '')
+                            if content:
+                                full_response += content
+
+                    except json.JSONDecodeError:
+                        pass
+
+            # Return the complete response in OpenAI format
+            return await generate_openai_response(full_response, request_id, chat_request.model)
 
     except HTTPException:
         raise
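For non-streaming calls the collected text is handed to generate_openai_response(), whose body is unchanged in this diff; judging by the streaming chunks above, it presumably returns the standard chat.completion object, roughly:

# Approximate shape of the non-streaming reply (field values are illustrative):
{
    "id": "chatcmpl-<request_id>",
    "object": "chat.completion",
    "created": 1700000000,
    "model": "claude-3.7-sonnet",
    "choices": [{
        "index": 0,
        "message": {"role": "assistant", "content": "<full_response>"},
        "finish_reason": "stop",
    }],
}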
@@ -394,13 +450,6 @@ async def get_account_balance(request: Request, admin_key: str = Header(None, al
 
     return result
 
-# Mock model routes
-@app.get("/v1/engines")
-@app.get("/v1/engines/{engine_id}")
-async def engines_handler():
-    """Compatibility shim for the legacy engines API"""
-    raise HTTPException(status_code=404, detail="引擎API已被弃用 请使用模型API")
-
 # Error handlers
 @app.exception_handler(404)
 async def not_found_handler(request, exc):
@@ -410,17 +459,7 @@
             "type": "not_found_error",
             "code": "not_found"
         }
-    }
-
-@app.exception_handler(500)
-async def server_error_handler(request, exc):
-    return {
-        "error": {
-            "message": f"服务器内部错误: {str(exc)}",
-            "type": "server_error",
-            "code": "internal_server_error"
-        }
-    }
+    }, 404
 
 # Startup event
 @app.on_event("startup")
@@ -428,9 +467,6 @@
     """Initialize the service at startup"""
     logger.info(f"OpenAI API代理服务已启动,可以接受请求")
     logger.info(f"支持多token轮询,请在Authorization头中使用英文逗号分隔多个token")
-    logger.info(f"服务地址: http://127.0.0.1:7860")
-    logger.info(f"OpenAI API格式请求示例: POST http://127.0.0.1:7860/v1/chat/completions")
-    logger.info(f"可用模型查询: GET http://127.0.0.1:7860/v1/models")
 
 # Main program
 if __name__ == "__main__":
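Since the external surface stays OpenAI-compatible, the official SDK can be pointed straight at the proxy. A usage sketch, assuming the service runs on port 7860 as before and tokenA/tokenB are your DeepSider tokens:

from openai import OpenAI

client = OpenAI(
    base_url="http://127.0.0.1:7860/v1",
    api_key="tokenA,tokenB",  # comma-separated tokens are rotated per request
)

resp = client.chat.completions.create(
    model="claude-3.7-sonnet",
    messages=[{"role": "user", "content": "Hello!"}],
)
print(resp.choices[0].message.content)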
 