nanoppa committed on
Commit
2e48acb
·
verified ·
1 Parent(s): ae9974b

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +59 -70
app.py CHANGED
@@ -6,30 +6,40 @@ import time
6
  import os
7
  import re
8
  import logging
 
9
 
10
  # 配置日志输出(可调整级别为DEBUG以获得更详细日志)
11
  logging.basicConfig(level=logging.INFO)
12
-
13
  _COOKIES = os.getenv("COOKIES", "")
 
14
  API_KEY = os.getenv("API_KEY", "linux.do")
15
- if not API_KEY:
16
- logging.warning("API_KEY 未设置!")
17
 
18
  app = Flask(__name__)
19
 
 
 
 
 
 
 
 
 
20
  @app.before_request
21
  def check_api_key():
22
- auth = request.headers.get("Authorization", "")
23
- if auth != f"Bearer {API_KEY}":
24
- logging.warning("未经授权的访问尝试,传入的 key: %s", auth)
25
  return jsonify({"success": False, "message": "Unauthorized: Invalid API key"}), 403
26
 
27
  @app.route('/v1/models', methods=['GET'])
28
  def get_models():
29
- logging.info("收到 /v1/models 请求")
30
- headers = {"Content-Type": "application/json", "Cookie": _COOKIES}
 
 
31
  response = requests.get('https://chat.akash.network/api/models', headers=headers)
32
  models_data = response.json()
 
33
  current_timestamp = int(time.time())
34
  converted_data = {
35
  "object": "list",
@@ -50,66 +60,66 @@ def get_models():
50
  "description": model.get("description"),
51
  "available": model.get("available")
52
  }
53
- for model in models_data.get("models", [])
54
  ]
55
  }
56
- logging.info("返回 /v1/models 响应: %s", json.dumps(converted_data, ensure_ascii=False))
57
  return jsonify(converted_data)
58
 
59
- def build_chunk(chat_id, model, token=None, finish_reason=None):
60
- """
61
- 构造单个 chunk 数据,符合 OpenAI API 流式响应格式。
62
- """
63
- chunk = {
64
- "id": f"chatcmpl-{chat_id}",
65
- "object": "chat.completion.chunk",
66
- "created": int(time.time()),
67
- "model": model,
68
- "choices": [{
69
- "delta": {"content": token} if token is not None else {},
70
- "index": 0,
71
- "finish_reason": finish_reason
72
- }]
73
- }
74
- return chunk
75
-
76
  def generate_stream(akash_response, chat_id, model):
77
  """
78
- 解析 Akash 接口的流式响应数据,并生成符合 OpenAI API 格式的 chunk 数据。
79
  """
80
  for line in akash_response.iter_lines():
81
  if not line:
82
  continue
83
  try:
84
  line_str = line.decode('utf-8').strip()
85
- parts = line_str.split(':', 1)
86
- if len(parts) != 2:
87
- logging.error("流数据格式异常: %s", line_str)
88
- continue
89
- msg_type, msg_data = parts
90
  if msg_type == '0':
91
  token = msg_data.strip()
 
92
  if token.startswith('"') and token.endswith('"'):
93
  token = token[1:-1].replace('\\"', '"')
94
  token = token.replace("\\n", "\n")
95
- chunk = build_chunk(chat_id, model, token=token, finish_reason=None)
96
- logging.debug("流式 chunk: %s", json.dumps(chunk, ensure_ascii=False))
 
 
 
 
 
 
 
 
 
 
97
  yield f"data: {json.dumps(chunk, ensure_ascii=False)}\n\n"
98
  elif msg_type in ['e', 'd']:
99
- chunk = build_chunk(chat_id, model, finish_reason="stop")
100
- logging.debug("流式结束 chunk: %s", json.dumps(chunk, ensure_ascii=False))
 
 
 
 
 
 
 
 
 
 
101
  yield f"data: {json.dumps(chunk, ensure_ascii=False)}\n\n"
102
  yield "data: [DONE]\n\n"
103
  break
104
  except Exception as ex:
105
- logging.error("处理流数据时出错: %s", ex)
106
  continue
107
 
108
  @app.route('/v1/chat/completions', methods=['POST'])
109
  def chat_completions():
110
  try:
111
  data = request.get_json()
112
- logging.info("收到 /v1/chat/completions 请求: %s", json.dumps(data, ensure_ascii=False))
113
  chat_id = str(uuid.uuid4()).replace('-', '')[:16]
114
  model = data.get('model', "DeepSeek-R1")
115
  akash_data = {
@@ -120,9 +130,11 @@ def chat_completions():
120
  "temperature": data.get('temperature', 0.6),
121
  "topP": data.get('top_p', 0.95)
122
  }
123
- headers = {"Content-Type": "application/json", "Cookie": _COOKIES}
124
- # 默认启用 stream 模式,但针对 AkashGen 模型关闭流式响应
125
- stream_flag = data.get('stream', False)
 
 
126
  if model == "AkashGen":
127
  stream_flag = False
128
 
@@ -132,7 +144,7 @@ def chat_completions():
132
  headers=headers,
133
  stream=stream_flag
134
  )
135
- logging.info("Akash API 响应状态: %s", akash_response.status_code)
136
 
137
  if stream_flag:
138
  return Response(
@@ -144,7 +156,6 @@ def chat_completions():
144
  }
145
  )
146
  else:
147
- # 非流式响应处理
148
  if model != "AkashGen":
149
  text_matches = re.findall(r'0:"(.*?)"', akash_response.text)
150
  parsed_text = "".join(text_matches)
@@ -158,39 +169,17 @@ def chat_completions():
158
  "finish_reason": "stop"
159
  }]
160
  }
161
- logging.info("非流式响应 payload: %s", json.dumps(response_payload, ensure_ascii=False))
162
  return Response(
163
  json.dumps(response_payload, ensure_ascii=False),
164
  status=akash_response.status_code,
165
  mimetype='application/json'
166
  )
167
  else:
168
- user_query = data.get('messages', [])[0]["content"]
169
- # 解决用dify或者new_api添加模型时报错
170
- if user_query == "ping":
171
- text_matches = re.findall(r'0:"(.*?)"', akash_response.text)
172
- parsed_text = "".join(text_matches)
173
- response_payload = {
174
- "object": "chat.completion",
175
- "created": int(time.time() * 1000),
176
- "model": model,
177
- "choices": [{
178
- "index": 0,
179
- "message": {"role": "assistant", "content": parsed_text},
180
- "finish_reason": "stop"
181
- }]
182
- }
183
- logging.info("非流式响应 payload: %s", json.dumps(response_payload, ensure_ascii=False))
184
- return Response(
185
- json.dumps(response_payload, ensure_ascii=False),
186
- status=akash_response.status_code,
187
- mimetype='application/json'
188
- )
189
  match = re.search(r"jobId='([^']+)'", akash_response.text)
190
  if match:
191
  job_id = match.group(1)
192
  logging.info("AkashGen jobId: %s", job_id)
193
- # 轮询图片生成状态
194
  while True:
195
  try:
196
  img_response = requests.get(
@@ -198,7 +187,7 @@ def chat_completions():
198
  headers=headers
199
  )
200
  img_data = img_response.json()
201
- if img_data and img_data[0]["status"] == "completed":
202
  response_payload = {
203
  "object": "chat.completion",
204
  "created": int(time.time() * 1000),
@@ -212,7 +201,7 @@ def chat_completions():
212
  "finish_reason": "stop"
213
  }]
214
  }
215
- logging.info("AkashGen 完成后的 payload: %s", json.dumps(response_payload, ensure_ascii=False))
216
  return Response(
217
  json.dumps(response_payload, ensure_ascii=False),
218
  status=akash_response.status_code,
@@ -228,7 +217,7 @@ def chat_completions():
228
  return jsonify({"error": "当前官方服务异常"}), 500
229
 
230
  except Exception as e:
231
- logging.exception("chat_completions 处理过程中出现异常:")
232
  return jsonify({"error": str(e)}), 500
233
 
234
  if __name__ == '__main__':
 
6
  import os
7
  import re
8
  import logging
9
+ from itertools import cycle
10
 
11
  # 配置日志输出(可调整级别为DEBUG以获得更详细日志)
12
  logging.basicConfig(level=logging.INFO)
 
13
  _COOKIES = os.getenv("COOKIES", "")
14
+
15
  API_KEY = os.getenv("API_KEY", "linux.do")
 
 
16
 
17
  app = Flask(__name__)
18
 
19
+ COOKIES = _COOKIES.split(',')
20
+ iterator = cycle(COOKIES)
21
+
22
+ cookie_index = 0
23
+
24
+ def get_cookie():
25
+ return next(iterator)
26
+
27
  @app.before_request
28
  def check_api_key():
29
+ key = request.headers.get("Authorization")
30
+ if key != "Bearer " + API_KEY:
31
+ logging.warning("Unauthorized access attempt with key: %s", key)
32
  return jsonify({"success": False, "message": "Unauthorized: Invalid API key"}), 403
33
 
34
  @app.route('/v1/models', methods=['GET'])
35
  def get_models():
36
+ logging.info("Received /v1/models request")
37
+ _cookie = get_cookie()
38
+ logging.info(_cookie[:50])
39
+ headers = {"Content-Type": "application/json", "Cookie": _cookie}
40
  response = requests.get('https://chat.akash.network/api/models', headers=headers)
41
  models_data = response.json()
42
+ print(models_data)
43
  current_timestamp = int(time.time())
44
  converted_data = {
45
  "object": "list",
 
60
  "description": model.get("description"),
61
  "available": model.get("available")
62
  }
63
+ for model in models_data
64
  ]
65
  }
66
+ logging.info("Response for /v1/models: %s", json.dumps(converted_data, ensure_ascii=False))
67
  return jsonify(converted_data)
68
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
69
  def generate_stream(akash_response, chat_id, model):
70
  """
71
+ 解析 Akash 接口的流式响应数据,并生成符合 OpenAI API 流返回格式的 chunk 数据。
72
  """
73
  for line in akash_response.iter_lines():
74
  if not line:
75
  continue
76
  try:
77
  line_str = line.decode('utf-8').strip()
78
+ msg_type, msg_data = line_str.split(':', 1)
 
 
 
 
79
  if msg_type == '0':
80
  token = msg_data.strip()
81
+ # 去掉两边的引号并处理转义字符
82
  if token.startswith('"') and token.endswith('"'):
83
  token = token[1:-1].replace('\\"', '"')
84
  token = token.replace("\\n", "\n")
85
+ chunk = {
86
+ "id": f"chatcmpl-{chat_id}",
87
+ "object": "chat.completion.chunk",
88
+ "created": int(time.time()),
89
+ "model": model,
90
+ "choices": [{
91
+ "delta": {"content": token},
92
+ "index": 0,
93
+ "finish_reason": None
94
+ }]
95
+ }
96
+ logging.info("Streaming chunk: %s", json.dumps(chunk, ensure_ascii=False))
97
  yield f"data: {json.dumps(chunk, ensure_ascii=False)}\n\n"
98
  elif msg_type in ['e', 'd']:
99
+ chunk = {
100
+ "id": f"chatcmpl-{chat_id}",
101
+ "object": "chat.completion.chunk",
102
+ "created": int(time.time()),
103
+ "model": model,
104
+ "choices": [{
105
+ "delta": {},
106
+ "index": 0,
107
+ "finish_reason": "stop"
108
+ }]
109
+ }
110
+ logging.info("Streaming finish chunk: %s", json.dumps(chunk, ensure_ascii=False))
111
  yield f"data: {json.dumps(chunk, ensure_ascii=False)}\n\n"
112
  yield "data: [DONE]\n\n"
113
  break
114
  except Exception as ex:
115
+ logging.error("Error processing stream line: %s", ex)
116
  continue
117
 
118
  @app.route('/v1/chat/completions', methods=['POST'])
119
  def chat_completions():
120
  try:
121
  data = request.get_json()
122
+ logging.info("Received /v1/chat/completions request: %s", json.dumps(data, ensure_ascii=False))
123
  chat_id = str(uuid.uuid4()).replace('-', '')[:16]
124
  model = data.get('model', "DeepSeek-R1")
125
  akash_data = {
 
130
  "temperature": data.get('temperature', 0.6),
131
  "topP": data.get('top_p', 0.95)
132
  }
133
+ _cookie = get_cookie()
134
+ logging.info(_cookie[:50])
135
+ headers = {"Content-Type": "application/json", "Cookie": _cookie}
136
+ # 默认 stream 模式开启,但针对 AkashGen 模型关闭流式响应
137
+ stream_flag = data.get('stream', True)
138
  if model == "AkashGen":
139
  stream_flag = False
140
 
 
144
  headers=headers,
145
  stream=stream_flag
146
  )
147
+ logging.info("Akash API response status: %s", akash_response.status_code)
148
 
149
  if stream_flag:
150
  return Response(
 
156
  }
157
  )
158
  else:
 
159
  if model != "AkashGen":
160
  text_matches = re.findall(r'0:"(.*?)"', akash_response.text)
161
  parsed_text = "".join(text_matches)
 
169
  "finish_reason": "stop"
170
  }]
171
  }
172
+ logging.info("Non-stream response payload: %s", json.dumps(response_payload, ensure_ascii=False))
173
  return Response(
174
  json.dumps(response_payload, ensure_ascii=False),
175
  status=akash_response.status_code,
176
  mimetype='application/json'
177
  )
178
  else:
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
179
  match = re.search(r"jobId='([^']+)'", akash_response.text)
180
  if match:
181
  job_id = match.group(1)
182
  logging.info("AkashGen jobId: %s", job_id)
 
183
  while True:
184
  try:
185
  img_response = requests.get(
 
187
  headers=headers
188
  )
189
  img_data = img_response.json()
190
+ if img_data[0]["status"] == "completed":
191
  response_payload = {
192
  "object": "chat.completion",
193
  "created": int(time.time() * 1000),
 
201
  "finish_reason": "stop"
202
  }]
203
  }
204
+ logging.info("AkashGen completed response payload: %s", json.dumps(response_payload, ensure_ascii=False))
205
  return Response(
206
  json.dumps(response_payload, ensure_ascii=False),
207
  status=akash_response.status_code,
 
217
  return jsonify({"error": "当前官方服务异常"}), 500
218
 
219
  except Exception as e:
220
+ logging.exception("chat_completions error:")
221
  return jsonify({"error": str(e)}), 500
222
 
223
  if __name__ == '__main__':