Update app.py
app.py
CHANGED
@@ -33,13 +33,48 @@ def check_api_key():
 
 @app.route('/v1/models', methods=['GET'])
 def get_models():
+    global COOKIES
     logging.info("Received /v1/models request")
     _cookie = get_cookie()
-    logging.info(_cookie
+    logging.info(_cookie)
     headers = {"Content-Type": "application/json", "Cookie": _cookie}
+
     response = requests.get('https://chat.akash.network/api/models', headers=headers)
     models_data = response.json()
     print(models_data)
+    if models_data.get('success',False) == False:
+        response = requests.post('https://chat.akash.network/api/auth/session/refresh', headers=headers)
+        _new_cookies = response.cookies
+        cookie_str = '; '.join([f'{cookie.name}={cookie.value}' for cookie in _new_cookies])
+        COOKIES = [cookie_str if x == _cookie else x for x in COOKIES]
+        headers = {"Content-Type": "application/json", "Cookie": cookie_str}
+        response = requests.get('https://chat.akash.network/api/models', headers=headers)
+        models_data = response.json()
+        current_timestamp = int(time.time())
+        converted_data = {
+            "object": "list",
+            "data": [
+                {
+                    "id": model["id"],
+                    "object": "model",
+                    "created": current_timestamp,
+                    "owned_by": "openai" if "Meta" in model["id"] else "third_party",
+                    "permissions": [],
+                    "root": model["id"],
+                    "parent": None,
+                    "capabilities": {
+                        "temperature": model.get("temperature"),
+                        "top_p": model.get("top_p")
+                    },
+                    "name": model.get("name"),
+                    "description": model.get("description"),
+                    "available": model.get("available")
+                }
+                for model in models_data
+            ]
+        }
+        logging.info("Response for /v1/models: %s", json.dumps(converted_data, ensure_ascii=False))
+        return jsonify(converted_data)
     current_timestamp = int(time.time())
     converted_data = {
         "object": "list",
@@ -93,7 +128,7 @@ def generate_stream(akash_response, chat_id, model):
                     "finish_reason": None
                 }]
             }
-            logging.
+            logging.info("Streaming chunk: %s", json.dumps(chunk, ensure_ascii=False))
             yield f"data: {json.dumps(chunk, ensure_ascii=False)}\n\n"
         elif msg_type in ['e', 'd']:
             chunk = {
@@ -107,7 +142,7 @@ def generate_stream(akash_response, chat_id, model):
                     "finish_reason": "stop"
                 }]
             }
-            logging.
+            logging.info("Streaming finish chunk: %s", json.dumps(chunk, ensure_ascii=False))
             yield f"data: {json.dumps(chunk, ensure_ascii=False)}\n\n"
             yield "data: [DONE]\n\n"
             break
@@ -117,10 +152,13 @@ def generate_stream(akash_response, chat_id, model):
 
 @app.route('/v1/chat/completions', methods=['POST'])
 def chat_completions():
+    global COOKIES
     try:
         data = request.get_json()
         logging.info("Received /v1/chat/completions request: %s", json.dumps(data, ensure_ascii=False))
         chat_id = str(uuid.uuid4()).replace('-', '')[:16]
+        _cookie = get_cookie()
+        logging.info(_cookie)
         model = data.get('model', "DeepSeek-R1")
         akash_data = {
             "id": chat_id,
@@ -130,16 +168,13 @@ def chat_completions():
             "temperature": data.get('temperature', 0.6),
             "topP": data.get('top_p', 0.95)
         }
-        _cookie = get_cookie()
-        logging.info(_cookie[:50])
         headers = {"Content-Type": "application/json", "Cookie": _cookie}
+
         # Stream mode is on by default, but streaming responses are disabled for the AkashGen model
-        stream_flag = True
+        stream_flag = data.get('stream', True)
         if model == "AkashGen":
             stream_flag = False
-
-        stream_flag = data.get('stream',False)
-        logging.info("streamflag: %s", stream_flag)
+
         akash_response = requests.post(
             'https://chat.akash.network/api/chat',
             json=akash_data,
@@ -147,7 +182,19 @@ def chat_completions():
             stream=stream_flag
         )
         logging.info("Akash API response status: %s", akash_response.status_code)
-
+        if akash_response.status_code == 403:
+            response = requests.post('https://chat.akash.network/api/auth/session/refresh', headers=headers)
+            _new_cookies = response.cookies
+            cookie_str = '; '.join([f'{cookie.name}={cookie.value}' for cookie in _new_cookies])
+            COOKIES = [cookie_str if x == _cookie else x for x in COOKIES]
+            headers = {"Content-Type": "application/json", "Cookie": cookie_str}
+            akash_response = requests.post(
+                'https://chat.akash.network/api/chat',
+                json=akash_data,
+                headers=headers,
+                stream=stream_flag
+            )
+
         if stream_flag:
             return Response(
                 generate_stream(akash_response, chat_id, model),
@@ -198,7 +245,7 @@ def chat_completions():
                 "index": 0,
                 "message": {
                     "role": "assistant",
-                    "content": f"{img_data[0]['result']}"
+                    "content": f"Based on your description, here is a generated image:\n\n"
                 },
                 "finish_reason": "stop"
             }]
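The refresh-and-retry sequence added by this commit appears twice, once in get_models() and once in chat_completions(). A minimal sketch of how it could be factored into a single helper, reusing only names that appear in the diff (COOKIES, the cookie strings handed out by get_cookie(), and the session-refresh endpoint); the helper itself is hypothetical, not part of this commit, and assumes app.py's existing requests import:

def refresh_cookie(old_cookie):
    # Hypothetical helper, not in the commit: refresh an expired Akash
    # session and rotate the new cookie into the shared pool in place.
    global COOKIES
    headers = {"Content-Type": "application/json", "Cookie": old_cookie}
    response = requests.post('https://chat.akash.network/api/auth/session/refresh', headers=headers)
    # Collapse the returned cookie jar into a single Cookie header value.
    cookie_str = '; '.join(f'{c.name}={c.value}' for c in response.cookies)
    # Swap out the stale entry so later get_cookie() calls return the fresh one.
    COOKIES = [cookie_str if x == old_cookie else x for x in COOKIES]
    return cookie_str

Each retry site would then reduce to something like:

if akash_response.status_code == 403:
    headers["Cookie"] = refresh_cookie(_cookie)
    akash_response = requests.post('https://chat.akash.network/api/chat',
                                   json=akash_data, headers=headers, stream=stream_flag)

Note that the list swap is a read-modify-write on a shared global: if the app serves concurrent requests, two simultaneous refreshes of the same cookie can race, so guarding the swap with a threading.Lock would be a prudent follow-up.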