coium committed (verified)
Commit dba6b4c · 1 Parent(s): a9fb8cd

Create app.py

Files changed (1)
  1. app.py +347 -0
app.py ADDED
@@ -0,0 +1,347 @@
+ import json
+ import os
+ import re
+ import time
+ import uuid
+ import asyncio
+ import requests
+ import logging
+
+ from typing import Optional, List
+ from pydantic import BaseModel
+ from fastapi import FastAPI, Request, Response, Depends, HTTPException, status
+ from fastapi.responses import JSONResponse, StreamingResponse
+ from fastapi.middleware.cors import CORSMiddleware
+
+ # Configure logging
+ logging.basicConfig(
+     level=logging.INFO,
+     format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
+     datefmt='%Y-%m-%d %H:%M:%S'
+ )
+ logger = logging.getLogger(__name__)
+
+ # Set the API key (override via the API_KEY environment variable)
+ API_KEY = os.getenv("API_KEY", "linux.do")
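+ # Clients must present this key as a Bearer token, e.g. "Authorization: Bearer linux.do"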
+
+ # Create the FastAPI app
+ app = FastAPI()
+
+ # Add CORS middleware
+ app.add_middleware(
+     CORSMiddleware,
+     allow_origins=["*"],
+     allow_credentials=True,
+     allow_methods=["*"],
+     allow_headers=["*"],
+ )
+
+ # Define request models
+ class ChatMessage(BaseModel):
+     role: str
+     content: str
+
+ class ChatCompletionRequest(BaseModel):
+     model: str
+     messages: List[ChatMessage]
+     temperature: Optional[float] = 0.6
+     top_p: Optional[float] = 0.95
+     stream: Optional[bool] = True
+     system_message: Optional[str] = "You are a helpful assistant."
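+ # NOTE: the endpoints below read the raw JSON body rather than parsing it through
+ # these models, so ChatCompletionRequest mainly documents the expected request shape.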
+
+ # Fetch a session cookie from the Akash auth endpoint
+ async def get_session_cookie():
+     try:
+         response = requests.get(
+             "https://chat.akash.network/api/auth/session",
+             headers={"Content-Type": "application/json"}
+         )
+         if response.cookies:
+             return '; '.join([f"{cookie.name}={cookie.value}" for cookie in response.cookies])
+         return ""
+     except Exception as e:
+         logger.error(f"Failed to fetch session cookie: {e}")
+         return ""
+
+ # API key verification dependency
+ async def verify_api_key(request: Request):
+     auth_header = request.headers.get("Authorization")
+     if auth_header != "Bearer " + API_KEY:
+         raise HTTPException(
+             status_code=status.HTTP_403_FORBIDDEN,
+             detail={"success": False, "message": "Unauthorized: Invalid API key"}
+         )
+     return True
+
+ # Model list endpoint
+ @app.get("/v1/models")
+ async def get_models(authorized: bool = Depends(verify_api_key)):
+     # Get a fresh session cookie
+     cookie = await get_session_cookie()
+     # Build request headers
+     headers = {"Content-Type": "application/json", "Cookie": cookie}
+     # Query the Akash API and convert its model list to OpenAI format
+     response = requests.get("https://chat.akash.network/api/models", headers=headers)
+     models_data = response.json()
+     current_timestamp = int(time.time())
+     converted_data = {
+         "object": "list",
+         "data": [
+             {
+                 "id": model["id"],
+                 "object": "model",
+                 "created": current_timestamp,
+                 "owned_by": "openai" if "Meta" in model["id"] else "third_party",
+                 "permissions": [],
+                 "root": model["id"],
+                 "parent": None,
+                 "capabilities": {
+                     "temperature": model["temperature"],
+                     "top_p": model["top_p"],
+                 },
+                 "name": model["name"],
+                 "description": model["description"],
+                 "available": model["available"],
+             }
+             for model in models_data.get("models", [])
+         ],
+     }
+     return converted_data
+
+ # Chat completions endpoint
+ @app.post("/v1/chat/completions")
+ async def chat_completions(request: Request, authorized: bool = Depends(verify_api_key)):
+     logger.info("Handling chat completion request")
+     try:
+         # Read the request body
+         body = await request.json()
+
+         # Get a fresh session cookie
+         cookie = await get_session_cookie()
+
+         # Validate the model parameter
+         if not body.get("model"):
+             return JSONResponse(
+                 status_code=status.HTTP_400_BAD_REQUEST,
+                 content={"error": "Missing model parameter"}
+             )
+
+         # Fetch the list of available models
+         models_response = requests.get(
+             "https://chat.akash.network/api/models",
+             headers={"Content-Type": "application/json", "Cookie": cookie}
+         )
+         models_data = models_response.json()
+         available_models = [model["id"] for model in models_data.get("models", [])]
+
+         # Normalize the requested model name with a case-insensitive match
+         requested_model = body.get("model")
+         matched_model = next((model for model in available_models if model.lower() == requested_model.lower()), None)
+
+         if not matched_model:
+             return JSONResponse(
+                 status_code=status.HTTP_400_BAD_REQUEST,
+                 content={"error": f"Model '{requested_model}' not found"}
+             )
+
+         # Generate a unique chat ID (also reused as the OpenAI-style completion id)
+         chat_id = str(uuid.uuid4()).replace("-", "")[:16]
+
+         # Build the request payload in Akash format
+         akash_data = {
+             "id": chat_id,
+             "messages": body.get("messages", []),
+             "model": matched_model,  # use the normalized model name
+             "system": body.get("system_message", "You are a helpful assistant."),
+             "temperature": body.get("temperature", 0.6),
+             "topP": body.get("top_p", 0.95),
+         }
+
+         # Build request headers
+         headers = {"Content-Type": "application/json", "Cookie": cookie}
+         _stream = body.get("stream", True)
+
+         # Special handling for AkashGen: images come back via a polled job, so disable streaming
+         if body.get("model", "DeepSeek-R1") == "AkashGen":
+             _stream = False
+
+         # Send the request to the Akash API
+         response = requests.post(
+             "https://chat.akash.network/api/chat",
+             json=akash_data,
+             headers=headers,
+             stream=_stream,
+         )
+
+         logger.debug(f"Akash API response: {response.text}")
+
+         # Handle streaming responses
+         if _stream is True:
+             async def generate():
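+                 # Upstream lines are framed as "type:payload": type "0" carries content
+                 # deltas, while "e"/"d" lines mark the end of the stream (handled below)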
+                 content_buffer = ""
+                 for line in response.iter_lines():
+                     if not line:
+                         continue
+
+                     try:
+                         # Parse the line; the format is "type:json_data"
+                         line_str = line.decode("utf-8")
+                         msg_type, msg_data = line_str.split(":", 1)
+
+                         # Handle content messages
+                         if msg_type == "0":
+                             # Strip only the surrounding double quotes
+                             if msg_data.startswith('"') and msg_data.endswith('"'):
+                                 msg_data = msg_data.replace('\\"', '"')
+                                 msg_data = msg_data[1:-1]
+                             msg_data = msg_data.replace("\\n", "\n")
+                             content_buffer += msg_data
+
+                             # Build an OpenAI-format response chunk
+                             chunk = {
+                                 "id": f"chatcmpl-{chat_id}",
+                                 "object": "chat.completion.chunk",
+                                 "created": int(time.time()),
+                                 "model": body.get("model", "DeepSeek-R1"),
+                                 "choices": [
+                                     {
+                                         "delta": {"content": msg_data},
+                                         "index": 0,
+                                         "finish_reason": None,
+                                     }
+                                 ],
+                             }
+                             yield f"data: {json.dumps(chunk)}\n\n"
+
+                         # Handle end-of-stream messages
+                         elif msg_type in ["e", "d"]:
+                             chunk = {
+                                 "id": f"chatcmpl-{chat_id}",
+                                 "object": "chat.completion.chunk",
+                                 "created": int(time.time()),
+                                 "model": body.get("model", "DeepSeek-R1"),
+                                 "choices": [
+                                     {"delta": {}, "index": 0, "finish_reason": "stop"}
+                                 ],
+                             }
+                             yield f"data: {json.dumps(chunk)}\n\n"
+                             yield "data: [DONE]\n\n"
+                             break
+
+                     except Exception as e:
+                         logger.error(f"Error while processing a response line: {e}")
+                         continue
+
+             return StreamingResponse(
+                 generate(),
+                 media_type="text/event-stream",
+                 headers={
+                     "Cache-Control": "no-cache",
+                     "Connection": "keep-alive",
+                     "Content-Type": "text/event-stream",
+                 },
+             )
+         else:
+             # Handle non-streaming responses
+             if body.get("model", "DeepSeek-R1") != "AkashGen":
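+                 # Even with streaming disabled, the upstream replies in the line-framed
+                 # 0:"..." format, so re-assemble the content chunks with a regex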
+                 text_matches = re.findall(r'0:"(.*?)"', response.text)
+                 parsed_text = "".join(text_matches)
+                 response_data = {
+                     "object": "chat.completion",
+                     "created": int(time.time()),
+                     "model": body.get("model", "DeepSeek-R1"),
+                     "choices": [
+                         {
+                             "index": 0,
+                             "message": {"role": "assistant", "content": parsed_text},
+                             "finish_reason": "stop",
+                         }
+                     ],
+                 }
+                 logger.debug(json.dumps(response_data, ensure_ascii=False))
+                 return Response(
+                     content=json.dumps(response_data, ensure_ascii=False),
+                     status_code=response.status_code,
+                     headers={
+                         "Cache-Control": "no-cache",
+                         "Connection": "keep-alive",
+                         "Content-Type": "application/json",
+                     },
+                 )
+             else:
+                 # Handle the AkashGen model (image generation)
+                 match = re.search(r"jobId='([^']+)'", response.text)
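+                 # AkashGen replies embed a jobId='...' marker; when present, the job is
+                 # polled via /api/image-status below until the generated image is ready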
+                 job_id = None
+                 if match:
+                     job_id = match.group(1)
+                     logger.info(f"Got image generation job ID: {job_id}")
+
+                     # Poll the image generation status
+                     async def check_image_status():
+                         while True:
+                             try:
+                                 _img_response = requests.get(
+                                     "https://chat.akash.network/api/image-status?ids=" + job_id,
+                                     headers=headers,
+                                 )
+                                 _data = _img_response.json()
+
+                                 # Check whether the image has finished generating
+                                 if _data[0]["status"] == "completed":
+                                     logger.info(f"Image generation completed: {job_id}")
+                                     return _data[0]["result"]
+                                 else:
+                                     logger.debug(f"Image still generating: {job_id}")
+                             except Exception as e:
+                                 logger.error(f"Exception while requesting image status: {e}")
+
+                             # Poll again after 5 seconds
+                             await asyncio.sleep(5)
+
+                     # Wait for image generation to complete
+                     image_url = await check_image_status()
+
+                     # Return a response containing the image URL
+                     response_data = {
+                         "object": "chat.completion",
+                         "created": int(time.time()),
+                         "model": body.get("model", "DeepSeek-R1"),
+                         "choices": [
+                             {
+                                 "index": 0,
+                                 "message": {
+                                     "role": "assistant",
+                                     "content": f"Here is an image generated from your description:\n\n![Generated image]({image_url})",
+                                 },
+                                 "finish_reason": "stop",
+                             }
+                         ],
+                     }
+
+                     return Response(
+                         content=json.dumps(response_data, ensure_ascii=False),
+                         status_code=response.status_code,
+                         headers={
+                             "Cache-Control": "no-cache",
+                             "Connection": "keep-alive",
+                             "Content-Type": "application/json",
+                         },
+                     )
+                 else:
+                     return JSONResponse(
+                         status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
+                         content={"error": "Upstream service error: no image job ID returned"}
+                     )
+
+     except Exception as e:
+         logger.error(f"Error while handling chat completion request: {e}")
+         return JSONResponse(
+             status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
+             content={"error": str(e)}
+         )
+
+ # Start the server
+ if __name__ == "__main__":
+     import uvicorn
+     logger.info("Starting FastAPI server on port 5200")
+     uvicorn.run("app:app", host="0.0.0.0", port=5200, reload=True)