rr1 committed
Commit de99d2e · verified · 1 parent: b37b1f3

Update app.py

Files changed (1): app.py (+539 −539)
app.py CHANGED
The old and new revisions of app.py are identical except for one line in ApiClient.prepare_chat_request. The payload previously hard-coded its "config" field, silently discarding the clamped sampling options computed by Utils.config_opt; it now forwards them:

-        "config": {"model": self.model_config["id"]}
+        "config": opt_config

The updated file in full:
import json
import os
import random
import time
import uuid
import asyncio

import httpx
from starlette.responses import Response, StreamingResponse
from fastapi import FastAPI, Request
from fastapi.middleware.cors import CORSMiddleware

# Configuration constants
CONFIG = {
    "API": {
        "BASE_URL": "https://fragments.e2b.dev",
        "API_KEY": os.getenv("API_KEY", "sk-123456")
    },
    "RETRY": {
        "MAX_ATTEMPTS": 1,
        "DELAY_BASE": 1000
    },
    "MODEL_CONFIG": {
        "o1": {
            "id": "o1",
            "provider": "OpenAI",
            "providerId": "openai",
            "name": "o1",
            "multiModal": True,
            "Systemprompt": "",
            "opt_max": {
                "temperatureMax": 2,
                "max_tokensMax": 0,
                "presence_penaltyMax": 2,
                "frequency_penaltyMax": 2,
                "top_pMax": 1,
                "top_kMax": 500
            }
        },
        "o1-mini": {
            "id": "o1",
            "provider": "OpenAI",
            "providerId": "openai",
            "name": "o1-mini",
            "multiModal": False,
            "Systemprompt": "",
            "opt_max": {
                "temperatureMax": 2,
                "max_tokensMax": 0,
                "presence_penaltyMax": 2,
                "frequency_penaltyMax": 2,
                "top_pMax": 1,
                "top_kMax": 500
            }
        },
        "o3-mini": {
            "id": "o3-mini",
            "provider": "OpenAI",
            "providerId": "openai",
            "name": "o3 mini",
            "multiModal": False,
            "Systemprompt": "",
            "opt_max": {
                "temperatureMax": 2,
                "max_tokensMax": 0,
                "presence_penaltyMax": 2,
                "frequency_penaltyMax": 2,
                "top_pMax": 1,
                "top_kMax": 500
            }
        },
        "gpt-4.5-preview": {
            "id": "gpt-4.5-preview",
            "provider": "OpenAI",
            "providerId": "openai",
            "name": "GPT-4.5",
            "multiModal": True,
            "Systemprompt": "",
            "opt_max": {
                "temperatureMax": 2,
                "max_tokensMax": 0,
                "presence_penaltyMax": 2,
                "frequency_penaltyMax": 2,
                "top_pMax": 1,
                "top_kMax": 500
            }
        },
        "gpt-4o": {
            "id": "gpt-4o",
            "provider": "OpenAI",
            "providerId": "openai",
            "name": "GPT-4o",
            "multiModal": True,
            "Systemprompt": "",
            "opt_max": {
                "temperatureMax": 2,
                "max_tokensMax": 16380,
                "presence_penaltyMax": 2,
                "frequency_penaltyMax": 2,
                "top_pMax": 1,
                "top_kMax": 500
            }
        },
        "gpt-4-turbo": {
            "id": "gpt-4-turbo",
            "provider": "OpenAI",
            "providerId": "openai",
            "name": "GPT-4 Turbo",
            "multiModal": True,
            "Systemprompt": "",
            "opt_max": {
                "temperatureMax": 2,
                "max_tokensMax": 16380,
                "presence_penaltyMax": 2,
                "frequency_penaltyMax": 2,
                "top_pMax": 1,
                "top_kMax": 500
            }
        },
        "gemini-1.5-pro": {
            "id": "gemini-1.5-pro-002",
            "provider": "Google Vertex AI",
            "providerId": "vertex",
            "name": "Gemini 1.5 Pro",
            "multiModal": True,
            "Systemprompt": "",
            "opt_max": {
                "temperatureMax": 2,
                "max_tokensMax": 8192,
                "presence_penaltyMax": 2,
                "frequency_penaltyMax": 2,
                "top_pMax": 1,
                "top_kMax": 500
            }
        },
        "gemini-exp-1121": {
            "id": "gemini-exp-1121",
            "provider": "Google Generative AI",
            "providerId": "google",
            "name": "Gemini Experimental 1121",
            "multiModal": True,
            "Systemprompt": "",
            "opt_max": {
                "temperatureMax": 2,
                "max_tokensMax": 8192,
                "presence_penaltyMax": 2,
                "frequency_penaltyMax": 2,
                "top_pMax": 1,
                "top_kMax": 40
            }
        },
        "gemini-2.0-flash-exp": {
            "id": "models/gemini-2.0-flash-exp",
            "provider": "Google Generative AI",
            "providerId": "google",
            "name": "Gemini 2.0 Flash",
            "multiModal": True,
            "Systemprompt": "",
            "opt_max": {
                "temperatureMax": 2,
                "max_tokensMax": 8192,
                "presence_penaltyMax": 2,
                "frequency_penaltyMax": 2,
                "top_pMax": 1,
                "top_kMax": 40
            }
        },
        "claude-3-7-sonnet-latest": {
            "id": "claude-3-5-sonnet-latest",
            "provider": "Anthropic",
            "providerId": "anthropic",
            "name": "Claude 3.7 Sonnet",
            "multiModal": True,
            "Systemprompt": "",
            "opt_max": {
                "temperatureMax": 1,
                "max_tokensMax": 8192,
                "presence_penaltyMax": 2,
                "frequency_penaltyMax": 2,
                "top_pMax": 1,
                "top_kMax": 500
            }
        },
        "claude-3-5-sonnet-latest": {
            "id": "claude-3-5-sonnet-latest",
            "provider": "Anthropic",
            "providerId": "anthropic",
            "name": "Claude 3.5 Sonnet",
            "multiModal": True,
            "Systemprompt": "",
            "opt_max": {
                "temperatureMax": 1,
                "max_tokensMax": 8192,
                "presence_penaltyMax": 2,
                "frequency_penaltyMax": 2,
                "top_pMax": 1,
                "top_kMax": 500
            }
        },
        "claude-3-5-haiku-latest": {
            "id": "claude-3-5-haiku-latest",
            "provider": "Anthropic",
            "providerId": "anthropic",
            "name": "Claude 3.5 Haiku",
            "multiModal": False,
            "Systemprompt": "",
            "opt_max": {
                "temperatureMax": 1,
                "max_tokensMax": 8192,
                "presence_penaltyMax": 2,
                "frequency_penaltyMax": 2,
                "top_pMax": 1,
                "top_kMax": 500
            }
        }
    },
    "DEFAULT_HEADERS": {
        'accept': '*/*',
        'accept-language': 'zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6',
        'cache-control': 'no-cache',
        'content-type': 'application/json',
        'origin': 'https://fragments.e2b.dev',
        'pragma': 'no-cache',
        'priority': 'u=1, i',
        'referer': 'https://fragments.e2b.dev/',
        'sec-ch-ua': '"Not(A:Brand";v="99", "Microsoft Edge";v="133", "Chromium";v="133"',
        'sec-ch-ua-mobile': '?0',
        'sec-ch-ua-platform': '"Windows"',
        'sec-fetch-dest': 'empty',
        'sec-fetch-mode': 'cors',
        'sec-fetch-site': 'same-origin',
        'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/133.0.0.0 Safari/537.36 Edg/133.0.0.0'
    },
    "MODEL_PROMPT": "Chatting with users and starting role-playing, the most important thing is to pay attention to their latest messages, use only 'text' to output the chat text reply content generated for user messages, and finally output it in code"
}


# Utility helpers
class Utils:
    @staticmethod
    def uuidv4():
        return str(uuid.uuid4())

    @staticmethod
    async def config_opt(params, model_config):
        # Clamp caller-supplied sampling options to the model's configured maxima
        if not model_config.get("opt_max"):
            return None

        options_map = {
            "temperature": "temperatureMax",
            "max_tokens": "max_tokensMax",
            "presence_penalty": "presence_penaltyMax",
            "frequency_penalty": "frequency_penaltyMax",
            "top_p": "top_pMax",
            "top_k": "top_kMax"
        }

        constrained_params = {}
        for key, value in params.items():
            max_key = options_map.get(key)
            if (max_key and
                    max_key in model_config["opt_max"] and
                    value is not None):
                constrained_params[key] = min(value, model_config["opt_max"][max_key])

        return constrained_params

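# For example (a sketch of the clamping behaviour): with gpt-4o's limits
# (temperatureMax=2, max_tokensMax=16380),
#     await Utils.config_opt({"temperature": 3.5, "max_tokens": 999999, "top_p": None},
#                            CONFIG["MODEL_CONFIG"]["gpt-4o"])
# returns {"temperature": 2, "max_tokens": 16380}; None values and
# unrecognised keys are dropped.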

# Upstream API client
class ApiClient:
    def __init__(self, model_id):
        if model_id not in CONFIG["MODEL_CONFIG"]:
            raise ValueError(f"Unsupported model: {model_id}")
        self.model_config = CONFIG["MODEL_CONFIG"][model_id]

    def process_message_content(self, content):
        # Normalise OpenAI-style message content (string, list of parts, or dict) to plain text
        if isinstance(content, str):
            return content
        if isinstance(content, list):
            return "\n".join(item["text"] for item in content if item.get("type") == "text")
        if isinstance(content, dict):
            return content.get("text")
        return None

    async def prepare_chat_request(self, request, config=None):
        opt_config = config or {"model": self.model_config["id"]}
        return {
            "userID": Utils.uuidv4(),
            "messages": await self.transform_messages(request),
            "template": {
                "text": {
                    "name": CONFIG["MODEL_PROMPT"],
                    "lib": [""],
                    "file": "pages/ChatWithUsers.txt",
                    "instructions": self.model_config["Systemprompt"],
                    "port": None
                }
            },
            "model": {
                "id": self.model_config["id"],
                "provider": self.model_config["provider"],
                "providerId": self.model_config["providerId"],
                "name": self.model_config["name"],
                "multiModal": self.model_config["multiModal"]
            },
            "config": opt_config
        }

    async def transform_messages(self, request):
        # Merge consecutive messages that share a role
        merged_messages = []
        for current in request["messages"]:
            current_content = self.process_message_content(current["content"])
            if current_content is None:
                continue

            if (merged_messages and
                    merged_messages[-1]["role"] == current["role"]):
                last_content = self.process_message_content(merged_messages[-1]["content"])
                if last_content is not None:
                    merged_messages[-1]["content"] = f"{last_content}\n{current_content}"
                    continue

            merged_messages.append(current)

        # Map to the upstream format; system messages are forwarded with the user role
        messages = []
        for msg in merged_messages:
            if msg["role"] in ["system", "user"]:
                messages.append({
                    "role": "user",
                    "content": [{
                        "type": "text",
                        "text": self.process_message_content(msg["content"])
                    }]
                })
            elif msg["role"] == "assistant":
                messages.append({
                    "role": "assistant",
                    "content": [{
                        "type": "text",
                        "text": self.process_message_content(msg["content"])
                    }]
                })

        return messages

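# For example (a sketch of the merging behaviour): two consecutive user messages
#   [{"role": "user", "content": "Hello"}, {"role": "user", "content": "How are you?"}]
# are merged into a single user entry whose text is "Hello\nHow are you?", while a
# preceding system message would be forwarded as a separate user-role entry.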

# Response handling
class ResponseHandler:
    @staticmethod
    async def handle_stream_response(chat_message, model):
        # Re-chunk the complete upstream reply into an OpenAI-style SSE stream
        async def stream_generator():
            index = 0
            while True:
                # All content has been sent
                if index >= len(chat_message):
                    yield "data: [DONE]\n\n"
                    break

                chunk_size = random.randint(15, 29)
                chunk = chat_message[index:index + chunk_size]

                event_data = {
                    "id": Utils.uuidv4(),
                    "object": "chat.completion.chunk",
                    "created": int(time.time()),
                    "model": model,
                    "choices": [{
                        "index": 0,
                        "delta": {"content": chunk},
                        "finish_reason": "stop" if index + chunk_size >= len(chat_message) else None
                    }]
                }

                try:
                    yield f"data: {json.dumps(event_data)}\n\n"
                except Exception as error:
                    raise Exception(f"JSON serialization failed: {error}")

                index += chunk_size
                await asyncio.sleep(0.05)  # 50 ms delay between chunks

        return StreamingResponse(
            stream_generator(),
            media_type="text/event-stream",
            headers={
                "Cache-Control": "no-cache",
                "Connection": "keep-alive",
            }
        )

    @staticmethod
    async def handle_normal_response(chat_message, model):
        response_data = {
            "id": Utils.uuidv4(),
            "object": "chat.completion",
            "created": int(time.time()),
            "model": model,
            "choices": [{
                "index": 0,
                "message": {
                    "role": "assistant",
                    "content": chat_message
                },
                "finish_reason": "stop"
            }],
            "usage": None
        }

        return Response(
            content=json.dumps(response_data),
            media_type="application/json"
        )

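# Each streamed frame is a standard chat.completion.chunk, e.g. (sketch):
#   data: {"id": "...", "object": "chat.completion.chunk", "created": 1700000000,
#          "model": "gpt-4o", "choices": [{"index": 0, "delta": {"content": "Hel"},
#          "finish_reason": null}]}
# with a final "data: [DONE]" frame terminating the stream.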

# FastAPI application
app = FastAPI()

# CORS middleware
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)


# Model list endpoint
@app.get("/hf/v1/models")
async def get_models():
    return {
        "object": "list",
        "data": [
            {
                "id": model,
                "object": "model",
                "created": int(time.time()),
                "owned_by": "e2b",
            }
            for model in CONFIG["MODEL_CONFIG"]
        ]
    }


# Chat completions endpoint
@app.post("/hf/v1/chat/completions")
async def chat_completions(request: Request):
    try:
        # Verify authorization
        auth_header = request.headers.get("authorization", "")
        auth_token = auth_header.replace("Bearer ", "") if auth_header else ""

        if auth_token != CONFIG["API"]["API_KEY"]:
            return Response(
                content=json.dumps({"error": "Unauthorized"}),
                status_code=401,
                media_type="application/json"
            )

        # Parse the request body
        request_body = await request.json()
        model = request_body.get("model")
        temperature = request_body.get("temperature")
        max_tokens = request_body.get("max_tokens")
        presence_penalty = request_body.get("presence_penalty")
        frequency_penalty = request_body.get("frequency_penalty")
        top_p = request_body.get("top_p")
        top_k = request_body.get("top_k")
        stream = request_body.get("stream", False)

        # Build the client first so that unsupported models fail fast
        api_client = ApiClient(model)

        # Clamp the sampling options to the model's limits
        config_opt = await Utils.config_opt(
            {
                "temperature": temperature,
                "max_tokens": max_tokens,
                "presence_penalty": presence_penalty,
                "frequency_penalty": frequency_penalty,
                "top_p": top_p,
                "top_k": top_k
            },
            api_client.model_config
        )

        # config_opt = {"model": model if CONFIG["MODEL_CONFIG"].get(model) else "gpt-4o"}
        # Prepare the request
        request_payload = await api_client.prepare_chat_request(request_body, config_opt)

        # Forward the request to the upstream service
        async with httpx.AsyncClient() as client:
            response = await client.post(
                f"{CONFIG['API']['BASE_URL']}/api/chat",
                headers=CONFIG["DEFAULT_HEADERS"],
                json=request_payload
            )
            response_data = response.json()

        # Extract the reply; the upstream returns it under 'code' or 'text'
        chat_message = (response_data.get("code") or response_data.get("text") or "").strip() or None

        if not chat_message:
            raise Exception("No response from upstream service")

        # Return a streaming or a plain response
        if stream:
            return await ResponseHandler.handle_stream_response(chat_message, model)
        else:
            return await ResponseHandler.handle_normal_response(chat_message, model)

    except Exception as error:
        return Response(
            content=json.dumps({
                "error": {
                    "message": f"{str(error)} The request failed, possibly because the context exceeded the limit or another error occurred; please retry later.",
                    "type": "server_error",
                    "param": None,
                    "code": getattr(error, "code", None)
                }
            }),
            status_code=500,
            media_type="application/json"
        )


# Catch-all 404 handler
@app.api_route("/{path:path}", methods=["GET", "POST", "PUT", "DELETE"])
async def not_found(request: Request, path: str):
    return Response(
        content="The service is running; please use a valid request path.",
        status_code=404,
        headers={"Access-Control-Allow-Origin": "*"}
    )


# Start the server
if __name__ == "__main__":
    import uvicorn
    uvicorn.run(app, host="0.0.0.0", port=7860)
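
For an end-to-end check, a minimal client call against this proxy could look like the following (a sketch, assuming the server runs locally on port 7860 and API_KEY is left at its default sk-123456; both are deployment-specific):

import httpx

# Hypothetical local test values; adjust the URL and key to your deployment.
resp = httpx.post(
    "http://localhost:7860/hf/v1/chat/completions",
    headers={"Authorization": "Bearer sk-123456"},
    json={
        "model": "gpt-4o",
        "messages": [{"role": "user", "content": "Hello!"}],
        "stream": False,
    },
    timeout=60,
)
print(resp.json()["choices"][0]["message"]["content"])

Setting "stream": True instead returns the same reply re-chunked as the server-sent events described above.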