Ethanmaht committed
Commit e7e0269 · verified · 1 Parent(s): 2cc470a

Delete degpt.py

Files changed (1)
  1. degpt.py +0 -881
degpt.py DELETED
@@ -1,881 +0,0 @@
- """
- update time: 2025.01.09
- version: 0.1.125
- """
- import json
- import re
- import time
- from datetime import datetime, timedelta
- import requests
- from bs4 import BeautifulSoup
- from urllib.parse import urljoin, urlparse
- from typing import Set, Optional, List, Dict
-
- # Disable SSL warnings
- import urllib3
- urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
-
-
- debug = False
- # Global variables
- last_request_time = 0  # timestamp of the last request
- cache_duration = 14400  # cache lifetime in seconds (4 hours)
- '''Cached model data'''
- cached_models = {
-     "object": "list",
-     "data": [],
-     "version": "0.1.125",
-     "provider": "DeGPT",
-     "name": "DeGPT",
-     "default_locale": "en-US",
-     "status": True,
-     "time": 0
- }
-
- '''Base request addresses'''
- base_addrs = [
-     # "America"
-     "https://usa-chat.degpt.ai/api",
-     # "Singapore"
-     "https://singapore-chat.degpt.ai/api",
-     # "Korea"
-     "https://korea-chat.degpt.ai/api"
- ]
- '''Base domain'''
- base_url = 'https://singapore-chat.degpt.ai/api'
-
- '''Default model'''
- base_model = "Pixtral-124B"
- # Global variable: per-model call statistics
- # Format: {model_name: {"calls": call count, "fails": failure count, "last_fail": time of last failure}}
- MODEL_STATS: Dict[str, Dict] = {}
-
- def record_call(model_name: str, success: bool = True) -> None:
-     """
-     Record a model call.
-     Args:
-         model_name: name of the model
-         success: whether the call succeeded
-     """
-     global MODEL_STATS
-     if model_name not in MODEL_STATS:
-         MODEL_STATS[model_name] = {"calls": 0, "fails": 0, "last_fail": None}
-
-     stats = MODEL_STATS[model_name]
-     stats["calls"] += 1
-     if not success:
-         stats["fails"] += 1
-         stats["last_fail"] = datetime.now()
-
-
- def get_auto_model(cooldown_seconds: int = 300) -> str:
-     """Pick the model with the best success rate, skipping models still in their failure cooldown."""
-     try:
-         if not MODEL_STATS:
-             get_models()
-
-         best_model = None
-         best_rate = -1.0
-         now = datetime.now()
-
-         for name, stats in MODEL_STATS.items():
-             if stats.get("last_fail") and (now - stats["last_fail"]) < timedelta(seconds=cooldown_seconds):
-                 continue
-
-             total_calls = stats["calls"]
-             if total_calls > 0:
-                 success_rate = (total_calls - stats["fails"]) / total_calls
-                 if success_rate > best_rate:
-                     best_rate = success_rate
-                     best_model = name
-
-         default_model = best_model or base_model
-         if debug:
-             print(f"Selected model: {default_model}")
-         return default_model
-     except Exception as e:
-         if debug:
-             print(f"Model selection error: {e}")
-         return base_model
-
-
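As a quick illustration of the selection rule above (not part of the original file), this standalone sketch replays the success-rate computation on a hand-made stats dictionary; the model names are invented.

    # Illustrative only: replay get_auto_model's rule on a made-up MODEL_STATS.
    from datetime import datetime, timedelta

    sample_stats = {
        "Model-A": {"calls": 10, "fails": 1, "last_fail": None},            # 90% success, usable
        "Model-B": {"calls": 10, "fails": 0, "last_fail": datetime.now()},  # perfect, but cooling down
    }

    def pick(stats, cooldown_seconds=300):
        now = datetime.now()
        best, best_rate = None, -1.0
        for name, s in stats.items():
            if s["last_fail"] and (now - s["last_fail"]) < timedelta(seconds=cooldown_seconds):
                continue  # still in the failure cooldown window, skip
            if s["calls"]:
                rate = (s["calls"] - s["fails"]) / s["calls"]
                if rate > best_rate:
                    best, best_rate = name, rate
        return best

    print(pick(sample_stats))  # -> "Model-A": Model-B is skipped while cooling down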
- def reload_check():
-     """Check and refresh system state.
-     1. If the model cache is empty, refresh the model data.
-     2. Probe whether the current base_url is reachable; switch to another address if it is not.
-     """
-     global base_url, cached_models
-
-     try:
-         # Check the model cache
-         if not cached_models["data"]:
-             if debug:
-                 print("Model cache is empty, refreshing...")
-             get_models()
-
-         # Test request - small enough to be cheap, but still exercises the endpoint
-         test_payload = {
-             "model": base_model,
-             "messages": [{
-                 "role": "user",
-                 "content": [{"type": "text", "text": "test"}]
-             }],
-             "temperature": 0.7,
-             "max_tokens": 50,
-             "top_p": 1.0,
-             "frequency_penalty": 0.0,
-             "project": "DecentralGPT",
-             "stream": True
-         }
-
-         headers = {
-             'Accept': '*/*',
-             'Content-Type': 'application/json'
-         }
-
-         # Probe the current URL
-         try:
-             response = requests.post(
-                 f"{base_url}/v0/chat/completion/proxy",
-                 headers=headers,
-                 json=test_payload,
-                 timeout=5  # short timeout keeps the check fast
-             )
-             if response.status_code == 200 and response.content:
-                 if debug:
-                     print(f"Current URL is reachable: {base_url}")
-                 return
-         except Exception as e:
-             if debug:
-                 print(f"Current URL is unreachable: {e}")
-
-         # Probe the other URLs
-         for url in base_addrs:
-             if url == base_url:
-                 continue
-             try:
-                 response = requests.post(
-                     f"{url}/v0/chat/completion/proxy",
-                     headers=headers,
-                     json=test_payload,
-                     timeout=5
-                 )
-                 if response.status_code == 200 and response.content:
-                     base_url = url
-                     if debug:
-                         print(f"Switched to new URL: {base_url}")
-                     return
-             except Exception as e:
-                 if debug:
-                     print(f"URL {url} check failed: {e}")
-                 continue
-
-         if debug:
-             print("No URL is reachable; keeping the current one")
-
-     except Exception as e:
-         if debug:
-             print(f"System check failed: {e}")
-
-
- def _fetch_and_update_models():
-     """Thread-safe model fetching and cache updating"""
-     global cached_models
-     try:
-         get_from_js_v3()
-     except Exception as e:
-         print(f"{e}")
-     try:
-         get_alive_models()
-     except Exception as e:
-         print(f"{e}")
-
-
- def get_models():
-     """Model data retrieval with thread safety"""
-     global cached_models, last_request_time
-     current_time = time.time()
-     if (current_time - last_request_time) > cache_duration:
-         try:
-             # Update the timestamp before fetching to prevent concurrent updates
-             last_request_time = current_time
-             _fetch_and_update_models()
-         except Exception as e:
-             print(f"{e}")
-
-     return json.dumps(cached_models)
-
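The guard in get_models is a plain time-based cache; the standalone sketch below isolates that pattern (refresh_interval and fake_fetch are illustrative stand-ins, not names from the module).

    import time

    last_refresh = 0.0
    refresh_interval = 14400  # seconds, mirroring cache_duration above

    def fake_fetch():
        # Stand-in for _fetch_and_update_models(); illustrative only.
        print("refreshing cache...")

    def get_cached():
        global last_refresh
        now = time.time()
        if (now - last_refresh) > refresh_interval:
            # Claim the refresh slot first so concurrent callers skip it.
            last_refresh = now
            fake_fetch()
        return "cached payload"

    get_cached()  # first call refreshes
    get_cached()  # second call within the interval returns the cache untouched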
- def get_alive_models():
-     """
-     Fetch the live model list and update the global cache.
-     """
-     global cached_models, last_request_time
-
-     # Send a GET request
-     url = 'https://www.degpt.ai/api/config'
-     headers = {'Content-Type': 'application/json'}
-
-     response = requests.get(url, headers=headers)
-
-     # Check whether the request succeeded
-     if response.status_code == 200:
-         try:
-             data = response.json()  # parse the JSON response
-             default_models = data.get("default_models", "").split(",")  # default models as a list
-
-             # Current timestamp in seconds
-             timestamp_in_seconds = time.time()
-             # Converted to milliseconds (multiply by 1000)
-             timestamp_in_milliseconds = int(timestamp_in_seconds * 1000)
-             ## config
-             cached_models['version'] = data['version']
-             cached_models['provider'] = data['provider']
-             cached_models['name'] = data['provider']
-             cached_models['time'] = timestamp_in_milliseconds
-
-             if default_models:
-                 # print("\nExtracted model list:")
-                 existing_ids = {m.get('id') for m in cached_models["data"]}
-                 for model_id in default_models:
-                     record_call(model_id)
-                     if model_id and model_id not in existing_ids:
-                         model_data = {
-                             "id": model_id,
-                             "object": "model",
-                             "model": model_id,
-                             "created": timestamp_in_milliseconds,
-                             "owned_by": model_id.split("-")[0] if "-" in model_id else "unknown",
-                             "name": model_id,
-                             "description": '',
-                             "support": '',
-                             "tip": ''
-                         }
-                         cached_models["data"].append(model_data)
-             # Update the global cache
-             last_request_time = timestamp_in_seconds  # refresh the cache timestamp
-
-             # print("Fetched new model data:", models)
-         except json.JSONDecodeError as e:
-             print("JSON decode error:", e)
-     else:
-         print(f"Request failed, status code: {response.status_code}")
-
-
-
- # def parse_models_from_js(js_content: str) -> List[Dict]:
- #     """Parse model information out of JS content"""
- #     try:
- #         pattern = r'models\s*:\s*\[([^\]]+)\]'
- #         match = re.search(pattern, js_content)
- #
- #         if match:
- #             models_data = match.group(1)
- #             models_data = re.sub(r'(\w+):', r'"\1":', models_data)
- #             models_data = models_data.replace("'", '"')
- #             models_data = f"[{models_data}]"
- #
- #             try:
- #                 models = json.loads(models_data)
- #                 return models
- #             except json.JSONDecodeError as e:
- #                 return []
- #
- #             return []
- #
- #     except Exception as e:
- #         return []
- #
- #
- # def get_model_names_from_js(url="https://www.degpt.ai/", timeout: int = 60):
- #     global cached_models
- #     try:
- #         with async_playwright() as p:
- #             browser = p.chromium.launch(
- #                 headless=True,
- #                 args=['--no-sandbox']
- #             )
- #             context = browser.new_context(
- #                 viewport={'width': 1920, 'height': 1080},
- #                 user_agent='Mozilla/5.0 (Windows NT 10.0; Win64; x64) Chrome/119.0.0.0 Safari/537.36'
- #             )
- #             page = context.new_page()
- #
- #             def handle_response(response):
- #                 try:
- #                     if response.request.resource_type == "script":
- #                         content_type = response.headers.get('content-type', '').lower()
- #                         if 'javascript' in content_type:
- #                             js_content = response.text()
- #                             if 'models' in js_content:
- #                                 # print(f"Found a JS file containing model info: {response.url}")
- #                                 models = parse_models_from_js(js_content)
- #                                 if models:
- #                                     # print("\nExtracted model list:")
- #                                     existing_ids = {m.get('id') for m in cached_models["data"]}
- #                                     for model in models:
- #                                         model_id = model.get('model', '').strip()
- #                                         # print(f"- name: {model.get('name', '')}")
- #                                         # print(f"  model: {model.get('model', '')}")
- #                                         # print(f"  description: {model.get('desc', '')}")
- #                                         record_call(model_id)
- #                                         if model_id and model_id not in existing_ids:
- #                                             model_data = {
- #                                                 "id": model_id,
- #                                                 "object": "model",
- #                                                 "model": model_id,
- #                                                 "created": int(time.time()),
- #                                                 "owned_by": model_id.split("-")[0] if "-" in model_id else "unknown",
- #                                                 "name": model.get('name', ''),
- #                                                 "description": model.get('desc', ''),
- #                                                 "support": model.get('support', 'text'),
- #                                                 "tip": model.get('tip', '')
- #                                             }
- #                                             cached_models["data"].append(model_data)
- #                                             # print(f"Added new model: {model_id}")
- #                 except Exception as e:
- #                     print(f"Error while handling the response: {str(e)}")
- #                     logging.error(f"Response handling exception: {e}", exc_info=True)
- #
- #             page.on("response", handle_response)
- #
- #             try:
- #                 page.goto(url, timeout=timeout * 1000, wait_until='networkidle')
- #                 page.wait_for_timeout(5000)
- #             except Exception as e:
- #                 print(f"Page load error: {str(e)}")
- #                 logging.error(f"Page load exception: {e}", exc_info=True)
- #             finally:
- #                 browser.close()
- #     except Exception as e:
- #         print(f"Error during extraction: {str(e)}")
- #         get_from_js()
- #         raise
-
-
- # def parse_models_and_urls_from_js(js_content: str) -> Dict:
- #     """Parse model and URL information from JS content"""
- #     result = {"models": [], "urls": []}
- #
- #     try:
- #         # Extract model information
- #         model_pattern = r'models\s*:\s*\[([^\]]+)\]'
- #         model_match = re.search(model_pattern, js_content)
- #
- #         if model_match:
- #             models_data = model_match.group(1)
- #             models_data = re.sub(r'(\w+):', r'"\1":', models_data)
- #             models_data = models_data.replace("'", '"')
- #             models_data = f"[{models_data}]"
- #
- #             try:
- #                 models = json.loads(models_data)
- #                 result["models"] = models
- #             except json.JSONDecodeError as e:
- #                 print(f"Error decoding models JSON: {e}")
- #
- #         # Extract URL information
- #         url_pattern = r'\{name\s*:\s*"([^"]+)"\s*,\s*url\s*:\s*"([^"]+)"'
- #         url_matches = re.findall(url_pattern, js_content)
- #
- #         if url_matches:
- #             urls = [{"name": name, "url": url} for name, url in url_matches]
- #             result["urls"] = urls
- #
- #         return result
- #
- #     except Exception as e:
- #         print(f"Error parsing JS content: {e}")
- #         return result
-
- # # version2
- # def get_from_js():
- #     global cached_models
- #     # Fetch the JavaScript file content
- #     # url = "https://www.degpt.ai/_app/immutable/chunks/index.83d92b06.js"
- #     # url = "https://www.degpt.ai/_app/immutable/chunks/index.4aecf75a.js"
- #     url = "https://www.degpt.ai/_app/immutable/chunks/index.e0d19999.js"
- #     response = requests.get(url)
- #
- #     if response.status_code == 200:
- #         js_content = response.text
- #         models = parse_models_from_js(js_content)
- #         # xx = parse_models_and_urls_from_js(js_content)
- #         if models:
- #             if debug:
- #                 print("get_from_js extracted model list:")
- #             existing_ids = {m.get('id') for m in cached_models["data"]}
- #             for model in models:
- #                 model_id = model.get('model', '').strip()
- #                 if debug:
- #                     print(f"get_from_js name: {model.get('name', '')}")
- #                     print(f"get_from_js model: {model.get('model', '')}")
- #                     print(f"get_from_js description: {model.get('desc', '')}")
- #                 record_call(model_id)
- #                 if model_id and model_id not in existing_ids:
- #                     model_data = {
- #                         "id": model_id,
- #                         "object": "model",
- #                         "model": model_id,
- #                         "created": int(time.time()),
- #                         "owned_by": model_id.split("-")[0] if "-" in model_id else "unknown",
- #                         "name": model.get('name', ''),
- #                         "description": model.get('desc', ''),
- #                         "support": model.get('support', 'text'),
- #                         "tip": model.get('tip', '')
- #                     }
- #                     cached_models["data"].append(model_data)
- #                     if debug:
- #                         print(f"get_from_js added new model: {model_id}")
- ###############
-
-
- def get_from_js_v3():
-     global cached_models
-     models = analyze()
-     # print(models)
-     if models:
-         # Collect the IDs that already exist
-         existing_ids = {m.get('id') for m in cached_models["data"]}
-         for model in models:
-             # {'name': 'Llama3.3', 'model': 'Llama3.3-70B', 'tip': 'Llama3.3', 'support': 'text', 'desc': 'Suitable for most tasks'}
-             if debug:
-                 print(model)
-             model_id = model.get('model', '').strip()
-             if model_id and model_id not in existing_ids:
-                 model_data = {
-                     "id": model_id,
-                     "object": "model",
-                     "model": model_id,
-                     "created": int(time.time()) * 1000,
-                     "owned_by": model_id.split("-")[0] if "-" in model_id else "unknown",
-                     "name": model.get('name', ''),
-                     "description": model.get('desc', ''),
-                     "support": model.get('support', 'text'),
-                     "tip": model.get('tip', '')
-                 }
-                 cached_models["data"].append(model_data)
-                 record_call(model_id)
-                 if debug:
-                     print(f"Added new model: {model_id}")
-     pass
-
-
- def fetch_content(url: str) -> Optional[str]:
-     """Fetch page content"""
-     try:
-         headers = {'User-Agent': 'Mozilla/5.0'}
-         response = requests.get(url, headers=headers, timeout=10)
-         response.raise_for_status()
-         return response.text
-     except requests.RequestException as e:
-         if debug:
-             print(f"Failed to fetch page {url}: {e}")
-         return None
-
-
- def parse_models_from_js(content: str, url: str) -> List[Dict]:
-     """Parse model information out of JS content"""
-     try:
-         # Match the model data
-         pattern = r'models\s*:\s*\[([^\]]+)\]'
-         match = re.search(pattern, content)
-
-         if not match:
-             return []
-
-         # Massage the snippet into JSON
-         models_data = match.group(1)
-         models_data = re.sub(r'(\w+):', r'"\1":', models_data)
-         models_data = models_data.replace("'", '"')
-         models_data = f"[{models_data}]"
-
-         try:
-             models = json.loads(models_data)
-             if isinstance(models, list) and models and not (len(models) == 1 and not models[0]):
-                 # if debug:
-                 #     print(f"Parsed model data:\n{json.dumps(models, indent=2)}")
-                 return models
-         except json.JSONDecodeError:
-             # Try to repair the JSON
-             fixed_data = _fix_json_errors(models_data)
-             try:
-                 return json.loads(fixed_data)
-             except json.JSONDecodeError as e:
-                 if debug:
-                     print(f"JSON parsing failed {url}: {e}")
-
-         return []
-     except Exception as e:
-         if debug:
-             print(f"Failed to parse models {url}: {e}")
-         return []
-
-
- def _fix_json_errors(json_str: str) -> str:
-     """Fix common JSON formatting errors"""
-     # Strip comments
-     json_str = re.sub(r'//.*?\n|/\*.*?\*/', '', json_str, flags=re.S)
-     # Quote keys and bare values
-     json_str = re.sub(r'(\w+)\s*:', r'"\1":', json_str)
-     json_str = re.sub(r':\s*([^",\s\{\}\[\]]+)', r': "\1"', json_str)
-     # Normalize booleans and null
-     json_str = re.sub(r':\s*true\b', ': true', json_str)
-     json_str = re.sub(r':\s*false\b', ': false', json_str)
-     json_str = re.sub(r':\s*null\b', ': null', json_str)
-     # Drop trailing commas
-     json_str = re.sub(r',(\s*[\]}])', r'\1', json_str)
-     return json_str
-
-
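To make the regex handling above concrete, here is a standalone example of the same JS-object-literal-to-JSON conversion that parse_models_from_js performs; the js string is invented for illustration.

    import json
    import re

    # Made-up JS fragment in the shape the site's bundle is expected to contain.
    js = "models: [{name: 'Llama3.3', model: 'Llama3.3-70B', support: 'text'}]"

    inner = re.search(r'models\s*:\s*\[([^\]]+)\]', js).group(1)
    inner = re.sub(r'(\w+):', r'"\1":', inner)   # quote the bare keys
    inner = inner.replace("'", '"')              # single -> double quotes
    print(json.loads(f"[{inner}]"))
    # [{'name': 'Llama3.3', 'model': 'Llama3.3-70B', 'support': 'text'}]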
- # """version2"""
- def extract_links(content: str, url: str) -> Set[str]:
-     """
-     Extract all valid links from a page, handling special cases and invalid URLs.
-
-     Args:
-         content: page content
-         url: URL of the current page
-
-     Returns:
-         Set[str]: the set of valid extracted links
-     """
-     links = set()
-     base_domain = urlparse(url).netloc
-
-     def is_valid_path(path: str) -> bool:
-         """
-         Check whether a path is valid.
-
-         Args:
-             path: the path to validate
-
-         Returns:
-             bool: whether the path is valid
-         """
-         # Patterns for invalid paths
-         invalid_patterns = [
-             r'\$\{.*?\}',   # template literals
-             r'\{.*?\}',     # other placeholders
-             r'^\(.*?\)',    # starts with an opening parenthesis
-             r'^\).*?',      # starts with a closing parenthesis
-             r'^[\s\.,]+$',  # only whitespace or punctuation
-             r'^[a-z]+\=',   # attribute assignment
-             r'^\w+\(',      # function call
-         ]
-
-         if not path or path.isspace():
-             return False
-
-         return not any(re.search(pattern, path) for pattern in invalid_patterns)
-
-     def clean_path(path: str) -> Optional[str]:
-         """
-         Clean and normalize a path.
-
-         Args:
-             path: the raw path
-
-         Returns:
-             Optional[str]: the cleaned path, or None if invalid
-         """
-         if not path:
-             return None
-
-         # Basic cleanup
-         path = path.strip()
-         path = re.sub(r'\s+', '', path)
-         path = re.sub(r'[\(\)]', '', path)
-         path = re.sub(r',.*$', '', path)
-
-         # Handle relative paths
-         if path.startswith('./'):
-             path = path[2:]
-         elif path.startswith('/'):
-             path = path[1:]
-
-         # Check the file extension
-         valid_extensions = ('.js', '.css', '.html', '.htm', '.json')
-         if not any(path.endswith(ext) for ext in valid_extensions):
-             return None
-
-         return path
-
-     try:
-         if not content or url.endswith(('.json', '.css', '.png', '.jpg', '.jpeg', '.gif', '.svg')):
-             return links
-
-         # Handle HTML content
-         soup = BeautifulSoup(content, 'html.parser')
-
-         # Extract href links
-         for tag in soup.find_all(href=True):
-             href = tag['href']
-             if is_valid_path(href):
-                 cleaned_href = clean_path(href)
-                 if cleaned_href:
-                     full_url = urljoin(url, cleaned_href)
-                     if urlparse(full_url).netloc == base_domain:
-                         links.add(full_url)
-                         if debug:
-                             print(f"Added valid link: {full_url}")
-
-         # Handle script tags
-         for tag in soup.find_all('script', src=True):
-             src = tag['src']
-             if is_valid_path(src):
-                 cleaned_src = clean_path(src)
-                 if cleaned_src:
-                     full_url = urljoin(url, cleaned_src)
-                     if urlparse(full_url).netloc == base_domain:
-                         links.add(full_url)
-
-         # Handle the body of JS files
-         if url.endswith('.js'):
-             # Various import patterns
-             import_patterns = [
-                 (r'import\s*[^"\']*["\']([^"\']+)["\']', 1),
-                 (r'from\s+["\']([^"\']+)["\']', 1),
-                 (r'import\s*\(["\']([^"\']+)["\']\)', 1),
-                 (r'require\s*\(["\']([^"\']+)["\']\)', 1),
-                 (r'(?:url|src|href)\s*:\s*["\']([^"\']+)["\']', 1),
-                 (r'@import\s+["\']([^"\']+)["\']', 1),
-                 (r'url\(["\']?([^"\'()]+)["\']?\)', 1),
-             ]
-
-             for pattern, group in import_patterns:
-                 for match in re.finditer(pattern, content):
-                     path = match.group(group)
-                     if is_valid_path(path):
-                         cleaned_path = clean_path(path)
-                         if cleaned_path:
-                             full_url = urljoin(url, cleaned_path)
-                             if urlparse(full_url).netloc == base_domain:
-                                 links.add(full_url)
-
-             # Handle array-style imports
-             for array_match in re.finditer(r'\[([\s\S]*?)\]', content):
-                 array_content = array_match.group(1)
-                 paths = re.findall(r'["\']([^"\']+?\.[a-zA-Z0-9]+)["\']', array_content)
-                 for path in paths:
-                     if is_valid_path(path):
-                         cleaned_path = clean_path(path)
-                         if cleaned_path and not cleaned_path.startswith(('http:', 'https:', 'data:', 'blob:')):
-                             full_url = urljoin(url, cleaned_path)
-                             if urlparse(full_url).netloc == base_domain:
-                                 links.add(full_url)
-
-     except Exception as e:
-         if debug:
-             print(f"Failed to extract links from {url}: {e}")
-
-     return links
-
-
- def analyze(_bb_url="https://www.degpt.ai/") -> List[Dict]:
-     """Analyze the site content"""
-     visited_urls = set()
-     found_models = []
-
-     def _analyze(url: str) -> bool:
-         if url in visited_urls:
-             return False
-
-         visited_urls.add(url)
-         if debug:
-             print(f"Analyzing: {url}")
-
-         content = fetch_content(url)
-         if not content:
-             return False
-
-         models = parse_models_from_js(content, url)
-         if models:
-             found_models.extend(models)
-             return True
-
-         for link in extract_links(content, url):
-             if _analyze(link):
-                 return True
-
-         return False
-
-     _analyze(_bb_url)
-     return found_models
-
-
-
- ################
-
- def is_model_available(model_id: str, cooldown_seconds: int = 300) -> bool:
-     """
-     Check whether a model is in the model list and has not failed recently.
-
-     Args:
-         model_id: identifier of the model to check
-         cooldown_seconds: failure cooldown in seconds, 300 by default
-
-     Returns:
-         bool: True if the model is available, False otherwise
-
-     Note:
-         - When MODEL_STATS is empty, get_models() is called automatically to refresh the data.
-         - A model that is still within its cooldown window is reported as unavailable.
-     """
-     global MODEL_STATS
-
-     # Load model data if MODEL_STATS is empty
-     if not MODEL_STATS:
-         get_models()
-
-     # The model must appear in the statistics
-     if model_id not in MODEL_STATS:
-         return False
-
-     # Check the cooldown window
-     stats = MODEL_STATS[model_id]
-     if stats["last_fail"]:
-         time_since_failure = datetime.now() - stats["last_fail"]
-         if time_since_failure < timedelta(seconds=cooldown_seconds):
-             return False
-
-     return True
-
-
- def get_model_by_autoupdate(model_id: Optional[str] = None, cooldown_seconds: int = 300) -> Optional[str]:
-     """
-     Check whether the given model_id is available; if not, return the model with the highest success rate.
-
-     Args:
-         model_id: the requested model ID (optional)
-         cooldown_seconds: failure cooldown in seconds, 300 by default
-
-     Returns:
-         str | None: an available model ID, or None if no model is available
-
-     Note:
-         - When MODEL_STATS is empty, get_models() is called automatically to refresh the data.
-         - If the requested model_id is available, it is returned as-is.
-         - Otherwise the model with the highest success rate is returned.
-     """
-     global MODEL_STATS
-
-     # Load model data if MODEL_STATS is empty
-     if not MODEL_STATS:
-         get_models()
-
-     # If a model_id was given and it is available, return it directly
-     if model_id and is_model_available(model_id, cooldown_seconds):
-         return model_id
-
-     # Otherwise fall back to the available model with the best success rate
-     return get_auto_model(cooldown_seconds=cooldown_seconds)
-
-
- def is_chatgpt_format(data):
-     """Check if the data is in the expected ChatGPT format"""
-     try:
-         # If the data is a string, try to parse it as JSON
-         if isinstance(data, str):
-             try:
-                 data = json.loads(data)
-             except json.JSONDecodeError:
-                 return False  # If the string can't be parsed, it's not in the expected format
-
-         # Now check if data is a dictionary and contains the necessary structure
-         if isinstance(data, dict):
-             # Ensure 'choices' is a list and the first item has a 'message' field
-             if "choices" in data and isinstance(data["choices"], list) and len(data["choices"]) > 0:
-                 if "message" in data["choices"][0]:
-                     return True
-     except Exception as e:
-         print(f"Error checking ChatGPT format: {e}")
-
-     return False
-
-
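For reference, the smallest response shape that this check accepts looks like the sketch below; the message content is arbitrary.

    sample = {
        "choices": [
            {"message": {"role": "assistant", "content": "Hello!"}}
        ]
    }
    # is_chatgpt_format(sample) -> True
    # is_chatgpt_format('{"choices": [{"message": {}}]}') -> True (JSON strings are parsed first)
    # is_chatgpt_format({"choices": []}) -> False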
- def chat_completion_message(
-         user_prompt,
-         user_id: str = None,
-         session_id: str = None,
-         system_prompt="You are a helpful assistant.",
-         model=base_model,
-         project="DecentralGPT", stream=False,
-         temperature=0.3, max_tokens=1024, top_p=0.5,
-         frequency_penalty=0, presence_penalty=0):
-     """Session isolation (per-user conversations, single sessions) may be added later."""
-     messages = [
-         {"role": "system", "content": system_prompt},
-         {"role": "user", "content": user_prompt}
-     ]
-     return chat_completion_messages(messages,
-                                     model=model,
-                                     user_id=user_id,
-                                     session_id=session_id,
-                                     project=project, stream=stream,
-                                     temperature=temperature, max_tokens=max_tokens, top_p=top_p,
-                                     frequency_penalty=frequency_penalty,
-                                     presence_penalty=presence_penalty)
-
-
- def chat_completion_messages(
-         messages,
-         model=base_model,
-         user_id: str = None,
-         session_id: str = None,
-         project="DecentralGPT", stream=False, temperature=0.3, max_tokens=1024, top_p=0.5,
-         frequency_penalty=0, presence_penalty=0):
-     # Make sure the model is valid
-     if not model or model == "auto":
-         model = get_auto_model()
-     else:
-         model = get_model_by_autoupdate(model)
-     if debug:
-         print(f"Resolved model: {model}")
-     headers = {
-         'sec-ch-ua-platform': '"macOS"',
-         'Referer': 'https://www.degpt.ai/',
-         'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36',
-         'sec-ch-ua': '"Google Chrome";v="131", "Chromium";v="131", "Not_A Brand";v="24"',
-         'DNT': '1',
-         'Content-Type': 'application/json',
-         'sec-ch-ua-mobile': '?0'
-     }
-     payload = {
-         # make sure ok
-         "model": model,
-         "messages": messages,
-         "project": project,
-         "stream": stream,
-         "temperature": temperature,
-         "max_tokens": max_tokens,
-         "top_p": top_p,
-         "frequency_penalty": frequency_penalty,
-         "presence_penalty": presence_penalty
-     }
-     # print(json.dumps(headers, indent=4))
-     # print(json.dumps(payload, indent=4))
-     return chat_completion(headers, payload)
-
-
- def chat_completion(headers, payload):
-     """Send the user request and keep the context"""
-     try:
-         url = f'{base_url}/v0/chat/completion/proxy'
-         response = requests.post(url, headers=headers, json=payload)
-         response.encoding = 'utf-8'
-         response.raise_for_status()
-         return response.json()
-     except requests.exceptions.RequestException as e:
-         print(f"Request failed: {e}")
-         return "Request failed, please check the network or the request parameters."
-     except (KeyError, IndexError) as e:
-         print(f"Error while parsing the response: {e}")
-         return "Failed to parse the response content."
-     return {}
-
- # if __name__ == '__main__':
- #     get_from_js_v3()
- #     print("get_models: ", get_models())
- #     print("cached_models:", cached_models)
- #     print("base_url: ", base_url)
- #     print("MODEL_STATS:", MODEL_STATS)
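For context, a minimal sketch of how the removed module was presumably driven, assuming it is restored locally and importable as degpt; the prompt text is illustrative.

    import json

    import degpt  # assumes the deleted module is restored locally as degpt.py

    models = json.loads(degpt.get_models())      # refreshes the cache if it is stale
    print([m["id"] for m in models["data"]])

    reply = degpt.chat_completion_message(
        user_prompt="Say hello in one sentence.",  # illustrative prompt
        model="auto",                              # let get_auto_model() pick by success rate
    )
    if degpt.is_chatgpt_format(reply):
        print(reply["choices"][0]["message"]["content"])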