Upload 4 files
- README.md +20 -0
- app.py +201 -32
- requirements.txt +0 -0
README.md
CHANGED
@@ -13,10 +13,30 @@ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-
 
 # Abacus Chat Proxy
 
+> 📢 This project is an improved fork of [orbitoo/abacus_chat_proxy](https://github.com/orbitoo/abacus_chat_proxy)
+>
+> Special thanks to orbitoo for providing the original project!
+>
+> This fork adds Docker deployment support, one-click Hugging Face deployment, automatic keep-alive, and more
+
 A proxy server for relaying API requests.
 
+[](https://huggingface.co/spaces/malt666/abacus_chat_proxy?duplicate=true)
+
 ## 🚀 Quick Start
 
+### One-click deployment on Hugging Face
+
+1. Click the "Deploy to Hugging Face Spaces" button above
+2. Log in to your Hugging Face account (register one if you don't have one yet)
+3. Set a name for your Space on the page that opens
+4. After the Space is created, add the following entries under the Space's Settings -> Repository Secrets:
+   - `covid_1`: your session ID
+   - `cookie_1`: your cookies string
+   - `password`: (optional) an access password
+5. Wait for the automatic deployment to finish
+6. **Get the API URL**: once deployment succeeds, click the three-dot button in the top-right corner, choose "Embed this Space", and the "Direct URL" shown in the "Embed this Space" dialog is the link you use to call the API
+
 ### Running locally
 
 #### Windows users
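Step 6's Direct URL can then be used like any OpenAI-compatible base URL. A minimal client sketch follows; it assumes the proxy exposes a `POST /v1/chat/completions` route alongside the `GET /v1/models` endpoint shown in app.py below, and that the optional `password` secret is checked as a Bearer token. The Space URL and model id are placeholders.

```python
import requests

BASE_URL = "https://your-username-your-space.hf.space"  # placeholder: your Direct URL
PASSWORD = "your-password"  # placeholder: only needed if you set the `password` secret
HEADERS = {"Authorization": f"Bearer {PASSWORD}"}

# List the models the proxy maps (GET /v1/models, as shown on the status page)
print(requests.get(f"{BASE_URL}/v1/models", headers=HEADERS).json())

# OpenAI-style non-streaming chat completion (assumed route)
resp = requests.post(
    f"{BASE_URL}/v1/chat/completions",
    headers=HEADERS,
    json={
        "model": "gpt-4o-mini",  # placeholder: use an id returned by /v1/models
        "messages": [{"role": "user", "content": "Hello!"}],
        "stream": False,
    },
)
print(resp.json()["choices"][0]["message"]["content"])
```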
app.py
CHANGED
@@ -12,6 +12,7 @@ import jwt
 import os
 import threading
 from datetime import datetime
+import tiktoken  # import tiktoken to count token usage
 
 app = Flask(__name__)
 
@@ -42,6 +43,15 @@ SENTRY_TRACE = f"{TRACE_ID}-80d9d2538b2682d0"
 health_check_counter = 0
 
 
+# Usage statistics
+model_usage_stats = {}  # per-model call counts
+total_tokens = {
+    "prompt": 0,      # input tokens
+    "completion": 0,  # output tokens
+    "total": 0        # total tokens
+}
+
+
 # HTML template
 INDEX_HTML = """
 <!DOCTYPE html>
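For orientation, here is roughly what these globals hold after a few proxied requests, based on how `update_model_stats` (added at the bottom of this diff) fills them in; the model id and numbers are made up:

```python
# Illustrative state after a few requests (hypothetical values)
model_usage_stats = {
    "gpt-4o-mini": {           # hypothetical model id
        "count": 3,            # calls routed to this model
        "prompt_tokens": 1204,
        "completion_tokens": 2930,
        "total_tokens": 4134,
    },
}
total_tokens = {"prompt": 1204, "completion": 2930, "total": 4134}
```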
@@ -81,6 +91,11 @@ INDEX_HTML = """
     text-align: center;
     font-size: 2.5rem;
 }
+h2 {
+    color: #3a4a5c;
+    margin: 1.5rem 0 1rem;
+    font-size: 1.5rem;
+}
 .status-card {
     background: #f8f9fa;
     border-radius: 8px;
@@ -142,6 +157,32 @@ INDEX_HTML = """
     padding: 0.25rem 0.5rem;
     border-radius: 4px;
 }
+.usage-table {
+    width: 100%;
+    border-collapse: collapse;
+    margin-top: 1rem;
+}
+.usage-table th, .usage-table td {
+    padding: 0.5rem;
+    text-align: left;
+    border-bottom: 1px solid #dee2e6;
+}
+.usage-table th {
+    background-color: #e9ecef;
+    font-weight: 600;
+    color: #495057;
+}
+.usage-table tbody tr:hover {
+    background-color: #f1f3f5;
+}
+.token-count {
+    font-family: monospace;
+    color: #0366d6;
+}
+.call-count {
+    font-family: monospace;
+    color: #28a745;
+}
 @media (max-width: 768px) {
     .container {
         padding: 1rem;
@@ -183,8 +224,47 @@ INDEX_HTML = """
         </div>
     </div>
 
+    <h2>🔍 Model Usage Statistics</h2>
+    <div class="status-card">
+        <div class="status-item">
+            <span class="status-label">Total tokens used</span>
+            <span class="status-value token-count">{{ total_tokens.total|int }}</span>
+        </div>
+        <div class="status-item">
+            <span class="status-label">Input tokens</span>
+            <span class="status-value token-count">{{ total_tokens.prompt|int }}</span>
+        </div>
+        <div class="status-item">
+            <span class="status-label">Output tokens</span>
+            <span class="status-value token-count">{{ total_tokens.completion|int }}</span>
+        </div>
+
+        <table class="usage-table">
+            <thead>
+                <tr>
+                    <th>Model</th>
+                    <th>Calls</th>
+                    <th>Input tokens</th>
+                    <th>Output tokens</th>
+                    <th>Total tokens</th>
+                </tr>
+            </thead>
+            <tbody>
+                {% for model, stats in model_stats.items() %}
+                <tr>
+                    <td>{{ model }}</td>
+                    <td class="call-count">{{ stats.count }}</td>
+                    <td class="token-count">{{ stats.prompt_tokens|int }}</td>
+                    <td class="token-count">{{ stats.completion_tokens|int }}</td>
+                    <td class="token-count">{{ stats.total_tokens|int }}</td>
+                </tr>
+                {% endfor %}
+            </tbody>
+        </table>
+    </div>
+
     <div class="endpoints">
-        <h2
+        <h2>📡 API Endpoints</h2>
         <div class="endpoint-item">
             <p>List models:</p>
             <code class="endpoint-url">GET /v1/models</code>
@@ -541,6 +621,10 @@ def send_message(message, model, think=False):
         (session, cookies, session_token, conversation_id, model_map) = get_user_data()
         trace_id, sentry_trace = generate_trace_id()
 
+        # Count input tokens
+        prompt_tokens = num_tokens_from_string(message)
+        completion_buffer = io.StringIO()  # collect the whole output for token counting
+
         headers = {
             "accept": "text/event-stream",
             "accept-language": "zh-CN,zh;q=0.9",
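One thing to verify when applying this hunk: the new code calls `io.StringIO()`, but the import hunk at the top of this diff only adds `tiktoken`. If app.py does not already import `io` somewhere outside the shown context, the import block would also need:

```python
import io  # required for io.StringIO; add only if not already imported
```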
@@ -617,28 +701,37 @@ def send_message(message, model, think=False):
                         elif think_state == 2:
                             id = data.get("messageId")
                             segment = "<think>\n" + data.get("segment", "")
+                            completion_buffer.write(segment)  # collect output
                             yield f"data: {json.dumps({'object': 'chat.completion.chunk', 'choices': [{'delta': {'content': segment}}]})}\n\n"
                             think_state = 1
                         elif think_state == 1:
                             if data.get("messageId") != id:
                                 segment = data.get("segment", "")
+                                completion_buffer.write(segment)  # collect output
                                 yield f"data: {json.dumps({'object': 'chat.completion.chunk', 'choices': [{'delta': {'content': segment}}]})}\n\n"
                             else:
                                 segment = "\n</think>\n" + data.get("segment", "")
+                                completion_buffer.write(segment)  # collect output
                                 yield f"data: {json.dumps({'object': 'chat.completion.chunk', 'choices': [{'delta': {'content': segment}}]})}\n\n"
                                 think_state = 0
                         else:
                             segment = data.get("segment", "")
+                            completion_buffer.write(segment)  # collect output
                             yield f"data: {json.dumps({'object': 'chat.completion.chunk', 'choices': [{'delta': {'content': segment}}]})}\n\n"
                     else:
                         segment = extract_segment(decoded_line)
                         if segment:
+                            completion_buffer.write(segment)  # collect output
                             yield f"data: {json.dumps({'object': 'chat.completion.chunk', 'choices': [{'delta': {'content': segment}}]})}\n\n"
             except Exception as e:
                 print(f"Error while processing the response: {e}")
 
             yield "data: " + json.dumps({"object": "chat.completion.chunk", "choices": [{"delta": {}, "finish_reason": "stop"}]}) + "\n\n"
             yield "data: [DONE]\n\n"
+
+            # After streaming completes, count tokens and update the statistics
+            completion_tokens = num_tokens_from_string(completion_buffer.getvalue())
+            update_model_stats(model, prompt_tokens, completion_tokens)
 
         return Response(generate(), mimetype="text/event-stream")
     except requests.exceptions.RequestException as e:
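Each yielded line is an OpenAI-style server-sent-event chunk, so a client consuming the stream sees output of this shape (the `content` text is illustrative):

```text
data: {"object": "chat.completion.chunk", "choices": [{"delta": {"content": "Hello"}}]}

data: {"object": "chat.completion.chunk", "choices": [{"delta": {}, "finish_reason": "stop"}]}

data: [DONE]
```

Also worth noting: the token count and `update_model_stats` call sit inside `generate()` after the final `[DONE]` is yielded, so the statistics only update once a client consumes the stream to completion.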
@@ -655,6 +748,9 @@ def send_message_non_stream(message, model, think=False):
         (session, cookies, session_token, conversation_id, model_map) = get_user_data()
         trace_id, sentry_trace = generate_trace_id()
 
+        # Count input tokens
+        prompt_tokens = num_tokens_from_string(message)
+
         headers = {
             "accept": "text/event-stream",
             "accept-language": "zh-CN,zh;q=0.9",
@@ -718,6 +814,9 @@ def send_message_non_stream(message, model, think=False):
         if think:
             id = ""
             think_state = 2
+            think_buffer = io.StringIO()
+            content_buffer = io.StringIO()
+
             for line in response.iter_lines():
                 if line:
                     decoded_line = line.decode("utf-8")
@@ -727,51 +826,87 @@ def send_message_non_stream(message, model, think=False):
                             continue
                         elif think_state == 2:
                             id = data.get("messageId")
-                            segment =
-
+                            segment = data.get("segment", "")
+                            think_buffer.write(segment)
                             think_state = 1
                         elif think_state == 1:
                             if data.get("messageId") != id:
                                 segment = data.get("segment", "")
-
+                                content_buffer.write(segment)
                             else:
-                                segment =
-
+                                segment = data.get("segment", "")
+                                think_buffer.write(segment)
                                 think_state = 0
                         else:
                             segment = data.get("segment", "")
-
-            except
-                print(f"
+                            content_buffer.write(segment)
+            except Exception as e:
+                print(f"Error while processing the response: {e}")
+
+            think_content = think_buffer.getvalue()
+            response_content = content_buffer.getvalue()
+
+            # Count output tokens and update the statistics
+            completion_tokens = num_tokens_from_string(think_content + response_content)
+            update_model_stats(model, prompt_tokens, completion_tokens)
+
+            return jsonify({
+                "id": f"chatcmpl-{str(uuid.uuid4())}",
+                "object": "chat.completion",
+                "created": int(time.time()),
+                "model": model,
+                "choices": [{
+                    "index": 0,
+                    "message": {
+                        "role": "assistant",
+                        "content": f"<think>\n{think_content}\n</think>\n{response_content}"
+                    },
+                    "finish_reason": "stop"
+                }],
+                "usage": {
+                    "prompt_tokens": prompt_tokens,
+                    "completion_tokens": completion_tokens,
+                    "total_tokens": prompt_tokens + completion_tokens
+                }
+            })
         else:
             for line in response.iter_lines():
                 if line:
                     decoded_line = line.decode("utf-8")
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+                    segment = extract_segment(decoded_line)
+                    if segment:
+                        buffer.write(segment)
+
+            response_content = buffer.getvalue()
+
+            # Count output tokens and update the statistics
+            completion_tokens = num_tokens_from_string(response_content)
+            update_model_stats(model, prompt_tokens, completion_tokens)
+
+            return jsonify({
+                "id": f"chatcmpl-{str(uuid.uuid4())}",
+                "object": "chat.completion",
+                "created": int(time.time()),
+                "model": model,
+                "choices": [{
                     "index": 0,
-                    "message": {
-
+                    "message": {
+                        "role": "assistant",
+                        "content": response_content
+                    },
+                    "finish_reason": "stop"
+                }],
+                "usage": {
+                    "prompt_tokens": prompt_tokens,
+                    "completion_tokens": completion_tokens,
+                    "total_tokens": prompt_tokens + completion_tokens
                     }
-
-
-            return jsonify(openai_response)
-    except Exception as e:
+                })
+    except requests.exceptions.RequestException as e:
         error_details = str(e)
-        if
-
+        if hasattr(e, 'response') and e.response is not None:
+            if hasattr(e.response, 'text'):
+                error_details += f" - Response: {e.response.text[:200]}"
         print(f"Failed to send message: {error_details}")
         return jsonify({"error": f"Failed to send message: {error_details}"}), 500
 
@@ -868,10 +1003,44 @@ def index():
         health_checks=health_check_counter,
         user_count=USER_NUM,
         models=sorted(list(MODELS)),
-        year=datetime.now().year
+        year=datetime.now().year,
+        model_stats=model_usage_stats,
+        total_tokens=total_tokens
     )
 
 
+# Use OpenAI's tokenizer to count tokens
+def num_tokens_from_string(string, model="gpt-3.5-turbo"):
+    """Count the number of tokens in a piece of text."""
+    try:
+        encoding = tiktoken.encoding_for_model(model)
+        num_tokens = len(encoding.encode(string))
+        return num_tokens
+    except:
+        # If tiktoken doesn't support the model or errors out, use a simple estimate
+        return len(string) // 4  # rough estimate: ~4 characters per token
+
+# Update the model usage statistics
+def update_model_stats(model, prompt_tokens, completion_tokens):
+    global model_usage_stats, total_tokens
+    if model not in model_usage_stats:
+        model_usage_stats[model] = {
+            "count": 0,
+            "prompt_tokens": 0,
+            "completion_tokens": 0,
+            "total_tokens": 0
+        }
+
+    model_usage_stats[model]["count"] += 1
+    model_usage_stats[model]["prompt_tokens"] += prompt_tokens
+    model_usage_stats[model]["completion_tokens"] += completion_tokens
+    model_usage_stats[model]["total_tokens"] += (prompt_tokens + completion_tokens)
+
+    total_tokens["prompt"] += prompt_tokens
+    total_tokens["completion"] += completion_tokens
+    total_tokens["total"] += (prompt_tokens + completion_tokens)
+
+
 if __name__ == "__main__":
     # Start the keep-alive thread
     threading.Thread(target=keep_alive, daemon=True).start()
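To see what `num_tokens_from_string` does on both of its paths, here is a small standalone sketch; `tiktoken.encoding_for_model` raises `KeyError` for model names it does not recognize, which is what the bare `except` above falls back from:

```python
import tiktoken

text = "Hello, how are you?"

# Known model: tiktoken resolves an encoding and counts exactly
enc = tiktoken.encoding_for_model("gpt-3.5-turbo")
print(len(enc.encode(text)))  # exact token count

# Unknown model: encoding_for_model raises KeyError,
# so the proxy falls back to the rough len(text) // 4 estimate
try:
    tiktoken.encoding_for_model("some-custom-model")  # hypothetical name
except KeyError:
    print(len(text) // 4)  # rough estimate: ~4 characters per token
```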
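Since Flask serves requests concurrently and `update_model_stats` mutates shared globals (the streaming path even mutates them from inside a generator), a follow-up hardening step could guard the counters with a lock. A minimal sketch, not part of this commit, assuming the `model_usage_stats` and `total_tokens` globals from the diff above:

```python
import threading

stats_lock = threading.Lock()  # hypothetical addition, not in this diff

def update_model_stats(model, prompt_tokens, completion_tokens):
    global model_usage_stats, total_tokens
    with stats_lock:  # serialize concurrent updates from parallel requests
        if model not in model_usage_stats:
            model_usage_stats[model] = {
                "count": 0,
                "prompt_tokens": 0,
                "completion_tokens": 0,
                "total_tokens": 0
            }
        model_usage_stats[model]["count"] += 1
        model_usage_stats[model]["prompt_tokens"] += prompt_tokens
        model_usage_stats[model]["completion_tokens"] += completion_tokens
        model_usage_stats[model]["total_tokens"] += prompt_tokens + completion_tokens
        total_tokens["prompt"] += prompt_tokens
        total_tokens["completion"] += completion_tokens
        total_tokens["total"] += prompt_tokens + completion_tokens
```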
requirements.txt
CHANGED
Binary files a/requirements.txt and b/requirements.txt differ