simonou99 committed
Commit 3dae9df · 1 Parent(s): ad5fc6e
Files changed (1)
  1. apa.py +97 -0
apa.py ADDED
@@ -0,0 +1,97 @@
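+ # apa.py: a two-stage chat app. A "master" LLM reads the latest turns and writes
+ # a system prompt (plus sampling parameters) for a domain-"expert" LLM, which
+ # then generates the reply shown in the Gradio chat UI.
+ # To run locally (assuming `pip install gradio zhipuai` and a valid API key):
+ #   export ZHIPUAI_API_KEY=...
+ #   python apa.py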
+ import gradio as gr
+ from zhipuai import ZhipuAI
+ import json
+ import os
+
+ # The ZhipuAI key is read from the environment; the app fails fast if it is unset.
+ api_key = os.environ['ZHIPUAI_API_KEY']
+
+ def convert_to_openai_format(nested_chat):
+     """Flatten Gradio's [[user, assistant], ...] history into OpenAI-style messages."""
+     openai_format = []
+     for dialogue in nested_chat:
+         user_dialogue = {"role": "user", "content": dialogue[0]}
+         assistant_dialogue = {"role": "assistant", "content": dialogue[1]}
+         openai_format.extend([user_dialogue, assistant_dialogue])
+     return openai_format
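+ # Example (illustrative values): [["Hi", "Hello!"]] flattens to
+ # [{"role": "user", "content": "Hi"}, {"role": "assistant", "content": "Hello!"}].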
+
+ def master_llm(user_prompt, history):
+     # Generate the system prompt for the expert LLM from the user's question
+     # and, if available, the previous round of conversation.
+     if history:
+         last_round = history[-1]
+         last_record_text = f"'''\n# User:\n{last_round[0]}\n\n\n# AI:\n{last_round[1]}\n\n\n# User:\n{user_prompt}\n'''"
+     else:
+         last_record_text = f"'''\n# User:\n{user_prompt}\n'''"
+     syst_prompt = """Based on the conversation or question between the user and the AI, decide what domain expert the upcoming conversation needs, and write the system prompt for an AI expert in that domain.
+
+ Return it in the following JSON form, strictly keeping `{}` and `""` closed (note: every value is a string):
+
+ ```
+ {
+     "expert_system_prompt":"You are a ... AI, you have experience in ..., and your thinking is .... Now, your task is ...",
+     "temperature":"0.01",
+     "top_p":"0.1"
+ }
+ ```
+
+ Parameter notes:
+ temperature controls how random the AI's replies are; the smaller the value, the more focused and deterministic the answer. Its range is (0,1), excluding 0 and 1.
+ top_p is the sampling range of candidate tokens the AI considers, e.g. 0.1 means only the top 10% most likely tokens are candidates. Its range is (0,1), excluding 0 and 1.
+ As a rule of thumb, a creative AI gets higher values for both parameters, while a strictly compliant AI should keep temperature as low as possible.
+
+ Note: do not force an expert into existence. If you cannot tell what domain is needed (e.g. there is no context), reply with this default instead:
+ ```
+ {
+     "expert_system_prompt":"Respond appropriately to the user's question and needs (following the context, if there is any).",
+     "temperature":"0.5",
+     "top_p":"0.5"
+ }
+ ```
+ """
+     messages = [
+         {"role":"system","content":syst_prompt},
+         {"role":"user","content":last_record_text}
+     ]
+
+     client = ZhipuAI(api_key=api_key)
+     response = client.chat.completions.create(
+         model="glm-4",
+         messages=messages,
+         temperature=0.01,
+         top_p=0.01,
+         do_sample=True
+     )
+
+     response_text = response.choices[0].message.content
+     # Slice from the first '{' to the last '}' so stray prose or code fences
+     # around the router's JSON do not break json.loads.
+     response_json = json.loads(response_text[response_text.find('{'):response_text.rfind('}')+1])
+     expert_system_prompt = response_json['expert_system_prompt']
+     temperature = response_json['temperature']
+     top_p = response_json['top_p']
+     print(response_text)  # debug: log the raw routing decision
+
+     return expert_system_prompt, temperature, top_p
+
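+ # The router's JSON keeps every value as a string, so temperature and top_p
+ # arrive here as strings and are cast to float before the API call.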
+ def expert_llm(user_prompt, history, expert_system_prompt, temperature, top_p):
+     """Answer the user under the persona the master LLM just designed."""
+     client = ZhipuAI(api_key=api_key)
+     if history:
+         prompt_records = convert_to_openai_format(history)
+         messages = [{"role":"system","content":expert_system_prompt}] + prompt_records + [{"role":"user","content":user_prompt}]
+     else:
+         messages = [{"role":"system","content":expert_system_prompt},{"role":"user","content":user_prompt}]
+     response = client.chat.completions.create(
+         model="glm-4",
+         messages=messages,
+         temperature=float(temperature),
+         top_p=float(top_p),
+         do_sample=True
+     )
+     return response.choices[0].message.content
+
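+ # gr.ChatInterface hands gradio_fn the running `history` as [[user, assistant], ...]
+ # pairs (the tuples-style format this file assumes throughout).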
+ def gradio_fn(message, history):
+     # One turn of the pipeline: the master picks the persona and sampling
+     # parameters, then the expert produces the visible reply.
+     expert_system_prompt, temperature, top_p = master_llm(message, history)
+     expert_response = expert_llm(message, history, expert_system_prompt, temperature, top_p)
+     return expert_response
+
+ demo = gr.ChatInterface(fn=gradio_fn)
+
+ if __name__ == "__main__":
+     demo.launch()
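+     # launch() serves the UI locally (Gradio defaults to http://127.0.0.1:7860);
+     # demo.launch(share=True) would add a temporary public link if one is needed.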