simonou99 committed on
Commit 0eb35a1 · verified · 1 Parent(s): 6c27626

Update app.py

Files changed (1)
  1. app.py +14 -0
app.py CHANGED
@@ -92,6 +92,20 @@ def gradio_fn(message, history, api_key):
     return expert_response

 with gr.Blocks() as demo:
+    read_me = gr.Markdown(
+        """
+        # Simple Dual LLM Chatbot
+
+        This is a playground for testing Stanford's 'Meta-Prompting' logic ([paper link](https://arxiv.org/abs/2401.12954)), in which every user request is first passed to a 'meta' bot, which then generates the system prompt of a field-related 'Expert' bot that answers the request.
+        That is, for each round, the LLM assigns the best-suited expert for answering the user's specific request.
+        Stanford claims that this simple setup yields 60%+ better accuracy than a standard 'syst_prompt + chat_history' logic.
+        Hence, for anyone curious to check it out, here is a simple implementation for everybody to play around with.
+
+        Something to keep in mind:
+        1. Currently it requires an API key from ChatGLM (get one here if you don't have one: [link](https://open.bigmodel.cn/usercenter/apikeys))
+        2. To balance contextual understanding and token saving, the meta bot's logic is modified so that it only sees the last round of chat and the current user request when 'generating' an expert.
+        """
+    )
     api_key = gr.Textbox()
     main_interface = gr.ChatInterface(fn=gradio_fn, additional_inputs=api_key)
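
For readers skimming the diff, the meta-prompting flow the new README describes reduces to two chained LLM calls per round. Below is a minimal sketch of that flow under stated assumptions; it is not the repository's actual code, and `call_llm` (with its signature) is a hypothetical stand-in for the ChatGLM client.

```python
def call_llm(api_key: str, system_prompt: str, user_content: str) -> str:
    """Hypothetical chat-completion wrapper; swap in a real API client."""
    raise NotImplementedError


def gradio_fn(message, history, api_key):
    # Per note 2 in the README: the meta bot only sees the last round of
    # chat plus the current request, to save tokens while keeping context.
    last_round = history[-1:] if history else []
    context = "\n".join(f"user: {u}\nassistant: {a}" for u, a in last_round)

    # Stage 1: the 'meta' bot drafts a system prompt describing the
    # best-suited field expert for this specific request.
    expert_sys_prompt = call_llm(
        api_key,
        "Given the conversation, write a system prompt describing the "
        "single best field expert to answer the latest user request.",
        f"{context}\nuser: {message}",
    )

    # Stage 2: the generated 'Expert' bot answers the actual request.
    expert_response = call_llm(api_key, expert_sys_prompt, message)
    return expert_response
```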