Commit · 4f88aaf
Parent(s): f60be08
Update app.py
app.py CHANGED
@@ -109,17 +109,18 @@ def submit_message(prompt, prompt_template, temperature, max_tokens, context_len
     #completion = completion.run(query)
     # from https://blog.devgenius.io/chat-with-document-s-using-openai-chatgpt-api-and-text-embedding-6a0ce3dc8bc8
 
-    completion = 
+    completion = VectorDBQA.from_chain_type(llm=ChatOpenAI(temperature=temperature, max_tokens=max_tokens, model_name="gpt-3.5-turbo"), chain_type="stuff", vectorstore=vectordb, return_source_documents=True)
     query = str(system_prompt + history[-context_length*2:] + [prompt_msg])
-    completion = completion({"
-
-
+    completion = completion({"query": query})
+    # completion = completion({"question": query, "chat_history": history[-context_length*2:]})
 
 
+    # VectorDBQA.from_chain_type(llm=OpenAI(), chain_type="stuff", vectorstore=docsearch, return_source_documents=True)
+    # https://colab.research.google.com/drive/1dzdNDZyofRB0f2KIB4gHXmIza7ehMX30?usp=sharing#scrollTo=b-ejDn_JfpWW
 
     history.append(prompt_msg)
     #history.append(completion.choices[0].message.to_dict())
-    history.append(completion[
+    history.append(completion['result'])
 
     state['total_tokens'] += completion['usage']['total_tokens']
 
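For context, below is a minimal sketch of the retrieval-QA pattern the added lines use, not the Space's actual app.py. It assumes an older LangChain release where VectorDBQA is still exported (it was later deprecated in favor of RetrievalQA); the sample texts, the in-memory Chroma store, and the query string are illustrative placeholders.

# Rough sketch under the assumptions above; the texts and query are placeholders.
from langchain.chains import VectorDBQA
from langchain.chat_models import ChatOpenAI
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import Chroma

# Hypothetical vector store: embed a few texts into an in-memory Chroma index.
texts = [
    "LangChain chains compose LLM calls with retrieval and other tools.",
    "VectorDBQA answers a question by stuffing retrieved chunks into the prompt.",
]
vectordb = Chroma.from_texts(texts, OpenAIEmbeddings())

# Same construction as the added line: a "stuff" chain over the vector store
# that also returns the retrieved source documents.
qa = VectorDBQA.from_chain_type(
    llm=ChatOpenAI(temperature=0, max_tokens=256, model_name="gpt-3.5-turbo"),
    chain_type="stuff",
    vectorstore=vectordb,
    return_source_documents=True,
)

# Calling the chain with a {"query": ...} dict yields a dict whose "result" key
# holds the answer text and whose "source_documents" key holds the retrieved chunks.
output = qa({"query": "What does VectorDBQA do?"})
print(output["result"])
for doc in output["source_documents"]:
    print(doc.page_content)

Unlike a raw OpenAI ChatCompletion response, the chain's output dict carries "query", "result" and "source_documents" but no "usage" field, so token accounting along the lines of completion['usage']['total_tokens'] would need a separate mechanism (for example LangChain's get_openai_callback, if available in the installed version).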