Commit · f5d4c8d
1 Parent(s): 7298dcb
Update app.py

app.py CHANGED
@@ -103,11 +103,19 @@ def submit_message(prompt, prompt_template, temperature, max_tokens, context_len
 
     # completion = openai.ChatCompletion.create(model="gpt-3.5-turbo", messages=system_prompt + history[-context_length*2:] + [prompt_msg], temperature=temperature, max_tokens=max_tokens)
 
-    completion_chain = load_qa_chain(ChatOpenAI(temperature=temperature, max_tokens=max_tokens, model_name="gpt-3.5-turbo"), chain_type="stuff" )
-    completion = RetrievalQA(combine_documents_chain=completion_chain, retriever=vectordb.as_retriever())
-    query = str(system_prompt + history[-context_length*2:] + [prompt_msg])
-    completion = completion.run(query)
+    #completion_chain = load_qa_chain(ChatOpenAI(temperature=temperature, max_tokens=max_tokens, model_name="gpt-3.5-turbo"), chain_type="stuff" )
+    #completion = RetrievalQA(combine_documents_chain=completion_chain, retriever=vectordb.as_retriever())
+    #query = str(system_prompt + history[-context_length*2:] + [prompt_msg])
+    #completion = completion.run(query)
     # from https://blog.devgenius.io/chat-with-document-s-using-openai-chatgpt-api-and-text-embedding-6a0ce3dc8bc8
+
+    completion = ChatVectorDBChain.from_llm(ChatOpenAI(temperature=temperature, max_tokens=max_tokens, model_name="gpt-3.5-turbo"), vectordb, return_source_documents=True)
+    query = str(system_prompt + history[-context_length*2:] + [prompt_msg])
+    completion = completion({"question": query, "chat_history": history[-context_length*2:]})
+
+
+
+
 
     history.append(prompt_msg)
     history.append(completion.choices[0].message.to_dict())
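
The commit retires the commented-out load_qa_chain/RetrievalQA pair in favor of LangChain's ChatVectorDBChain. A minimal sketch of that pattern follows, assuming vectordb is a Chroma store; its construction is not part of this hunk, so the embedding choice and persist_directory below are illustrative stand-ins for whatever app.py actually sets up:

    # Hedged sketch of the new retrieval path, not the Space's exact code.
    # Assumes OPENAI_API_KEY is set and that "db" holds a persisted Chroma
    # index; both are illustrative stand-ins for app.py's real setup.
    from langchain.chains import ChatVectorDBChain
    from langchain.chat_models import ChatOpenAI
    from langchain.embeddings import OpenAIEmbeddings
    from langchain.vectorstores import Chroma

    vectordb = Chroma(persist_directory="db", embedding_function=OpenAIEmbeddings())

    chain = ChatVectorDBChain.from_llm(
        ChatOpenAI(temperature=0.7, max_tokens=512, model_name="gpt-3.5-turbo"),
        vectordb,
        return_source_documents=True,
    )

    # The chain is called with a dict ({"question", "chat_history"}) and
    # returns a dict holding "answer" plus "source_documents" — not an
    # OpenAI ChatCompletion object.
    result = chain({"question": "What does the document cover?", "chat_history": []})
    print(result["answer"])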
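One caveat with the hunk as committed: because ChatVectorDBChain returns a plain dict, the unchanged final line history.append(completion.choices[0].message.to_dict()) will raise an AttributeError at runtime. A hedged one-line fix, assuming history keeps OpenAI-style role/content message dicts (implied by the old call):

    # Stand-in shapes for the chain's result and the chat history, so the
    # snippet runs on its own; the append is the actual suggested change.
    completion = {"answer": "example answer"}
    history = []
    history.append({"role": "assistant", "content": completion["answer"]})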