Update app.py
app.py
CHANGED
@@ -622,7 +622,47 @@ memory.load_memory_variables({})
 
 
 
+standalone_question_template = """Given the following conversation and a follow up question,
+rephrase the follow up question to be a standalone question, in its original language.\n\n
+Chat History:\n{chat_history}\n
+Follow Up Input: {question}\n
+Standalone question:"""
 
+standalone_question_prompt = PromptTemplate(
+    input_variables=['chat_history', 'question'],
+    template=standalone_question_template
+)
+
+
+def answer_template(language="english"):
+    """Pass the standalone question along with the chat history and context
+    to the `LLM` which will answer."""
+
+    template = f"""Answer the question at the end, using only the following context (delimited by <context></context>).
+Your answer must be in the language at the end.
+
+<context>
+{{chat_history}}
+
+{{context}}
+</context>
+
+Question: {{question}}
+Language: {language}.
+
+"""
+    return template
+
+answer_prompt = ChatPromptTemplate.from_template(answer_template())
+
+
+
+# invoke the ChatPromptTemplate
+answer_prompt.invoke(
+    {"question": "please give more details about DTC, including its use cases and implementation.",
+     "context": [Document(page_content="DTC use cases include...")],  # the context is a list of retrieved documents.
+     "chat_history": memory.chat_memory}
+)
 
 
 
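Review note: the added block reduces to the following self-contained sketch. The import paths are an assumption (they match the legacy langchain layout this app appears to use), and the empty chat_history stands in for the app's real memory object, so both prompts can be rendered without calling an LLM.

# Minimal sketch of the two prompts this hunk adds (assumed legacy imports).
from langchain.prompts import PromptTemplate, ChatPromptTemplate
from langchain.schema import Document

# Condense-question prompt: folds the chat history and a follow-up
# question into one standalone question.
standalone_question_prompt = PromptTemplate(
    input_variables=["chat_history", "question"],
    template=(
        "Given the following conversation and a follow up question, "
        "rephrase the follow up question to be a standalone question, "
        "in its original language.\n\n"
        "Chat History:\n{chat_history}\n"
        "Follow Up Input: {question}\n"
        "Standalone question:"
    ),
)

def answer_template(language="english"):
    """Answer prompt: history and retrieved documents are wrapped in
    <context> tags; the target language is baked in via the f-string."""
    return f"""Answer the question at the end, using only the following context (delimited by <context></context>).
Your answer must be in the language at the end.

<context>
{{chat_history}}

{{context}}
</context>

Question: {{question}}
Language: {language}.
"""

answer_prompt = ChatPromptTemplate.from_template(answer_template())

# Render the prompt without an LLM; `context` is a list of retrieved documents.
rendered = answer_prompt.invoke({
    "question": "please give more details about DTC, including its use cases and implementation.",
    "context": [Document(page_content="DTC use cases include...")],
    "chat_history": "",  # stand-in for memory.chat_memory
})
print(rendered.to_string())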
@@ -659,7 +699,7 @@ chain_gemini,memory_gemini = custom_ConversationalRetrievalChain(
 memory_gemini.clear()
 """
 
-
+"""
 chain = ConversationalRetrievalChain.from_llm(
     condense_question_prompt=standalone_question_prompt,
     combine_docs_chain_kwargs={'prompt': answer_prompt},
@@ -675,7 +715,7 @@ chain = ConversationalRetrievalChain.from_llm(
     verbose=False,
     return_source_documents=True
 )
-
+"""
 
 """
 # let's invoke the chain
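These last two hunks disable the ConversationalRetrievalChain wiring by fencing it in a triple-quoted string. For context, here is a hedged sketch of how the two new prompts plug into that chain; llm, retriever, and memory are placeholders for objects defined elsewhere in app.py and not visible in this diff.

from langchain.chains import ConversationalRetrievalChain

# Sketch only: `llm`, `retriever`, and `memory` are assumed to be the
# app's chat model, vector-store retriever, and conversation memory.
chain = ConversationalRetrievalChain.from_llm(
    llm=llm,
    retriever=retriever,
    memory=memory,  # supplies {chat_history} to both prompts
    condense_question_prompt=standalone_question_prompt,  # rewrites the follow-up question
    combine_docs_chain_kwargs={"prompt": answer_prompt},  # answers from the retrieved docs
    verbose=False,
    return_source_documents=True,
)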
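Per the "# let's invoke the chain" comment that follows in the file, invocation would then look roughly like this; the answer and source_documents keys follow the legacy ConversationalRetrievalChain call convention, with sources available because return_source_documents=True.

# With a memory object attached, only the question needs to be supplied.
result = chain({"question": "please give more details about DTC, including its use cases and implementation."})

print(result["answer"])
for doc in result["source_documents"]:  # present because return_source_documents=True
    print(doc.page_content[:100])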