from langchain.base_language import BaseLanguageModel
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate


class QuestionAtomizer(LLMChain):
    """
    This chain splits the original question into a set of atomistic questions. 
    """

    @classmethod
    def from_llm(cls, llm: BaseLanguageModel, verbose: bool = True) -> LLMChain:
        """Get the response parser."""
        question_atomizer_template = (
            " You are provided with the following question:"
            " '{question}' \n"
            " Your task is to split the given question into at most {num_questions} very"
            " simple, basic and atomistic sub-questions (only if needed), using only the"
            " information given in the question and no other prior knowledge."
            " The sub-questions should be directly related to the intent of the original question."
            " Consider the primary subject and the predicate of the question (if any) when creating sub-questions.\n"
            " Consider also the Parties, Rights, Obligations, Remedies, Actions, or Events mentioned"
            " in the question (if any) when creating the sub-questions.\n"
            " The sub-questions should have no semantic overlap with each other."
            " Format your response like: \n"
            " n. question"
        )
        prompt = PromptTemplate(
            template=question_atomizer_template,
            input_variables=["question", "num_questions"],
        )
        return cls(prompt=prompt, llm=llm, verbose=verbose)
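

# Usage sketch: assumes the legacy `langchain.llms.OpenAI` wrapper and a
# configured OpenAI API key; the question text and num_questions value are
# placeholder examples, not values taken from this repository.
#
#   from langchain.llms import OpenAI
#
#   llm = OpenAI(temperature=0)
#   atomizer = QuestionAtomizer.from_llm(llm, verbose=False)
#   sub_questions = atomizer.run(
#       question="What remedies does the tenant have if the landlord breaches the lease?",
#       num_questions=3,
#   )
#   print(sub_questions)  # numbered list of atomistic sub-questions, e.g. "1. ..."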