"""Precious Plastic RAG chatbot: answers plastic-recycling questions using an
Upstash vector store for retrieval and an OpenAI chat model via LangChain."""
import os
import json
import time

from langchain_openai import ChatOpenAI
from langchain.chains.conversational_retrieval.base import ConversationalRetrievalChain
from langchain.memory.buffer_window import ConversationBufferWindowMemory
from langchain_core.prompts import PromptTemplate
from upstash_vector import Index
from langchain_community.vectorstores.upstash import UpstashVectorStore


# Load runtime settings (model, temperature, retriever k, memory key) from system.json
with open("system.json", "r") as f:
    settings = json.load(f)[0]
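# Expected shape of system.json (the values shown are illustrative assumptions):
# [
#   {
#     "model": "gpt-4o-mini",
#     "temp": 0,
#     "k": 4,
#     "MEMORY_KEY": "chat_history"
#   }
# ]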


index = Index(os.environ["UPSTASH_VECTOR_REST_URL"], os.environ["UPSTASH_VECTOR_REST_TOKEN"])
vectorStore = UpstashVectorStore(
    embedding=True,  # use Upstash's built-in server-side embedding model
    index=index,
)
retriever = vectorStore.as_retriever(search_kwargs={"k": settings["k"]})
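# Assumes UPSTASH_VECTOR_REST_URL and UPSTASH_VECTOR_REST_TOKEN are exported in the
# environment; ChatOpenAI below additionally reads OPENAI_API_KEY.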
# LLM setup
LLM = ChatOpenAI(model=settings["model"], temperature=settings["temp"])


# Prompt template for the answer-generation step
prompt_temp = """
    You are an AI chatbot from Precious Plastic. Your job is to answer questions about recycling plastic.
    You may include links and images in your answers.
    Use the following context to help answer the question.

    ------
    {context}
    ------
    Question: {question}
    Do not:
     - Make things up. If you do not know something, say that you do not know.
    """

QUESTION_PROMPT = PromptTemplate(
    template=prompt_temp,  # the template defined above
    input_variables=["context", "question"],  # variables substituted into the prompt
)
# Conversation memory
memory = ConversationBufferWindowMemory(
    memory_key=settings["MEMORY_KEY"],  # key under which chat history is stored
    output_key="answer",  # which chain output to save into memory
    k=8,  # number of past exchanges to keep
    return_messages=True,  # return the history as a list of messages
)
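# Note: this k=8 is the memory window (number of past exchanges kept), independent
# of the retriever's settings["k"] (number of documents fetched per query).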


# Conversational retrieval (RAG) chain
chain = ConversationalRetrievalChain.from_llm(
    llm=LLM,
    retriever=retriever,
    combine_docs_chain_kwargs={"prompt": QUESTION_PROMPT},  # prompt for the answer step
    chain_type="stuff",  # stuff all retrieved documents into a single prompt
    memory=memory,  # attach conversation memory
)
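# Under the hood, ConversationalRetrievalChain condenses the new question plus the
# chat history into a standalone question, retrieves documents for it, and then
# answers using QUESTION_PROMPT, with "stuff" packing every retrieved document
# into a single LLM call.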

def invoke_question_time(chain, question):
    """Run the chain on a question, print the answer, and report wall-clock latency."""
    start_time = time.time()
    response = chain.invoke({"question": question})
    end_time = time.time()
    print(response["answer"])
    print("response time:", end_time - start_time, "seconds")
    return response, end_time - start_time

# Test main
def main():
    response, _ = invoke_question_time(chain, "Hello, what is Precious Plastic?")
    time.sleep(30)  # pause between test calls
    response, _ = invoke_question_time(chain, """I live in the UK and want to start an extruder workshop.
                                    What is needed? What safety issues might this have, and where could I buy
                                    equipment in the UK to help me?
                                """)
    time.sleep(30)  # pause between test calls
    response, _ = invoke_question_time(chain, "Hello, what is Precious Plastic?")

if __name__ == "__main__":
    main()
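# Example run (the file name here is an illustrative assumption):
#   export OPENAI_API_KEY=...
#   export UPSTASH_VECTOR_REST_URL=...
#   export UPSTASH_VECTOR_REST_TOKEN=...
#   python chatbot.py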