JarvisLabs committed on
Commit
546b6a4
·
verified ·
1 Parent(s): 6b4d106

Update langchain_bot.py

Browse files
Files changed (1) hide show
  1. langchain_bot.py +37 -38
langchain_bot.py CHANGED
@@ -1,39 +1,38 @@
1
- import os
2
- import json
3
- from langchain_openai import ChatOpenAI
4
- from langchain_core.prompts import MessagesPlaceholder
5
- from langchain_core.prompts import ChatPromptTemplate
6
- from langchain_core.runnables import RunnablePassthrough
7
- from langchain.chains.conversational_retrieval.base import ConversationalRetrievalChain
8
- from langchain.memory.buffer_window import ConversationBufferWindowMemory
9
- from langchain_core.prompts import PromptTemplate
10
- from langchain.memory import VectorStoreRetrieverMemo
11
- ### Contextualize question ###
12
- from langchain.chains import create_history_aware_retriever, create_retrieval_chain
13
-
14
-
15
- settings= json.load(open("system.json","r"))
16
-
17
-
18
- from upstash_vector import Index
19
- from langchain_community.vectorstores.upstash import UpstashVectorStore
20
- index = Index(os.environ["UPSTASH_VECTOR_REST_URL"],os.environ["UPSTASH_VECTOR_REST_TOKEN"])
21
- vectorStore = UpstashVectorStore(
22
- embedding=True, index=index,
23
- )
24
- retriever = vectorStore.as_retriever(search_kwargs={"k": settings["k"]})
25
- #LLM setup
26
- LLM= ChatOpenAI(model=settings["model"], temperature=settings["temp"])
27
-
28
- #Setup prompt template
29
- QUESTION_PROMPT = PromptTemplate(
30
- template=settings["prompt_temp"], # プロンプトテンプレートをセット
31
- input_variables=["context", "question"] # プロンプトに挿入する変数
32
- )
33
- # Conversation memory
34
- memory = ConversationBufferWindowMemory(
35
- memory_key=settings["MEMORY_KEY"], # Memory key メモリーのキー名
36
- output_key="answer", #output key 出力ののキー名
37
- k=8, #saved conversation number 保持する会話の履歴数
38
- return_messages=True, #get chat list チャット履歴をlistで取得する場合はTrue
39
  )
 
1
+ import os
2
+ import json
3
+ import time
4
+ from langchain_openai import ChatOpenAI
5
+ from langchain.chains.conversational_retrieval.base import ConversationalRetrievalChain
6
+ from langchain.memory.buffer_window import ConversationBufferWindowMemory
7
+ from langchain_core.prompts import PromptTemplate
8
+ ### Contextualize question ###
9
+ from upstash_vector import Index
10
+ from langchain_community.vectorstores.upstash import UpstashVectorStore
11
+
12
+
13
+
14
+ settings= json.load(open("system.json","r"))
15
+
16
+
17
+ from upstash_vector import Index
18
+ from langchain_community.vectorstores.upstash import UpstashVectorStore
19
+ index = Index(os.environ["UPSTASH_VECTOR_REST_URL"],os.environ["UPSTASH_VECTOR_REST_TOKEN"])
20
+ vectorStore = UpstashVectorStore(
21
+ embedding=True, index=index,
22
+ )
23
+ retriever = vectorStore.as_retriever(search_kwargs={"k": settings["k"]})
24
+ #LLM setup
25
+ LLM= ChatOpenAI(model=settings["model"], temperature=settings["temp"])
26
+
27
+ #Setup prompt template
28
+ QUESTION_PROMPT = PromptTemplate(
29
+ template=settings["prompt_temp"], # プロンプトテンプレートをセット
30
+ input_variables=["context", "question"] # プロンプトに挿入する変数
31
+ )
32
+ # Conversation memory
33
+ memory = ConversationBufferWindowMemory(
34
+ memory_key=settings["MEMORY_KEY"], # Memory key メモリーのキー名
35
+ output_key="answer", #output key 出力ののキー名
36
+ k=8, #saved conversation number 保持する会話の履歴数
37
+ return_messages=True, #get chat list チャット履歴をlistで取得する場合はTrue
 
38
  )