Sadiaa committed (verified)
Commit 9c9a42b · Parent(s): ab30a38

Update chatbot.py

Files changed (1)
  1. chatbot.py +14 -2
chatbot.py CHANGED
@@ -2,6 +2,7 @@ import os
 import time
 import json
 from groq import Groq
+from transformers import AutoTokenizer
 from langchain_core.runnables import RunnablePassthrough
 from langchain.memory import ConversationTokenBufferMemory
 from langchain_community.chat_models import ChatOpenAI
@@ -20,8 +21,19 @@ class Comsatsbot:
             "llama-3.1-70b-versatile",
             "llama3-70b-8192"
         ]
-        #self.memory = ConversationTokenBufferMemory(llm=self.llm, max_token_limit=3000)
-        self.memory = ConversationTokenBufferMemory(memory_key="chat_history",return_messages=True,max_token_limit=3000)
+        # self.memory = ConversationTokenBufferMemory(llm=self.llm, max_token_limit=3000)
+
+        # Load LLaMA-compatible tokenizer
+        tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-2-7b-chat-hf")
+
+        # Define a simple token counting function
+        def llama_token_count(text: str) -> int:
+            return len(tokenizer.encode(text, add_special_tokens=False))
+
+        # Use updated memory setup
+        self.memory = ConversationTokenBufferMemory(memory_key="chat_history", return_messages=True, max_token_limit=3000, tokenizer=llama_token_count)
+
+
         self.chats_collection = chats_collection
         self.index_path = index_path
         self.hf = hf
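
Review note: in the LangChain releases I know of, ConversationTokenBufferMemory does not accept a tokenizer keyword; it is a pydantic model with a required llm field and prunes history via llm.get_num_tokens_from_messages(...), so the new constructor call above is likely to fail validation before the custom counter is ever used. Below is a minimal sketch of one way to count with the HF tokenizer instead, using a hypothetical LlamaTokenBufferMemory subclass; it assumes access to the gated meta-llama/Llama-2-7b-chat-hf checkpoint (any LLaMA-family tokenizer with the same vocabulary would do for counting):

    from typing import Any, Dict, List

    from langchain.memory import ConversationTokenBufferMemory
    from langchain_core.messages import BaseMessage
    from transformers import AutoTokenizer

    # Assumption: the gated meta-llama checkpoint is accessible.
    tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-2-7b-chat-hf")


    def llama_token_count(messages: List[BaseMessage]) -> int:
        # Sum HF-tokenizer token counts over all message contents.
        return sum(
            len(tokenizer.encode(str(m.content), add_special_tokens=False))
            for m in messages
        )


    class LlamaTokenBufferMemory(ConversationTokenBufferMemory):
        """Hypothetical subclass: prune with the HF tokenizer, not self.llm."""

        llm: Any = None  # relax the required llm field; it is never called

        def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None:
            # Append the new turn via BaseChatMemory, then drop the oldest
            # messages until under the limit, mirroring the upstream pruning
            # loop but counting with llama_token_count.
            super(ConversationTokenBufferMemory, self).save_context(inputs, outputs)
            buffer = self.chat_memory.messages
            while buffer and llama_token_count(buffer) > self.max_token_limit:
                buffer.pop(0)


    memory = LlamaTokenBufferMemory(
        memory_key="chat_history", return_messages=True, max_token_limit=3000
    )

If the subclass feels heavy, restoring llm=self.llm on the stock class (as in the commented-out line) also keeps pruning working, just counted with the model's own tokenizer rather than the LLaMA one.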