Sadiaa committed on
Commit
3af52a5
·
verified ·
1 Parent(s): e20ac80

Update chatbot.py

Browse files
Files changed (1) hide show
  1. chatbot.py +2 -18
chatbot.py CHANGED
@@ -2,13 +2,9 @@ import os
2
  import time
3
  import json
4
  from groq import Groq
5
- from transformers import AutoTokenizer
6
- from langchain_core.runnables import RunnablePassthrough
7
  from langchain.memory import ConversationBufferMemory
8
-
9
  from langchain_openai import ChatOpenAI
10
- from langchain.document_loaders import CSVLoader
11
-
12
  from langchain_community.vectorstores import FAISS
13
  from deep_translator import GoogleTranslator
14
 
@@ -23,19 +19,7 @@ class Comsatsbot:
23
  "llama-3.1-70b-versatile",
24
  "llama3-70b-8192"
25
  ]
26
- # self.memory = ConversationTokenBufferMemory(llm=self.llm, max_token_limit=3000)
27
-
28
- # Load LLaMA-compatible tokenizer
29
- tokenizer = AutoTokenizer.from_pretrained("gpt2")
30
-
31
- # Define a simple token counting function
32
- def llama_token_count(text: str) -> int:
33
- return len(tokenizer.encode(text, add_special_tokens=False))
34
-
35
- # Use updated memory setup
36
- self.memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True, max_token_limit=3000, tokenizer=llama_token_count)
37
-
38
-
39
  self.chats_collection = chats_collection
40
  self.index_path = index_path
41
  self.hf = hf
 
2
  import time
3
  import json
4
  from groq import Groq
 
 
5
  from langchain.memory import ConversationBufferMemory
 
6
  from langchain_openai import ChatOpenAI
7
+ from langchain_community.document_loaders import CSVLoader
 
8
  from langchain_community.vectorstores import FAISS
9
  from deep_translator import GoogleTranslator
10
 
 
19
  "llama-3.1-70b-versatile",
20
  "llama3-70b-8192"
21
  ]
22
+ self.memory = ConversationTokenBufferMemory(llm=self.llm, max_token_limit=3000)
 
 
 
 
 
 
 
 
 
 
 
 
23
  self.chats_collection = chats_collection
24
  self.index_path = index_path
25
  self.hf = hf