Update chatbot.py
chatbot.py  CHANGED  (+31, -62)
@@ -1,23 +1,15 @@
 import os
-import json
 import time
-import logging
-from huggingface_hub import hf_hub_download
 import pandas as pd
+from huggingface_hub import hf_hub_download
 from groq import Groq
 from langchain.memory import ConversationBufferMemory
-from langchain_openai import ChatOpenAI
-from langchain_community.document_loaders import CSVLoader
 from langchain_community.vectorstores import FAISS
 from deep_translator import GoogleTranslator

-# Set up logging
-logging.basicConfig(level=logging.DEBUG, format='%(asctime)s - %(levelname)s - %(message)s')
-logger = logging.getLogger(__name__)

 class Comsatsbot:
     def __init__(self, hf_space_repo, llm, api_keys, chats_collection, index_path='faiss_kb'):
-        logger.info("Initializing Comsatsbot...")
         self.llm = llm
         self.api_keys = api_keys
         self.client = None
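For orientation, here is a minimal sketch of how this class might be wired up, assuming a pymongo collection for chat storage. The repo id, Mongo URI, and key below are placeholders rather than values from the commit, and `llm` is whatever model handle the surrounding app passes in.

```python
# Illustrative setup only: placeholder repo id, Mongo URI, and Groq key.
from pymongo import MongoClient

chats = MongoClient("mongodb://localhost:27017")["comsats"]["chats"]

bot = Comsatsbot(
    hf_space_repo="user/comsats-data",   # placeholder Hub repo id
    llm=None,                            # whatever chat model the app supplies
    api_keys=["gsk_your_key_here"],      # Groq keys tried in order
    chats_collection=chats,
    index_path="faiss_kb",
)
```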
@@ -34,63 +26,47 @@ class Comsatsbot:
         self.initialize_faiss_index()

     def load_data_from_hf_space(self):
-        """
-
-
-
-
-
-
-
-
-
-        # Load the CSV file into a DataFrame using pandas
-        data = pd.read_csv(local_csv_path)
-        documents = data.to_dict(orient="records")
-        logger.debug(f"Loaded {len(documents)} records from the dataset.")
+        files = ["english_data.csv", "urdu_data.csv", "FYP Supervisor Feedback.csv"]
+        documents = []
+
+        for file in files:
+            local_path = hf_hub_download(repo_id=self.hf_space_repo, filename=file)
+            df = pd.read_csv(local_path)
+            docs = df.astype(str).to_dict(orient="records")
+            documents.extend(docs)
+
         return documents

     def initialize_faiss_index(self):
-        logger.info("Initializing FAISS index...")
         if os.path.exists(self.index_path):
-            logger.info(f"FAISS index found at {self.index_path}. Loading...")
             self.faiss_index = FAISS.load_local(self.index_path)
         else:
-            logger.info(f"FAISS index not found. Creating a new one...")
             documents = self.load_data_from_hf_space()
             self.faiss_index = FAISS.from_documents(documents)
             self.faiss_index.save_local(self.index_path)
+
         self.faiss_retriever = self.faiss_index.as_retriever(search_kwargs={"k": 5})
-        logger.info("FAISS index initialized successfully.")

     def retrieve_answer(self, query):
-        logger.info(f"Retrieving answer for query: {query}")
         if self.faiss_retriever:
-
-            logger.debug(f"Retrieved answer: {result}")
-            return result
-        logger.warning("FAISS retriever is not initialized.")
+            return self.faiss_retriever.invoke(query)
         return None

     def create_chat_record(self, chat_id):
-        logger.info(f"Creating new chat record for chat_id: {chat_id}")
         self.chats_collection.insert_one({
             "_id": chat_id,
             "history": []
         })

     def update_chat(self, chat_id, question, answer):
-        logger.info(f"Updating chat history for chat_id: {chat_id}")
         self.chats_collection.update_one(
             {"_id": chat_id},
             {"$push": {"history": {"question": question, "answer": answer}}}
         )

     def load_chat(self, chat_id):
-        logger.info(f"Loading chat history for chat_id: {chat_id}")
         chat_record = self.chats_collection.find_one({"_id": chat_id})
         if not chat_record:
-            logger.error(f"Chat ID {chat_id} does not exist.")
             raise KeyError(f"Chat ID {chat_id} does not exist.")
         return chat_record.get('history', [])

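The rewritten loader pulls three CSV files from the Hub with hf_hub_download and flattens every row into a dict. For reference, LangChain's FAISS helpers expect Document objects plus an embedding model, so a standalone version of the indexing step could look like the sketch below; the embedding model and the allow_dangerous_deserialization flag are assumptions about the surrounding setup, not part of this commit.

```python
# Standalone sketch of the indexing step; the embedding model is an assumption.
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.vectorstores import FAISS
from langchain_core.documents import Document

records = [{"question": "Where is the Attock campus located?", "answer": "See the campus page."}]
docs = [Document(page_content=str(r)) for r in records]   # stand-in for the CSV rows
embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")

index = FAISS.from_documents(docs, embeddings)
index.save_local("faiss_kb")

# Recent LangChain releases also expect the embeddings (and an opt-in flag) on load.
restored = FAISS.load_local("faiss_kb", embeddings, allow_dangerous_deserialization=True)
retriever = restored.as_retriever(search_kwargs={"k": 5})
print(retriever.invoke("Where is the Attock campus located?"))
```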
@@ -102,12 +78,11 @@ Use emojis only when required based on the user's tone and emotions. Do not over
 - **Sad emotions**: Use 😔 when the user is asking about something disappointing or negative.
 - **Surprise**: Use 😯 when the user expresses surprise.
 - **Anger or frustration**: Use 😡 when the user expresses frustration or dissatisfaction.
-If the user asks the same question repeatedly or asks an illogical question, feel free to use emojis to subtly convey frustration, confusion, or amusement.
 If the user writes question in urdu, give answer in urdu.
-If the user writes question in English, give answer in English
+If the user writes question in English, give answer in English.
 please provide the personalized answer and provide answer quickly
 please answer from the dataset i provided to you in csv files. And donot write in every answer that i donot know the exact answer.and refer website only where it is necessary.
-Do not include the phrase "According to the provided context" or "Based on the chat history". Simply generate the answer like a human would, without referencing where the information comes from.
+Do not include the phrase "According to the provided context" or "Based on the chat history". Simply generate the answer like a human would, without referencing where the information comes from.
 If the question requires a URL, format it like this:
 [Click here to visit COMSATS](https://comsats.edu.pk).
 Your task is to help students at COMSATS University, Attock campus, with their university-related queries. The following are key details about the university:
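The prompt asks for Urdu answers to Urdu questions, and the file keeps the deep_translator import; whether chatbot.py calls it is outside the lines shown in this diff, but for reference its API is a one-liner:

```python
# deep_translator reference usage; the sample sentence is illustrative only.
from deep_translator import GoogleTranslator

urdu_question = "داخلہ کب شروع ہوگا؟"   # "When do admissions start?"
in_english = GoogleTranslator(source="auto", target="en").translate(urdu_question)
back_to_urdu = GoogleTranslator(source="en", target="ur").translate(in_english)
print(in_english)
print(back_to_urdu)
```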
@@ -126,31 +101,25 @@ Context ends here. Now, answer the following question:
 """

     def generate_response(self, question, history, context):
-        logger.info(f"Generating response for question: {question}")
         prompt = self.get_system_prompt().format(question=question, history=history, context=context)

-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-                return response
-            except Exception as e:
-                logger.error(f"Error with model {model}: {e}")
-                time.sleep(2)
-                continue
-        logger.warning("Unable to generate a response.")
+        for api_key in self.api_keys:
+            self.client = Groq(api_key=api_key)
+            for model in self.models:
+                try:
+                    chat_completion = self.client.chat.completions.create(
+                        messages=[
+                            {"role": "system", "content": prompt},
+                            {"role": "user", "content": f"Answer the following question: {question}"}
+                        ],
+                        model=model,
+                        max_tokens=1024,
+                    )
+                    return chat_completion.choices[0].message.content
+                except Exception:
+                    time.sleep(2)
+                    continue
         return "Sorry, unable to provide an answer at this time."


+
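Taken together, the retrieval, chat-history, and generation methods compose roughly as follows. This is an illustrative call sequence reusing the `bot` instance sketched above, with a placeholder chat id and question, not code from the repository.

```python
# Illustrative call sequence over the methods defined in this file.
chat_id = "demo-chat"                        # placeholder id
question = "When does the fall semester start?"

context = bot.retrieve_answer(question)      # top-5 FAISS matches, or None
try:
    history = bot.load_chat(chat_id)         # raises KeyError for unknown ids
except KeyError:
    bot.create_chat_record(chat_id)
    history = []

answer = bot.generate_response(question, history, context)
bot.update_chat(chat_id, question, answer)
print(answer)
```

Note that generate_response also relies on self.models, which is assigned in a part of __init__ that falls outside the hunks shown in this diff.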