Spaces:
Running
Running
File size: 8,146 Bytes
4003576 2788b1e 57fce4e 49c055c 57fce4e a9cc6a5 f5c686f 3af52a5 4003576 2788b1e 49c055c 4003576 57fce4e 49c055c b600e93 a9cc6a5 b600e93 88b1a0d b600e93 f5c686f 4003576 b600e93 57fce4e 4003576 57fce4e 4003576 49c055c 4003576 49c055c 57fce4e 4003576 49c055c 57fce4e 4003576 2788b1e 49c055c 4003576 49c055c 4003576 49c055c 2788b1e 4003576 49c055c b600e93 4003576 49c055c 4003576 49c055c 4003576 49c055c 4003576 d18fcb7 cd9a3d3 d18fcb7 cd9a3d3 ec99e1c d18fcb7 195a6ed 49c055c d18fcb7 8102a2e 49c055c 8102a2e d18fcb7 8102a2e 49c055c 8102a2e 49c055c 8102a2e 738595c 8102a2e |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 |
import os
import json
import time
import logging
from huggingface_hub import hf_hub_download
import pandas as pd
from groq import Groq
from langchain.memory import ConversationBufferMemory
from langchain_openai import ChatOpenAI
from langchain_community.document_loaders import CSVLoader
from langchain_community.vectorstores import FAISS
from deep_translator import GoogleTranslator
# Set up logging: DEBUG level so retrieval results and raw model responses
# (logged via logger.debug below) are visible during development.
logging.basicConfig(level=logging.DEBUG, format='%(asctime)s - %(levelname)s - %(message)s')
# Module-level logger, shared by Comsatsbot methods.
logger = logging.getLogger(__name__)
class Comsatsbot:
    """Chatbot for COMSATS University (Attock campus) student queries.

    Builds/loads a FAISS index over CSV knowledge files hosted on a
    Hugging Face Space, keeps per-chat history in a MongoDB collection,
    and answers questions through the Groq chat-completions API, rotating
    across API keys and models when a call fails.
    """

    # Knowledge-base CSV files expected in the Hugging Face Space repo.
    KB_FILES = ["english_data.csv", "urdu_data.csv", "FYP Supervisor Feedback.csv"]

    def __init__(self, hf_space_repo, llm, api_keys, chats_collection, index_path='faiss_kb'):
        """
        Args:
            hf_space_repo: Hugging Face Space repo id holding the CSV dataset.
            llm: LangChain chat model used by the conversation memory.
            api_keys: iterable of Groq API keys to rotate through on failure.
            chats_collection: MongoDB collection storing per-chat history.
            index_path: directory where the FAISS index is persisted.
        """
        logger.info("Initializing Comsatsbot...")
        self.llm = llm
        self.api_keys = api_keys
        self.client = None  # Groq client, (re)created per API key in generate_response
        # Models tried in order for each API key.
        self.models = [
            "llama-3.3-70b-versatile",
            "llama3-70b-8192"
        ]
        self.memory = ConversationBufferMemory(llm=self.llm, max_token_limit=3000)
        self.chats_collection = chats_collection
        self.index_path = index_path
        self.hf_space_repo = hf_space_repo
        self.faiss_index = None
        self.faiss_retriever = None
        self.initialize_faiss_index()

    def load_data_from_hf_space(self):
        """Download the knowledge-base CSVs from the Hugging Face Space.

        Returns:
            list[dict]: one record per CSV row, across all KB files.
        """
        logger.info("Loading data from Hugging Face Space repository...")
        frames = []
        for filename in self.KB_FILES:
            # hf_hub_download accepts exactly one filename per call, so each
            # file is fetched separately (the original passed all three names
            # in one call, which is a SyntaxError).
            local_csv_path = hf_hub_download(
                repo_id=self.hf_space_repo,
                filename=filename,
                repo_type="space",  # files live in a Space, not a model repo -- confirm
            )
            logger.info("Downloaded %s to %s", filename, local_csv_path)
            frames.append(pd.read_csv(local_csv_path))
        # Merge all KB files into one record list for indexing.
        data = pd.concat(frames, ignore_index=True)
        documents = data.to_dict(orient="records")
        logger.debug("Loaded %d records from the dataset.", len(documents))
        return documents

    def initialize_faiss_index(self):
        """Load the FAISS index from disk, or build and persist it from the CSVs."""
        logger.info("Initializing FAISS index...")
        if os.path.exists(self.index_path):
            logger.info("FAISS index found at %s. Loading...", self.index_path)
            # NOTE(review): current langchain FAISS.load_local also requires an
            # embeddings argument (and allow_dangerous_deserialization=True for
            # pickled indexes) -- verify against the pinned langchain version.
            self.faiss_index = FAISS.load_local(self.index_path)
        else:
            logger.info("FAISS index not found. Creating a new one...")
            documents = self.load_data_from_hf_space()
            # NOTE(review): FAISS.from_documents expects Document objects plus
            # an embeddings instance; plain dicts may need wrapping -- verify.
            self.faiss_index = FAISS.from_documents(documents)
            self.faiss_index.save_local(self.index_path)
        # Retrieve the 5 most similar records per query.
        self.faiss_retriever = self.faiss_index.as_retriever(search_kwargs={"k": 5})
        logger.info("FAISS index initialized successfully.")

    def retrieve_answer(self, query):
        """Return the top-k documents for *query*, or None if no retriever."""
        logger.info("Retrieving answer for query: %s", query)
        if self.faiss_retriever:
            result = self.faiss_retriever.invoke(query)
            logger.debug("Retrieved answer: %s", result)
            return result
        logger.warning("FAISS retriever is not initialized.")
        return None

    def create_chat_record(self, chat_id):
        """Insert a new, empty chat document for *chat_id*."""
        logger.info("Creating new chat record for chat_id: %s", chat_id)
        self.chats_collection.insert_one({
            "_id": chat_id,
            "history": []
        })

    def update_chat(self, chat_id, question, answer):
        """Append one question/answer pair to the chat's history."""
        logger.info("Updating chat history for chat_id: %s", chat_id)
        self.chats_collection.update_one(
            {"_id": chat_id},
            {"$push": {"history": {"question": question, "answer": answer}}}
        )

    def load_chat(self, chat_id):
        """Return the stored history list for *chat_id*.

        Raises:
            KeyError: if no chat document exists for *chat_id*.
        """
        logger.info("Loading chat history for chat_id: %s", chat_id)
        chat_record = self.chats_collection.find_one({"_id": chat_id})
        if not chat_record:
            logger.error("Chat ID %s does not exist.", chat_id)
            raise KeyError(f"Chat ID {chat_id} does not exist.")
        return chat_record.get('history', [])

    def get_system_prompt(self):
        """Return the system-prompt template with {context}, {history} and {question} slots."""
        # Emoji characters below reconstruct mojibake in the original prompt.
        return """
        You are a comsats assistant to help the user with comsats university-related queries. Your response should be concise, direct, and to the point. Avoid any unnecessary explanations. Always consider the provided context and chat history to generate the answer.
        Use emojis only when required based on the user's tone and emotions. Do not overuse them in every answer. provide emojis only when it is context based required. Here's when you should use emojis:
        - **Happy emotions**: Use 😊 or 😄 when the user expresses satisfaction or asks for something positive.
        - **Sad emotions**: Use 😢 when the user is asking about something disappointing or negative.
        - **Surprise**: Use 😲 when the user expresses surprise.
        - **Anger or frustration**: Use 😡 when the user expresses frustration or dissatisfaction.
        If the user asks the same question repeatedly or asks an illogical question, feel free to use emojis to subtly convey frustration, confusion, or amusement.
        If the user writes question in urdu, give answer in urdu.
        If the user writes question in English, give answer in English .
        please provide the personalized answer and provide answer quickly
        please answer from the dataset i provided to you in csv files. And donot write in every answer that i donot know the exact answer.and refer website only where it is necessary.
        Do not include the phrase "According to the provided context" or "Based on the chat history". Simply generate the answer like a human would, without referencing where the information comes from.
        If the question requires a URL, format it like this:
        [Click here to visit COMSATS](https://comsats.edu.pk).
        Your task is to help students at COMSATS University, Attock campus, with their university-related queries. The following are key details about the university:
        - Departments: CS, AI, SE, Math, BBA, EE, CE, English.
        - Facilities: Cricket ground, football ground, two canteens (near CS and Math/EE), mosque near CS department, LT rooms in CS, classrooms in Math, and labs in EE.
        - Admission: Accepts NTS test, CGPA requirements: 85% for CGPA 4.0, 79-84% for CGPA 3.66.
        - Available degrees: BS Computer Science, BS Software Engineering, BS Artificial Intelligence, BS English, BS Math, BS Electrical Engineering, BS Computer Engineering, BS BBA.
        Use the following retrieved context from the university dataset to answer:
        {context}
        Consider the following chat history for additional context to answer the question:
        {history}
        When answering:
        - Answer in a conversational and friendly tone.
        - Be concise and to the point, while still being helpful.
        - If you don't know the answer from the context or chat history, simply say "I don't know the answer to this 😢".
        Context ends here. Now, answer the following question:
        {question}
        """

    def generate_response(self, question, history, context):
        """Generate an answer with the Groq API.

        Tries every (api_key, model) combination once; returns the first
        successful completion text, or an apology string if all attempts
        fail. (The original wrapped this in `while True:`, which could
        never loop a second time because the pass always returns.)
        """
        logger.info("Generating response for question: %s", question)
        prompt = self.get_system_prompt().format(question=question, history=history, context=context)
        for api_key in self.api_keys:
            self.client = Groq(api_key=api_key)
            for model in self.models:
                try:
                    logger.info("Calling model %s for response...", model)
                    chat_completion = self.client.chat.completions.create(
                        messages=[
                            {"role": "system", "content": prompt},
                            {"role": "user", "content": f"Answer the following question: {question}"}
                        ],
                        model=model,
                        max_tokens=1024,
                    )
                    response = chat_completion.choices[0].message.content
                    logger.debug("Received response: %s", response)
                    return response
                except Exception as e:
                    # Likely a rate limit or model outage: back off briefly,
                    # then fall through to the next model / API key.
                    logger.error("Error with model %s: %s", model, e)
                    time.sleep(2)
                    continue
        logger.warning("Unable to generate a response.")
        return "Sorry, unable to provide an answer at this time."
|