logasanjeev committed
Commit 581d233 · verified · 1 Parent(s): 92104cc

Update app.py

Files changed (1)
  1. app.py +112 -53
app.py CHANGED
@@ -10,9 +10,17 @@ from langchain.chains import ConversationalRetrievalChain
 from langchain.memory import ConversationBufferMemory
 from pptx import Presentation
 from io import BytesIO
+import shutil
+import logging
+
+# Set up logging
+logging.basicConfig(level=logging.INFO)
+logger = logging.getLogger(__name__)
 
 # Environment setup for Hugging Face token
-os.environ["HUGGINGFACEHUB_API_TOKEN"] = os.getenv("HUGGINGFACEHUB_API_TOKEN", "your-hf-token-here")
+os.environ["HUGGINGFACEHUB_API_TOKEN"] = os.getenv("HUGGINGFACEHUB_API_TOKEN", "default-token")
+if os.environ["HUGGINGFACEHUB_API_TOKEN"] == "default-token":
+    logger.warning("HUGGINGFACEHUB_API_TOKEN not set. Some models may not work.")
 
 # Model and embedding options
 LLM_MODELS = {
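
Note: the fallback value only logs a warning; the HuggingFaceEndpoint calls later in the file still need a real token. A minimal local-run sketch (the token value and the import are illustrative, not part of this commit):

    # Hypothetical local setup: provide a real token before app.py runs,
    # so os.getenv() picks it up and the "default-token" warning branch is skipped.
    import os
    os.environ["HUGGINGFACEHUB_API_TOKEN"] = "hf_..."  # placeholder, not a real token
    import app  # assumes this file is saved as app.py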
@@ -32,6 +40,7 @@ vector_store = None
 qa_chain = None
 chat_history = []
 memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
+PERSIST_DIRECTORY = "./chroma_db"
 
 # Custom PPTX loader
 class PPTXLoader:
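
For reference, the new PERSIST_DIRECTORY constant is the path Chroma persists to, so a later process can reopen the store without re-embedding. A minimal sketch, assuming the MiniLM model that the "Lightweight (MiniLM-L6)" option most likely maps to:

    from langchain.embeddings import HuggingFaceEmbeddings
    from langchain.vectorstores import Chroma

    # Reopen the store written by process_documents(); the embedding model
    # must match the one used when the store was created (assumed here).
    embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")
    store = Chroma(persist_directory="./chroma_db", embedding_function=embeddings)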
@@ -40,73 +49,105 @@ class PPTXLoader:
 
     def load(self):
         docs = []
-        with open(self.file_path, "rb") as f:
-            prs = Presentation(BytesIO(f.read()))
-            for slide_num, slide in enumerate(prs.slides, 1):
-                text = ""
-                for shape in slide.shapes:
-                    if hasattr(shape, "text"):
-                        text += shape.text + "\n"
-                if text.strip():
-                    docs.append({"page_content": text, "metadata": {"source": self.file_path, "slide": slide_num}})
+        try:
+            with open(self.file_path, "rb") as f:
+                prs = Presentation(BytesIO(f.read()))
+                for slide_num, slide in enumerate(prs.slides, 1):
+                    text = ""
+                    for shape in slide.shapes:
+                        if hasattr(shape, "text") and shape.text:
+                            text += shape.text + "\n"
+                    if text.strip():
+                        docs.append({"page_content": text, "metadata": {"source": self.file_path, "slide": slide_num}})
+        except Exception as e:
+            logger.error(f"Error loading PPTX {self.file_path}: {str(e)}")
+            return []
         return docs
 
 # Function to load documents
 def load_documents(files):
     documents = []
     for file in files:
-        file_path = file.name
-        if file_path.endswith(".pdf"):
-            loader = PyPDFLoader(file_path)
-            documents.extend(loader.load())
-        elif file_path.endswith(".txt"):
-            loader = TextLoader(file_path)
-            documents.extend(loader.load())
-        elif file_path.endswith(".docx"):
-            loader = Docx2txtLoader(file_path)
-            documents.extend(loader.load())
-        elif file_path.endswith(".pptx"):
-            loader = PPTXLoader(file_path)
-            documents.extend([{"page_content": doc["page_content"], "metadata": doc["metadata"]} for doc in loader.load()])
+        try:
+            file_path = file.name
+            logger.info(f"Loading file: {file_path}")
+            if file_path.endswith(".pdf"):
+                loader = PyPDFLoader(file_path)
+                documents.extend(loader.load())
+            elif file_path.endswith(".txt"):
+                loader = TextLoader(file_path)
+                documents.extend(loader.load())
+            elif file_path.endswith(".docx"):
+                loader = Docx2txtLoader(file_path)
+                documents.extend(loader.load())
+            elif file_path.endswith(".pptx"):
+                loader = PPTXLoader(file_path)
+                documents.extend([{"page_content": doc["page_content"], "metadata": doc["metadata"]} for doc in loader.load()])
+        except Exception as e:
+            logger.error(f"Error loading file {file_path}: {str(e)}")
+            continue
    return documents
 
 # Function to process documents and create vector store
 def process_documents(files, chunk_size, chunk_overlap, embedding_model):
-    global vector_store, qa_chain
+    global vector_store
     if not files:
         return "Please upload at least one document.", None
 
+    # Clear existing vector store to avoid dimensionality mismatch
+    if os.path.exists(PERSIST_DIRECTORY):
+        try:
+            shutil.rmtree(PERSIST_DIRECTORY)
+            logger.info("Cleared existing ChromaDB directory.")
+        except Exception as e:
+            logger.error(f"Error clearing ChromaDB directory: {str(e)}")
+            return f"Error clearing vector store: {str(e)}", None
+
     # Load documents
     documents = load_documents(files)
     if not documents:
-        return "No valid documents loaded.", None
+        return "No valid documents loaded. Check file formats or content.", None
 
     # Split documents
-    text_splitter = RecursiveCharacterTextSplitter(
-        chunk_size=int(chunk_size),
-        chunk_overlap=int(chunk_overlap),
-        length_function=len
-    )
-    doc_splits = text_splitter.split_documents(documents)
+    try:
+        text_splitter = RecursiveCharacterTextSplitter(
+            chunk_size=int(chunk_size),
+            chunk_overlap=int(chunk_overlap),
+            length_function=len
+        )
+        doc_splits = text_splitter.split_documents(documents)
+        logger.info(f"Split {len(documents)} documents into {len(doc_splits)} chunks.")
+    except Exception as e:
+        logger.error(f"Error splitting documents: {str(e)}")
+        return f"Error splitting documents: {str(e)}", None
 
     # Create embeddings
-    embeddings = HuggingFaceEmbeddings(model_name=EMBEDDING_MODELS[embedding_model])
+    try:
+        embeddings = HuggingFaceEmbeddings(model_name=EMBEDDING_MODELS[embedding_model])
+    except Exception as e:
+        logger.error(f"Error initializing embeddings for {embedding_model}: {str(e)}")
+        return f"Error initializing embeddings: {str(e)}", None
 
     # Create vector store
     try:
-        vector_store = Chroma.from_documents(doc_splits, embeddings, persist_directory="./chroma_db")
+        vector_store = Chroma.from_documents(doc_splits, embeddings, persist_directory=PERSIST_DIRECTORY)
         return f"Processed {len(documents)} documents into {len(doc_splits)} chunks.", None
     except Exception as e:
-        return f"Error processing documents: {str(e)}", None
+        logger.error(f"Error creating vector store: {str(e)}")
+        return f"Error creating vector store: {str(e)}", None
 
 # Function to initialize QA chain
 def initialize_qa_chain(llm_model, temperature):
     global qa_chain
+    if not vector_store:
+        return "Please process documents first.", None
+
     try:
         llm = HuggingFaceEndpoint(
             repo_id=LLM_MODELS[llm_model],
+            task="text-generation",
             temperature=float(temperature),
-            max_length=512,
+            max_new_tokens=512,
             huggingfacehub_api_token=os.environ["HUGGINGFACEHUB_API_TOKEN"]
         )
         qa_chain = ConversationalRetrievalChain.from_llm(
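
The chunk_size and chunk_overlap sliders feed directly into the splitter wrapped in the new try block. A standalone sketch of what those two parameters control (values and sample text are illustrative):

    from langchain.text_splitter import RecursiveCharacterTextSplitter

    splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=100, length_function=len)
    chunks = splitter.split_text("A long document body... " * 300)
    # Each chunk is at most ~1000 characters, and neighbouring chunks share
    # roughly 100 characters so a passage is not cut off without context.
    print(len(chunks), len(chunks[0]))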
@@ -114,46 +155,64 @@ def initialize_qa_chain(llm_model, temperature):
             retriever=vector_store.as_retriever(search_kwargs={"k": 3}),
             memory=memory
         )
+        logger.info(f"Initialized QA chain with {llm_model}.")
         return "QA chain initialized successfully.", None
     except Exception as e:
-        return f"Error initializing QA chain: {str(e)}", None
+        logger.error(f"Error initializing QA chain for {llm_model}: {str(e)}")
+        return f"Error initializing QA chain: {str(e)}. Ensure your HF token has access to {llm_model}.", None
 
 # Function to handle user query
 def answer_question(question, llm_model, embedding_model, temperature, chunk_size, chunk_overlap):
     global chat_history
-    if not vector_store or not qa_chain:
-        return "Please upload documents and initialize the QA chain.", chat_history
+    if not vector_store:
+        return "Please process documents first.", chat_history
+    if not qa_chain:
+        return "Please initialize the QA chain.", chat_history
+    if not question.strip():
+        return "Please enter a valid question.", chat_history
 
     try:
         response = qa_chain({"question": question})["answer"]
         chat_history.append(("User", question))
         chat_history.append(("Bot", response))
+        logger.info(f"Answered question: {question}")
         return response, chat_history
     except Exception as e:
+        logger.error(f"Error answering question: {str(e)}")
         return f"Error answering question: {str(e)}", chat_history
 
 # Function to export chat history
 def export_chat():
     if not chat_history:
         return "No chat history to export.", None
-    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
-    filename = f"chat_history_{timestamp}.txt"
-    with open(filename, "w") as f:
-        for role, message in chat_history:
-            f.write(f"{role}: {message}\n\n")
-    return f"Chat history exported to {filename}.", filename
+    try:
+        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
+        filename = f"chat_history_{timestamp}.txt"
+        with open(filename, "w") as f:
+            for role, message in chat_history:
+                f.write(f"{role}: {message}\n\n")
+        logger.info(f"Exported chat history to {filename}.")
+        return f"Chat history exported to {filename}.", filename
+    except Exception as e:
+        logger.error(f"Error exporting chat history: {str(e)}")
+        return f"Error exporting chat history: {str(e)}", None
 
 # Function to reset the app
 def reset_app():
     global vector_store, qa_chain, chat_history, memory
-    vector_store = None
-    qa_chain = None
-    chat_history = []
-    memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
-    if os.path.exists("./chroma_db"):
-        import shutil
-        shutil.rmtree("./chroma_db")
-    return "App reset successfully.", None
+    try:
+        vector_store = None
+        qa_chain = None
+        chat_history = []
+        memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
+        if os.path.exists(PERSIST_DIRECTORY):
+            shutil.rmtree(PERSIST_DIRECTORY)
+            logger.info("Cleared ChromaDB directory on reset.")
+        logger.info("App reset successfully.")
+        return "App reset successfully.", None
+    except Exception as e:
+        logger.error(f"Error resetting app: {str(e)}")
+        return f"Error resetting app: {str(e)}", None
 
 # Gradio interface
 with gr.Blocks(theme=gr.themes.Soft(), title="DocTalk: Document Q&A Chatbot") as demo:
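
Outside the Gradio UI, the guard clauses added above imply a fixed call order: process documents, initialize the chain, then ask questions. A minimal driver sketch against the module's own functions (the SimpleNamespace stand-in mimics the .name attribute Gradio file objects expose; the file path is hypothetical):

    from types import SimpleNamespace
    import app  # assumes this module is saved as app.py

    files = [SimpleNamespace(name="manual.pdf")]  # hypothetical upload
    status, _ = app.process_documents(files, 1000, 100, "Lightweight (MiniLM-L6)")
    status, _ = app.initialize_qa_chain("Lightweight (Gemma-2B)", 0.7)
    answer, history = app.answer_question("What does the manual cover?",
                                          "Lightweight (Gemma-2B)", "Lightweight (MiniLM-L6)",
                                          0.7, 1000, 100)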
@@ -171,7 +230,7 @@ with gr.Blocks(theme=gr.themes.Soft(), title="DocTalk: Document Q&A Chatbot") as demo:
         with gr.Column(scale=1):
             llm_model = gr.Dropdown(choices=list(LLM_MODELS.keys()), label="Select LLM Model", value="Lightweight (Gemma-2B)")
             embedding_model = gr.Dropdown(choices=list(EMBEDDING_MODELS.keys()), label="Select Embedding Model", value="Lightweight (MiniLM-L6)")
-            temperature = gr.Slider(minimum=0.0, maximum=1.0, step=0.1, value=0.7, label="Temperature")
+            temperature = gr.Slider(minimum=0.1, maximum=1.0, step=0.1, value=0.7, label="Temperature")
             chunk_size = gr.Slider(minimum=500, maximum=2000, step=100, value=1000, label="Chunk Size")
             chunk_overlap = gr.Slider(minimum=0, maximum=500, step=50, value=100, label="Chunk Overlap")
             init_button = gr.Button("Initialize QA Chain")
 