logasanjeev committed on
Commit 60ae86a · verified · 1 Parent(s): fec03fd

Update app.py

Files changed (1)
  1. app.py +4 -4
app.py CHANGED
@@ -133,14 +133,14 @@ def process_documents(files, chunk_size, chunk_overlap, embedding_model):
 
     # Create vector store
     try:
-        # Reset ChromaDB client to avoid tenant issues
+        # Use a unique collection name to avoid conflicts
+        collection_name = f"doctalk_collection_{int(time.time())}"
         client = chromadb.PersistentClient(path=PERSIST_DIRECTORY)
-        client.reset()
         vector_store = Chroma.from_documents(
             documents=doc_splits,
             embedding=embeddings,
             persist_directory=PERSIST_DIRECTORY,
-            collection_name="doctalk_collection"
+            collection_name=collection_name
         )
         return f"Processed {len(documents)} documents into {len(doc_splits)} chunks.", None
     except Exception as e:
@@ -167,7 +167,7 @@ def initialize_qa_chain(llm_model, temperature):
             memory=memory
         )
         logger.info(f"Initialized QA chain with {llm_model}.")
-        return "QA chain initialized successfully.", None
+        return "QA Doctor: QA chain initialized successfully.", None
     except Exception as e:
         logger.error(f"Error initializing QA chain for {llm_model}: {str(e)}")
         return f"Error initializing QA chain: {str(e)}. Ensure your HF token has access to {llm_model}.", None