Update app.py
app.py
CHANGED
@@ -237,6 +237,7 @@ from simple_salesforce import Salesforce, SalesforceLogin
 from langchain_groq import ChatGroq
 from langchain_core.prompts import ChatPromptTemplate
 from llama_index.core import StorageContext, VectorStoreIndex, SimpleDirectoryReader, Settings
+from llama_index.core import load_index_from_storage  # Added missing import
 from llama_index.embeddings.huggingface import HuggingFaceEmbedding
 
 # Configure logging
@@ -250,7 +251,7 @@ class MessageRequest(BaseModel):
 # Initialize FastAPI app
 app = FastAPI()
 
-# Allow CORS requests
+# Allow CORS requests (restrict in production)
 app.add_middleware(
     CORSMiddleware,
     allow_origins=["*"],
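The new comment flags the wildcard origin. A minimal sketch of what "restrict in production" implies: whitelist the real frontend instead of "*". The origin URL here is a placeholder, not from this repo.

from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware

app = FastAPI()

# Placeholder origin; swap in the actual frontend URL at deploy time.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["https://chatbot.example.com"],
    allow_credentials=True,
    allow_methods=["GET", "POST"],
    allow_headers=["*"],
)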
@@ -272,7 +273,7 @@ for var in required_env_vars:
 
 # Initialize Groq model
 GROQ_API_KEY = os.getenv("CHATGROQ_API_KEY")
-GROQ_MODEL = "llama3-8b-8192"
+GROQ_MODEL = "llama3-8b-8192"
 try:
     llm = ChatGroq(
         model_name=GROQ_MODEL,
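For context, a sketch of the ChatGroq initialization this hunk touches, assuming the CHATGROQ_API_KEY environment variable used elsewhere in app.py; the temperature value and the error handling are illustrative, since the hunk cuts off before the except block.

import os
from langchain_groq import ChatGroq

GROQ_MODEL = "llama3-8b-8192"
try:
    llm = ChatGroq(
        model_name=GROQ_MODEL,
        api_key=os.getenv("CHATGROQ_API_KEY"),
        temperature=0.1,  # illustrative; the repo's value is not shown in this hunk
    )
except Exception as e:
    raise RuntimeError(f"Failed to initialize Groq model: {e}")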
@@ -324,7 +325,7 @@ def data_ingestion_from_directory():
             logger.warning("No documents found in PDF_DIRECTORY")
             return
         storage_context = StorageContext.from_defaults()
-        index = VectorStoreIndex.from_documents(documents)
+        index = VectorStoreIndex.from_documents(documents, storage_context=storage_context)
         index.storage_context.persist(persist_dir=PERSIST_DIR)
         logger.info("Data ingestion completed successfully")
     except Exception as e:
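The fix threads storage_context into from_documents so the persisted store matches what load_index_from_storage later reads back. A sketch of the full round trip; the directory values are placeholders standing in for the repo's PDF_DIRECTORY and PERSIST_DIR.

from llama_index.core import (
    SimpleDirectoryReader,
    StorageContext,
    VectorStoreIndex,
    load_index_from_storage,
)

PDF_DIRECTORY = "data"  # assumed value
PERSIST_DIR = "db"      # assumed value

# Ingest: build the index inside an explicit storage context, then persist it.
documents = SimpleDirectoryReader(PDF_DIRECTORY).load_data()
storage_context = StorageContext.from_defaults()
index = VectorStoreIndex.from_documents(documents, storage_context=storage_context)
index.storage_context.persist(persist_dir=PERSIST_DIR)

# Reload later (this is what handle_query and the new /health route do):
storage_context = StorageContext.from_defaults(persist_dir=PERSIST_DIR)
index = load_index_from_storage(storage_context)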
@@ -343,7 +344,7 @@ initialize()  # Run initialization tasks
 def handle_query(query):
     # Prepare context from chat history
     chat_context = ""
-    for past_query, response in reversed(current_chat_history[-10:]):  # Limit
+    for past_query, response in reversed(current_chat_history[-10:]):  # Limit to last 10 exchanges
         if past_query.strip():
             chat_context += f"User asked: '{past_query}'\nBot answered: '{response}'\n"
 
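The history list is capped by hand with len()/pop(0) later in this file; a collections.deque with maxlen does the same eviction automatically. One caveat: deque does not support slicing, so the [-10:] window above would become list(history)[-10:]. A sketch with an assumed cap value:

from collections import deque

MAX_HISTORY_SIZE = 10  # assumed; the real constant is defined elsewhere in app.py

current_chat_history = deque(maxlen=MAX_HISTORY_SIZE)
current_chat_history.append(("hello", "Hi! How can I help?"))

# Oldest entries fall off automatically once maxlen is reached.
recent = list(current_chat_history)[-10:]  # slicing requires a list copy
for past_query, response in reversed(recent):
    print(past_query, "->", response)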
@@ -351,25 +352,27 @@ def handle_query(query):
     try:
         storage_context = StorageContext.from_defaults(persist_dir=PERSIST_DIR)
         index = load_index_from_storage(storage_context)
-        query_engine = index.as_query_engine(similarity_top_k=2)
+        query_engine = index.as_query_engine(similarity_top_k=2)
         retrieved = query_engine.query(query)
         doc_context = retrieved.response if hasattr(retrieved, 'response') else "No relevant documents found."
+        logger.info(f"Retrieved documents for query '{query}': {doc_context[:100]}...")
     except Exception as e:
         logger.error(f"Error retrieving documents: {e}")
         doc_context = "Failed to retrieve documents."
 
-    # Construct the prompt with
+    # Construct the prompt with Redferns Tech focus
     prompt_template = ChatPromptTemplate.from_messages([
         ("system", """
-        You are
-
-
+        You are Clara Redfernstech, a chatbot for Redferns Tech, a leader in data science, machine learning, and AI solutions.
+        Provide accurate, professional answers in 10-15 words based on the provided document context and chat history.
+        Focus on Redferns Tech's expertise in data science and AI.
+
         Document Context:
         {doc_context}
-
+
         Chat History:
         {chat_context}
-
+
         Question:
         {query}
         """),
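To show how a ChatPromptTemplate like this is actually executed: a minimal LCEL sketch. The human message and the chain wiring are assumptions, since the hunk cuts off after the system block; ChatGroq reads GROQ_API_KEY from the environment when no key is passed.

from langchain_core.prompts import ChatPromptTemplate
from langchain_groq import ChatGroq

llm = ChatGroq(model_name="llama3-8b-8192")  # assumes GROQ_API_KEY is set in the environment

prompt_template = ChatPromptTemplate.from_messages([
    ("system", "Context:\n{doc_context}\n\nHistory:\n{chat_context}"),
    ("human", "{query}"),  # assumed; the original's messages after the system block are not shown
])

chain = prompt_template | llm  # LCEL pipe: format the prompt, then call the model
result = chain.invoke({
    "doc_context": "Redferns Tech builds ML and data science solutions.",
    "chat_context": "",
    "query": "What does Redferns Tech specialize in?",
})
print(result.content)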
@@ -386,7 +389,7 @@ def handle_query(query):
 
     # Update chat history
     if len(current_chat_history) >= MAX_HISTORY_SIZE:
-        current_chat_history.pop(0)
+        current_chat_history.pop(0)
     current_chat_history.append((query, response_text))
     return response_text
 
@@ -433,7 +436,7 @@ async def receive_form_data(request: Request):
         data = {
             'FirstName': first_name,
             'LastName': last_name,
-            'Description': 'hii',
+            'Description': 'hii',
             'Company': form_data.get('company', ''),
             'Phone': form_data.get('phone', '').strip(),
             'Email': form_data.get('email', ''),
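The dict built here matches Salesforce Lead fields, so the endpoint presumably creates a Lead after this hunk. A hedged sketch of that call via simple_salesforce; credentials and field values are placeholders.

from simple_salesforce import Salesforce

sf = Salesforce(
    username="user@example.com",   # placeholder credentials
    password="********",
    security_token="********",
)

data = {
    'FirstName': 'Ada',
    'LastName': 'Lovelace',
    'Description': 'hii',
    'Company': 'Example Co',
    'Phone': '+1-555-0100',
    'Email': 'ada@example.com',
}

result = sf.Lead.create(data)  # returns e.g. {'id': '...', 'success': True, 'errors': []}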
@@ -459,14 +462,25 @@ async def chat(request: MessageRequest):
         "timestamp": datetime.datetime.now().isoformat()
     }
     if len(chat_history) >= MAX_HISTORY_SIZE:
-        chat_history.pop(0)
+        chat_history.pop(0)
     chat_history.append(message_data)
     logger.info(f"Chat message processed: {message}")
     return {"response": response}
 
+@app.get("/health")
+async def health_check():
+    try:
+        storage_context = StorageContext.from_defaults(persist_dir=PERSIST_DIR)
+        index = load_index_from_storage(storage_context)
+        logger.info("Vector index loaded successfully")
+        return {"status": "healthy", "pdf_ingestion": "successful"}
+    except Exception as e:
+        logger.error(f"Health check failed: {e}")
+        return {"status": "unhealthy", "error": str(e)}
+
 @app.get("/")
 def read_root():
-    return {"message": "Welcome to the API"}
+    return {"message": "Welcome to the Redferns Tech Chatbot API"}
 
 def split_name(full_name):
     words = full_name.strip().split()
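The new /health route can be smoke-tested without a running server using FastAPI's TestClient. A sketch, assuming this module is importable as app:

from fastapi.testclient import TestClient

from app import app  # assumes the Space's app.py is importable as `app`

client = TestClient(app)
resp = client.get("/health")
print(resp.status_code, resp.json())
# on success: 200 {"status": "healthy", "pdf_ingestion": "successful"}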