Update app.py
app.py CHANGED
```diff
@@ -145,25 +145,18 @@ def initialize_llm():
     try:
         print("Starting LLM initialization...")

-        api_token = os.environ.get('HUGGINGFACE_API_TOKEN')
+        api_token = os.environ.get('HUGGINGFACE_API_TOKEN')
         if not api_token:
             raise ValueError("HUGGINGFACE_API_TOKEN not found in environment variables")
-        print("API token found")
-
-        # Initialize with custom LLM
-        llm = CustomHuggingFaceInference(token=api_token)
-
-        # Test the LLM
-        print("Testing LLM with a simple prompt...")
-        test_response = llm("Hello, please reply with a short greeting.")
-        print(f"Test response received: {test_response}")

+        llm = HuggingFaceHub(
+            repo_id="mistralai/Mistral-7B-Instruct-v0.1",
+            huggingfacehub_api_token=api_token
+        )
         return llm

     except Exception as e:
         print(f"Error initializing LLM: {str(e)}")
-        import traceback
-        print(f"Traceback: {traceback.format_exc()}")
         raise


```
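For reference, the replacement initializer can be exercised on its own. Below is a minimal sketch, assuming the legacy `langchain` package layout in which `HuggingFaceHub` is imported from `langchain.llms` and LLM instances are callable with a plain prompt string; the import path is an assumption, while the `repo_id` and token handling come from the diff above.

```python
# Minimal standalone sketch of the new initialization path.
# Assumption: legacy langchain layout (HuggingFaceHub under langchain.llms).
import os

from langchain.llms import HuggingFaceHub

# The app reads the token from the environment, as in the diff above.
api_token = os.environ.get("HUGGINGFACE_API_TOKEN")
if not api_token:
    raise ValueError("HUGGINGFACE_API_TOKEN not found in environment variables")

llm = HuggingFaceHub(
    repo_id="mistralai/Mistral-7B-Instruct-v0.1",
    huggingfacehub_api_token=api_token,
)

# Legacy langchain LLMs are callable; this mirrors the smoke test
# that the commit removes from initialize_llm().
print(llm("Hello, please reply with a short greeting."))
```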
```diff
@@ -472,90 +465,48 @@ def get_chat_list():

 @app.route("/api/chat", methods=["POST"])
 def chat():
-    print("Entering chat endpoint")  # Log entry point
-
-    if not llm_chain:
-        print("Error: LLM chain not initialized")
-        return jsonify({
-            "success": False,
-            "response": "System is still initializing. Please try again in a moment."
-        })
-
     try:
-        # Log incoming request
         data = request.json
-        print(f"Received request data: {data}")
-
         user_input = data.get("message", "")
         session_id = data.get("sessionId", "default")
-        print(f"Processing message for session {session_id}: {user_input[:100]}...")

-
-
-
+        print(f"Received message: {user_input}")
+
+        # Prepare the prompt
+        prompt = f"""Please provide a helpful response to: {user_input}

+Guidelines:
+1. If code is needed, use ```python for code blocks
+2. Be clear and concise
+3. If you don't know something, say so
+"""
+
+        # Get response from LLM
         try:
-
-            print("Updating chat metadata...")
-
-            update_chat_metadata(session_id, user_input)
-
-            # Get memory variables
-            print("Getting memory variables...")
-            memory_vars = session.get_memory_variables()
-            print(f"Memory variables loaded: {str(memory_vars)[:200]}...")
-
-            # Generate response
-            print("Generating LLM response...")
-            raw_response = llm_chain.run(
-                user_request=user_input,
-                chat_history=memory_vars.get("chat_history", ""),
-                important_info="\n".join(session.important_info)
-            )
-            print(f"Raw response received: {raw_response[:200]}...")
-
-            # Extract important information
-            print("Extracting important info...")
-            new_important_info = extract_important_info(raw_response)
-            print(f"Extracted important info: {new_important_info}")
-
-            for info in new_important_info:
-                session.add_important_info(info)
-
-            # Format the response
-            print("Formatting response...")
-            formatted_response = format_response(raw_response)
-
-            # Store the response
-            print("Storing assistant response...")
-            session.add_message("assistant", formatted_response)
-
-            print("Successfully completed chat processing")
+            response = llm(prompt)
+            print(f"Generated response: {response[:100]}...")
+
             return jsonify({
-                "response": formatted_response,
                 "success": True,
-                "sessionId": session_id
+                "response": response
             })

-        except Exception as e:
-            print(f"Error during chat processing: {str(e)}")
-
-
-
+        except Exception as e:
+            print(f"Error generating response: {str(e)}")
+            return jsonify({
+                "success": False,
+                "response": "Error generating response. Please try again."
+            })

     except Exception as e:
-        import traceback
-        error_trace = traceback.format_exc()
         print(f"Error in chat endpoint: {str(e)}")
-        print(f"Full error traceback: {error_trace}")
-
         return jsonify({
-            "response": f"An error occurred: {str(e)}",
             "success": False,
-            "sessionId": session_id
+            "response": "An error occurred. Please try again."
         })


+
 @app.route("/api/new-chat", methods=["POST"])
 def new_chat():
     """Create a new chat session"""
```
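The slimmed-down endpoint keeps the same request and response contract, so a quick smoke test against a local development server might look like the sketch below. The host and port are assumptions; the `message`/`sessionId` request fields and the `success`/`response` keys are taken from the handler above.

```python
# Hypothetical smoke test for the simplified /api/chat endpoint.
# Assumption: the Flask app is serving on localhost:5000.
import requests

resp = requests.post(
    "http://localhost:5000/api/chat",
    json={"message": "Hello, what can you do?", "sessionId": "default"},
)
payload = resp.json()

# Every branch of the handler returns {"success": bool, "response": str}.
if payload["success"]:
    print(payload["response"])
else:
    print("Request failed:", payload["response"])
```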