File size: 3,371 Bytes
0031a0c
 
 
 
 
0282d73
8fd8ae7
 
0031a0c
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
9c9bd93
0031a0c
 
 
 
 
8fd8ae7
 
 
 
9c9bd93
0031a0c
 
 
 
 
 
8fd8ae7
 
 
 
 
 
9c9bd93
0282d73
 
0031a0c
 
 
 
9c9bd93
0282d73
 
 
 
0031a0c
 
cfa4af6
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
from bot import llm
# from tools import model_llm_rag
from flask import Flask, request, jsonify
# from langchain_core.messages import HumanMessage, AIMessage
# from langchain.prompts import ChatPromptTemplate
from deep_translator import GoogleTranslator
from database.database_util import get_user_memory
from datetime import datetime

# Flask application exposing the chatbot's HTTP API (see /query below).
app = Flask(__name__)

# First-message-of-the-day prompt for the "Panda" hemodialysis chatbot.
# Only {username} is substituted at format time; the day/date/time and the
# schedule shown in the context section are hard-coded example values.
# NOTE(review): "Current day: Monday" with "Current date: 2024/05/12" is
# internally inconsistent (2024-05-12 was a Sunday) — confirm whether these
# fields should be filled dynamically before this template is used live.
PROMPT_TEMPLATE = """
You are Panda, a hemodialysis chatbot that assists patients with their treatment. You are assisting a patient named {username}. Greet them appropriately based on the current time of day (good morning, good afternoon, or good evening).

Since this is the first conversation of the day, you must remind the patient about their hemodialysis schedule by identifying the next upcoming session based on the current date and time. Ensure you use the current date and time to provide the correct information, whether the next session is today or later in the week.

Ask the patient if they have any questions or concerns about their treatment.

---
Current day: Monday

Current date: 2024/05/12

Current time: 07:00:00 AM

Hemodialysis Schedules: Tuesday and Friday at 08:00 AM

Patient's Name: {username}
---
Output:

"""

# @app.route('/first_message', methods=['POST'])
# def first_message():
#     data = request.json
#     username = data.get('username')
#     current_date_time = data.get('current_date_time')
#     prompt_template = ChatPromptTemplate.from_template(PROMPT_TEMPLATE)
#     prompt = prompt_template.format(username=username)
#     response = model_llm_rag.invoke(prompt)
#     translate = GoogleTranslator(source='en', target='id').translate(response)
#     llm.memory.chat_memory.add_ai_message(AIMessage(response))
#     return jsonify({"response": translate})

def simulate_llm_query(user_input, username, schedule_one, schedule_two, liquid_intake, temperature, blood_pressure, food_intake, name):
    """Run the user's (already English-translated) input through the LLM.

    Despite the name, this is not a stub: it loads the patient's stored
    conversation memory via ``get_user_memory`` and delegates to
    ``llm.query``, passing along the dialysis schedules, vitals and the
    current local weekday/time so the model can answer in context.

    Returns whatever ``llm.query`` returns (expected: the response text).
    """
    # Persistent per-user chat history kept by the database layer.
    patient_memory = get_user_memory(username)

    # e.g. "Monday, 07:00 AM" — naive local time, matching the original.
    timestamp = datetime.now().strftime("%A, %I:%M %p")

    return llm.query(
        user_input,
        username,
        patient_memory,
        schedule_one,
        schedule_two,
        liquid_intake,
        temperature,
        blood_pressure,
        food_intake,
        timestamp,
        name,
    )

@app.route('/query', methods=['POST'])
def query_llm():
    """Handle POST /query.

    Expects a JSON body with 'input' (Indonesian text) plus patient context
    fields. Translates the input to English, queries the LLM via
    simulate_llm_query, translates the answer back to Indonesian, and
    returns ``{"response": ...}``. Returns 400 if 'input' is missing.
    """
    # Guard against a missing/non-JSON body: request.json can be None,
    # which would make the .get() calls below raise AttributeError.
    data = request.json or {}
    user_input = data.get('input')

    # Validate BEFORE doing any work. The original code translated first,
    # so a missing input reached GoogleTranslator (and could raise there)
    # before the 400 response was ever produced.
    if not user_input:
        return jsonify({"error": "No input provided"}), 400

    username = data.get('username')
    # Coerce context fields to str so downstream prompt formatting is safe
    # even when the client sends numbers or omits a field (-> "None").
    schedule_one = str(data.get('schedule_one'))
    schedule_two = str(data.get('schedule_two'))
    liquid_intake = str(data.get('liquid_intake'))
    temperature = str(data.get('temperature'))
    blood_pressure = str(data.get('blood_pressure'))
    food_intake = str(data.get('food_intake'))
    name = str(data.get('name'))

    # Indonesian -> English for the (English-prompted) LLM.
    input_translate = GoogleTranslator(source='id', target='en').translate(user_input)

    response = simulate_llm_query(input_translate, username, schedule_one, schedule_two, liquid_intake, temperature, blood_pressure, food_intake, name)

    # English -> Indonesian for the patient-facing reply.
    output_translate = GoogleTranslator(source='en', target='id').translate(response)

    return jsonify({"response": output_translate})

# Script entry point: run the development server.
if __name__ == '__main__':
    # NOTE(review): binds to all interfaces on port 7860 with no debug flag;
    # confirm this is intended for the deployment environment (a production
    # WSGI server is the usual choice over app.run).
    app.run(host='0.0.0.0', port=7860)