from flask import Flask, request, jsonify
from huggingface_hub import InferenceClient
from gunicorn.glogging import Logger
class Config:
    # Minimal stand-in for gunicorn's configuration object, providing just the
    # attributes that gunicorn.glogging.Logger reads during setup().
    loglevel = "info"
    errorlog = "-"    # "-" sends the error log to stderr; use a file path to log to a file
    accesslog = None  # set to a file path (or "-") to enable access logging
    syslog = False
    syslog_facility = "daemon"
    syslog_prefix = "gunicorn"
    capture_output = True
    logconfig = None
    logconfig_json = None
    logconfig_dict = None
    disable_redirect_access_to_syslog = False

cfg = Config()
app = Flask(__name__)
logger = Logger(cfg)
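# Note (assumption, not taken from this repo): pairing gunicorn's Logger with a
# hand-rolled Config suggests the app is normally served by gunicorn, e.g.:
#   gunicorn --workers 2 --bind 0.0.0.0:7860 app:app
# The worker count and port above are illustrative only.
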
client = InferenceClient("meta-llama/Llama-3.1-8B-Instruct")
DEFAULT_MAX_TOKENS = 512
DEFAULT_TEMPERATURE = 0.7
DEFAULT_TOP_P = 0.95

def generate_journal_suggestion(current_page):
    try:
        # Romanian prompt: asks the model for a single reflective question the
        # user could pose to themselves, based on the current journal entry.
        suggestion_prompt = (
            f"""Pe baza înregistrării din jurnal: '{current_page}', generează o singură întrebare pe care utilizatorul ar putea să și-o pună într-un jurnal.
            Întrebarea ar trebui să încurajeze reflecția personală mai profundă, explorarea sentimentelor sau clarificarea obiectivelor."""
        )
        logger.info("Generated suggestion prompt: %s", suggestion_prompt)

        suggestion_response = ""
        response_stream = client.chat_completion(
            [
                {"role": "user", "content": suggestion_prompt}
            ],
            max_tokens=150,
            stream=True,
            temperature=DEFAULT_TEMPERATURE,
            top_p=DEFAULT_TOP_P,
        )
        logger.info("Response stream received.")

        for message in response_stream:
            logger.info("Message received: %s", message)
            token = message.choices[0].delta.content
            # Streamed chunks can carry an empty/None delta (e.g. the final
            # chunk), so only append real text.
            if token:
                suggestion_response += token

        return suggestion_response

    except Exception as e:
        logger.error("An error occurred: %s", e)
        # Re-raise so the calling route's error handling returns the JSON 500
        # response; returning a (response, status) tuple from this helper would
        # get embedded in the caller's success payload instead.
        raise

@app.route("/", methods=["POST", "GET"])
def home():
    return "Hi!"

@app.route("/chat", methods=["POST"])
def chat():
    try:
        data = request.json
        # "message" and "system_message" are read from the request but are not
        # referenced below; only "journal_page" drives the response.
        message = data.get("message", "")
        system_message = data.get("system_message", "You are a friendly chatbot.")
        journal_page = data.get("journal_page", "")

        suggestion = ""
        if journal_page:
            suggestion = generate_journal_suggestion(journal_page)

        return jsonify({"journal_suggestion": suggestion})
    except Exception as e:
        logger.error("Error in chat endpoint: %s", e)
        return jsonify({"error": str(e)}), 500

if __name__ == "__main__":
    # Local development entry point; debug mode should not be used in production.
    app.run(debug=True)
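
# --- Example request (sketch) ----------------------------------------------
# A minimal client call against the /chat endpoint, assuming the app is
# running locally on Flask's default port 5000; the journal text is purely
# illustrative.
#
#   import requests
#
#   resp = requests.post(
#       "http://127.0.0.1:5000/chat",
#       json={"journal_page": "Today I felt overwhelmed at work."},
#   )
#   print(resp.json()["journal_suggestion"])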