from flask import Flask, request, jsonify
from transformers import AutoModelForCausalLM, AutoTokenizer

MODEL_NAME = "meta-llama/Llama-3.1-8B-Instruct"

# Load the tokenizer and model once at startup. An 8B-parameter model on CPU
# is very slow and memory-hungry; this setup is only practical for
# smoke-testing the endpoints.
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
model = AutoModelForCausalLM.from_pretrained(MODEL_NAME).to("cpu")

app = Flask(__name__)

# Default sampling parameters for generation.
DEFAULT_TEMPERATURE = 0.7
DEFAULT_MAX_TOKENS = 150
DEFAULT_TOP_P = 0.95
def generate_journal_suggestion(current_page):
    """Generate a single reflective journaling question for the given entry."""
    try:
        suggestion_prompt = (
            f"""Based on the journal entry: '{current_page}', generate a single question the user could ask themselves in a journal.
The question should encourage deeper personal reflection, exploring feelings, or clarifying goals."""
        )
        input_ids = tokenizer(suggestion_prompt, return_tensors="pt").input_ids.to("cpu")
        output_ids = model.generate(
            input_ids,
            # max_new_tokens bounds only the generated continuation; the
            # original max_length also counted the prompt, so a long journal
            # entry could leave no room for the answer.
            max_new_tokens=DEFAULT_MAX_TOKENS,
            temperature=DEFAULT_TEMPERATURE,
            top_p=DEFAULT_TOP_P,
            do_sample=True,
            pad_token_id=tokenizer.eos_token_id,
        )
        # generate() returns the prompt followed by the completion, so decode
        # only the newly generated tokens.
        suggestion_response = tokenizer.decode(
            output_ids[0][input_ids.shape[-1]:], skip_special_tokens=True
        )
        return suggestion_response
    except Exception as e:
        return f"Error: {str(e)}"
@app.route("/", methods=["POST", "GET"])
def home():
return "Hi!"
@app.route("/chat", methods=["POST"])
def chat():
data = request.json
message = data.get("message", "")
system_message = data.get("system_message", "You are a friendly chatbot.")
journal_page = data.get("journal_page", "")
suggestion = ""
if journal_page:
suggestion = generate_journal_suggestion(journal_page)
return jsonify({"journal_suggestion": suggestion})
if __name__ == "__main__":
    # debug=True enables the reloader and debugger; disable it in production.
    app.run(debug=True)
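
# Example request against the /chat endpoint (a sketch; assumes the server is
# running locally on Flask's default port 5000):
#
#   curl -X POST http://127.0.0.1:5000/chat \
#        -H "Content-Type: application/json" \
#        -d '{"journal_page": "Today I felt overwhelmed at work."}'
#
# The response has the form: {"journal_suggestion": "..."}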