fatmata committed · Commit 843b024 · verified · 1 Parent(s): c6dcbc7

Update app.py

Files changed (1): app.py (+17 -27)
app.py CHANGED
@@ -1,32 +1,22 @@
- import os
- from flask import Flask, request, jsonify
- from transformers import AutoTokenizer, AutoModelForCausalLM

- # 🚀 Avoid the cache issue in Hugging Face Spaces
- os.environ["HF_HOME"] = "/app/huggingface_cache"

- # 📌 Load the model from Hugging Face
- MODEL_NAME = "fatmata/psybot"
- tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
- model = AutoModelForCausalLM.from_pretrained(MODEL_NAME)

- # 🌐 Create the Flask application
- app = Flask(__name__)

- @app.route('/chat', methods=['POST'])
- def chat():
-     """Endpoint that generates a response."""
-     data = request.json
-     user_input = data.get("message", "")

-     if not user_input:
-         return jsonify({"error": "Aucun message fourni"}), 400
-
-     input_ids = tokenizer.encode(user_input, return_tensors="pt")
-     output = model.generate(input_ids, max_length=150, num_return_sequences=1)
-     response_text = tokenizer.decode(output[0], skip_special_tokens=True)
-
-     return jsonify({"response": response_text})
-
- if __name__ == '__main__':
-     app.run(host='0.0.0.0', port=int(os.environ.get("PORT", 7860)), debug=False)

+ from fastapi import FastAPI
+ from pydantic import BaseModel
+ from transformers import pipeline

+ app = FastAPI()

+ # Load the Hugging Face model
+ model_name = "fatmata/psybot"
+ generator = pipeline("text-generation", model=model_name)

+ # Define the request body format
+ class TextRequest(BaseModel):
+     prompt: str

+ @app.get("/")
+ def home():
+     return {"message": "Bienvenue sur l'API PsyBot !"}

+ @app.post("/generate/")
+ def generate_text(request: TextRequest):
+     response = generator(request.prompt, max_length=100, do_sample=True)
+     return {"response": response[0]["generated_text"]}