fatmata committed on
Commit
381aae1
·
verified ·
1 Parent(s): 7090deb

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +10 -14
app.py CHANGED
@@ -4,22 +4,18 @@ import torch
4
 
5
  app = FastAPI()
6
 
7
- # Définir le chemin correct vers ton modèle
8
- MODEL_PATH = "fatmata/psyboy/psybot_model" # Remplace par le bon chemin
9
-
10
- # Charger le modèle et le tokenizer
11
- tokenizer = AutoTokenizer.from_pretrained(MODEL_PATH)
12
- model = AutoModelForCausalLM.from_pretrained(MODEL_PATH)
13
 
14
  @app.get("/")
15
- def read_root():
16
- return {"message": "Hello from PsyBot API!"}
17
 
18
- @app.post("/generate/")
19
- def generate_response(prompt: str):
20
  inputs = tokenizer(prompt, return_tensors="pt")
21
- with torch.no_grad():
22
- output = model.generate(**inputs, max_length=150)
23
-
24
- response = tokenizer.decode(output[0], skip_special_tokens=True)
25
  return {"response": response}
 
4
 
5
  app = FastAPI()
6
 
7
+ # Charger le modèle et le tokenizer depuis Hugging Face
8
+ MODEL_NAME = "fatmata/psybot"
9
+ tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
10
+ model = AutoModelForCausalLM.from_pretrained(MODEL_NAME, torch_dtype=torch.float32)
 
 
11
 
12
  @app.get("/")
13
+ def home():
14
+ return {"message": "Bienvenue sur l'API PsyBot !"}
15
 
16
+ @app.post("/generate")
17
+ def generate_text(prompt: str):
18
  inputs = tokenizer(prompt, return_tensors="pt")
19
+ outputs = model.generate(**inputs, max_length=100)
20
+ response = tokenizer.decode(outputs[0], skip_special_tokens=True)
 
 
21
  return {"response": response}