psybot-api / app.py
fatmata's picture
Update app.py
7090deb verified
raw
history blame
774 Bytes
from fastapi import FastAPI
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
app = FastAPI()
# Path to the fine-tuned model.
# NOTE(review): a Hugging Face hub id has exactly one slash ("user/repo");
# "fatmata/psyboy/psybot_model" can only resolve as a *local* directory
# path — confirm it points at the right location before deploying.
MODEL_PATH = "fatmata/psyboy/psybot_model" # Replace with the correct path
# Load the model and tokenizer once at startup (module import time),
# so every request reuses the same in-memory instances.
tokenizer = AutoTokenizer.from_pretrained(MODEL_PATH)
model = AutoModelForCausalLM.from_pretrained(MODEL_PATH)
@app.get("/")
def read_root():
    """Health-check endpoint: confirms the API process is up and serving."""
    return {"message": "Hello from PsyBot API!"}
@app.post("/generate/")
def generate_response(prompt: str):
    """Generate a model completion for *prompt* and return it as JSON.

    NOTE(review): a bare ``str`` parameter on a POST route is treated by
    FastAPI as a *query* parameter, not a JSON body — confirm this is the
    intended request shape for clients.

    Returns:
        dict: ``{"response": <decoded text>}``. The decoded text includes
        the prompt itself, since the full output sequence is decoded.
    """
    inputs = tokenizer(prompt, return_tensors="pt")
    # Inference only — disable gradient tracking to save memory/compute.
    with torch.no_grad():
        # max_length counts the prompt tokens too, so very long prompts
        # leave little or no room for newly generated tokens.
        output = model.generate(**inputs, max_length=150)
    response = tokenizer.decode(output[0], skip_special_tokens=True)
    return {"response": response}