Spaces:
Running
on
CPU Upgrade
Running
on
CPU Upgrade
import logging

from fastapi import FastAPI
import gradio as gr
from gradio.routes import mount_gradio_app

from api.router import router, verify_api_key
from config.settings import RESOURCE_GROUP
from db.models import fetch_models_for_group
from models.loader import load_models, model_pipelines
# Basic logging configuration for the whole service.
logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
)
logger = logging.getLogger(__name__)

# Create the FastAPI application.
app = FastAPI(
    title="Tamis AI Inference API",
    description="API pour l'inférence des modèles de classification d'objets",
    version="0.1.0",
)

# Register the API-key check as an HTTP middleware (runs on every request).
app.middleware("http")(verify_api_key)

# Attach the API routes.
app.include_router(router)
async def init_models():
    """Load the models at startup, shared by Gradio and FastAPI.

    Fetches the model descriptors for ``RESOURCE_GROUP`` from the database
    and loads them into the pipeline registry. Failures are logged and
    swallowed on purpose: the app starts with no models rather than
    refusing to boot, and the Gradio view simply shows an empty list.
    Raise a RuntimeError here instead if the service must not start
    without its models.
    """
    logger.info("Initializing models for Gradio and FastAPI...")
    try:
        models_data = await fetch_models_for_group(RESOURCE_GROUP)
        await load_models(models_data)
        logger.info("Models loaded successfully.")
    except Exception as e:
        # Deliberate best-effort: log with traceback and continue startup.
        logger.exception("Failed to initialize models: %s", e)
# Helper used by the Gradio view to display the loaded models.
def get_loaded_models_list():
    """Return the names of the models currently present in the registry."""
    return list(model_pipelines.keys())
# Build the Gradio interface: a single read-only view of the loaded models.
gradio_app = gr.Blocks(title="Tamis AI - Modèles Chargés")
with gradio_app:
    gr.Markdown("## Modèles actuellement chargés dans l'API")
    # Passing the callable (not its result) lets Gradio refresh the value.
    gr.JSON(get_loaded_models_list, label="Modèles Chargés")

# Mount the Gradio app at the root of the FastAPI application.
app = mount_gradio_app(
    app, gradio_app, path="/"
)
# Startup hook: load the models once the app boots (after Gradio is mounted).
# NOTE(review): this coroutine is never registered with the app in the visible
# code — an @app.on_event("startup") decorator appears to have been lost.
# Confirm and restore it, otherwise init_models() is never called.
async def startup():
    """Initialize the API: load the models from the database."""
    await init_models()  # Single consolidated initialization entry point.
# NOTE(review): no route decorator is attached in the visible code — likely a
# stripped @app.get("/health"). Confirm and restore so the endpoint is reachable.
async def health_check():
    """Health-check endpoint: report that the API process is up."""
    return {"status": "healthy"}