import uvicorn
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from contextlib import asynccontextmanager

from app.routes.embedding_routes import router as embedding_router
from app.routes.prediction_routes import router as prediction_router
from app.services.model_service import initialize_model, load_embeddings


@asynccontextmanager
async def lifespan(app: FastAPI):
    """
    Initialize the model and load embeddings when the application starts.
    """
    print("Initializing model...")
    initialize_model()

    print("Loading embeddings...")
    question_embeddings, answer_embeddings, qa_data = load_embeddings()
    if (
        question_embeddings is not None
        and answer_embeddings is not None
        and qa_data is not None
    ):
        print(f"Embeddings loaded successfully. {len(qa_data)} QA pairs available.")
    else:
        print(
            "No embeddings found. Please use the /api/create-embeddings endpoint to create embeddings."
        )

    yield
    # Cleanup code (if any) would go here


app = FastAPI(
    title="Taiken Chatbot API",
    description="API for embedding creation and answer prediction",
    version="1.0.0",
    lifespan=lifespan,
)

# Add CORS middleware
# NOTE: wildcard origins combined with credentials is very permissive;
# restrict allow_origins for production deployments.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# Include routers
app.include_router(embedding_router, prefix="/api", tags=["Embedding"])
app.include_router(prediction_router, prefix="/api", tags=["Prediction"])


@app.get("/")
async def root():
    return {"message": "Hello World"}


if __name__ == "__main__":
    # reload=True requires an import string instead of the app object;
    # "main:app" assumes this file is saved as main.py.
    uvicorn.run("main:app", host="0.0.0.0", port=8000, reload=True)
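
# Example usage (a sketch; the exact request schemas live in app/routes and are
# not shown here, and the HTTP method for create-embeddings is an assumption):
#   $ python main.py                               # assumes this file is main.py
#   $ curl http://localhost:8000/                  # -> {"message": "Hello World"}
#   $ curl -X POST http://localhost:8000/api/create-embeddings
#     (builds the embeddings that lifespan() tries to load on startup)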