import uvicorn
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from contextlib import asynccontextmanager
from app.routes.embedding_routes import router as embedding_router
from app.routes.prediction_routes import router as prediction_router
from app.services.model_service import initialize_model, load_embeddings
@asynccontextmanager
async def lifespan(app: FastAPI):
    """
    FastAPI lifespan handler.

    Runs once before the app starts serving: initializes the model and
    attempts to load previously stored embeddings, logging the outcome.
    The post-``yield`` section would hold shutdown cleanup (none needed).
    """
    print("Initializing model...")
    initialize_model()

    print("Loading embeddings...")
    q_embeddings, a_embeddings, qa_pairs = load_embeddings()

    # Embeddings are usable only when all three artifacts were found.
    if all(part is not None for part in (q_embeddings, a_embeddings, qa_pairs)):
        print(f"Embeddings loaded successfully. {len(qa_pairs)} QA pairs available.")
    else:
        print(
            "No embeddings found. Please use the /api/create-embeddings endpoint to create embeddings."
        )

    yield
    # No shutdown cleanup required at the moment.
# Application instance; the lifespan hook handles model/embedding startup.
app = FastAPI(
    title="Taiken Chatbot API",
    description="API for embedding creation and answer prediction",
    version="1.0.0",
    lifespan=lifespan,
)

# CORS: fully open to any origin/method/header.
# NOTE(review): wildcard origins combined with allow_credentials=True is a
# permissive setup — confirm this is intended before exposing publicly.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# Mount feature routers under a common /api prefix.
for router, tag in ((embedding_router, "Embedding"), (prediction_router, "Prediction")):
    app.include_router(router, prefix="/api", tags=[tag])
@app.get("/", tags=["Health"])
async def root():
    """Health-check endpoint: confirms the API is up and responding."""
    payload = {"message": "Hello World"}
    return payload
if __name__ == "__main__":
    # Run the API with uvicorn when executed directly.
    # NOTE: the original passed reload=True alongside the app *object*;
    # uvicorn ignores reload (with a runtime warning) unless the app is
    # given as an import string (e.g. "main:app"). The flag is dropped
    # rather than guessing this module's import path — for auto-reload
    # during development, use: `uvicorn main:app --reload`.
    uvicorn.run(app, host="0.0.0.0", port=8000)