# LLMServer / main / main.py
# Provenance: commit 9fb6014 ("Setting logging level to trace", AurelioAguirre).
# NOTE(review): lines above the imports were Hugging Face page chrome
# ("raw / history blame / 1.46 kB") captured when the file was copied from
# the web UI; kept here as comments so the module remains importable.
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
import logging
import os
import uvicorn
from .routes import router
# Module-level logging: root config at INFO, plus a named logger for this module.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# FastAPI application with the default documentation endpoints exposed.
app = FastAPI(
    title="LLM Engine Service",
    docs_url="/docs",
    redoc_url="/redoc",
    openapi_url="/openapi.json",
)

# CORS: fully open (any origin/method/header, credentials allowed).
# NOTE(review): "*" origins with credentials is permissive — presumably
# intentional for a public Space; confirm before hardening.
_cors_options = dict(
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
app.add_middleware(CORSMiddleware, **_cors_options)

# Mount all service routes defined in routes.py.
app.include_router(router)
def main():
    """Configure and launch the LLM Engine service under uvicorn.

    Reads the bind address from the ``LLM_ENGINE_HOST`` / ``LLM_ENGINE_PORT``
    environment variables (defaulting to ``0.0.0.0:7860`` — 7860 is the
    conventional Hugging Face Spaces port), logs the available endpoints,
    then blocks in ``uvicorn.run`` until the server exits.

    Raises:
        ValueError: if ``LLM_ENGINE_PORT`` is set to a non-integer value.
    """
    host = os.getenv("LLM_ENGINE_HOST", "0.0.0.0")
    port = int(os.getenv("LLM_ENGINE_PORT", "7860"))  # Default to 7860 for Spaces

    # Fixed: the original message ended with a dangling ", or: " fragment.
    # Lazy %-args so the record is only formatted if actually emitted.
    logger.info("Starting LLM Engine service on %s:%s", host, port)
    logger.info("Available endpoints:")
    for endpoint in (
        "/",
        "/health",
        "/initialize",
        "/generate",
        "/initialize/custom",
        "/generate/stream",
        "/docs",
        "/redoc",
        "/openapi.json",
    ):
        logger.info(" - %s", endpoint)

    # Start the server; "trace" is uvicorn's most verbose log level.
    uvicorn.run(
        app,
        host=host,
        port=port,
        log_level="trace",
    )


if __name__ == "__main__":
    main()