# Kokoro-FastAPI/api/src/main.py

"""
FastAPI OpenAI Compatible API
"""
from contextlib import asynccontextmanager
import sys
import uvicorn
from loguru import logger
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from .core.config import settings
from .services.tts_model import TTSModel
from .services.tts_service import TTSService
from .routers.openai_compatible import router as openai_router
from .routers.development import router as dev_router
def setup_logger():
    """Install a single colorized stdout handler with the app's log format."""
    handler = {
        "sink": sys.stdout,
        "format": "<fg #2E8B57>{time:hh:mm:ss A}</fg #2E8B57> | "
        "{level: <8} | "
        "{message}",
        "colorize": True,
        "level": "INFO",
    }
    # Drop loguru's default stderr handler before installing ours
    logger.remove()
    logger.configure(handlers=[handler])
    # Render ERROR-level records in red instead of the default color
    logger.level("ERROR", color="<red>")
# Configure logger at import time so every startup message uses the custom format
setup_logger()
@asynccontextmanager
async def lifespan(app: FastAPI):
    """Lifespan context manager for model initialization.

    Loads the TTS model and voice packs once, before the app starts
    serving requests, logs a startup banner, then yields control to
    FastAPI for the lifetime of the server.
    """
    logger.info("Loading TTS model and voice packs...")

    # Initialize the main model with warm-up
    voicepack_count = await TTSModel.setup()

    # boundary = "█████╗"*9
    # NOTE(review): the banner glyph appears to have been lost in transit;
    # "" * 24 always yields the empty string, so the banner lines are blank.
    # Restore the intended character if a visible banner is wanted.
    boundary = "" * 24
    startup_msg = f"""
{boundary}

{boundary}
"""
    # TODO: Improve CPU warmup, threads, memory, etc
    startup_msg += f"\nModel warmed up on {TTSModel.get_device()}"
    startup_msg += f"\n{voicepack_count} voice packs loaded\n"
    startup_msg += f"\n{boundary}\n"
    logger.info(startup_msg)

    yield
# Initialize FastAPI app; metadata comes from settings so deployments can rebrand
app = FastAPI(
    title=settings.api_title,
    description=settings.api_description,
    version=settings.api_version,
    lifespan=lifespan,  # model/voicepack loading happens here before serving
    openapi_url="/openapi.json",  # Explicitly enable OpenAPI schema
)
# Add CORS middleware
# NOTE(review): allow_origins=["*"] combined with allow_credentials=True is
# fully permissive — confirm this is intended outside local development.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
# Include routers
app.include_router(openai_router, prefix="/v1")  # OpenAI-compatible TTS endpoints
app.include_router(dev_router)  # New development endpoints
# app.include_router(text_router) # Deprecated but still live for backwards compatibility
# Health check endpoint
@app.get("/health")
async def health_check():
    """Report service liveness for load balancers and orchestration probes."""
    payload = {"status": "healthy"}
    return payload
@app.get("/v1/test")
async def test_endpoint():
    """Minimal endpoint used to verify that routing is wired up correctly."""
    return dict(status="ok")
if __name__ == "__main__":
    # Launch the ASGI server directly; reload=True enables auto-restart
    # on code changes for local development.
    uvicorn.run(
        "api.src.main:app",
        host=settings.host,
        port=settings.port,
        reload=True,
    )