
"""
FastAPI OpenAI Compatible API
"""
import sys
from contextlib import asynccontextmanager
import uvicorn
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from loguru import logger
from .core.config import settings
from .routers.development import router as dev_router
from .routers.openai_compatible import router as openai_router
from .services.tts_model import TTSModel
from .services.tts_service import TTSService
def setup_logger():
    """Configure loguru's global logger: colored INFO-level output on stdout."""
    stdout_handler = {
        "sink": sys.stdout,
        "format": (
            "<fg #2E8B57>{time:hh:mm:ss A}</fg #2E8B57> | "
            "{level: <8} | "
            "{message}"
        ),
        "colorize": True,
        "level": "INFO",
    }
    # Drop loguru's default handler before installing ours
    logger.remove()
    logger.configure(handlers=[stdout_handler])
    # Render ERROR-level records in red
    logger.level("ERROR", color="<red>")


# Configure logger
setup_logger()
@asynccontextmanager
async def lifespan(app: FastAPI):
    """Lifespan handler: warm up the TTS model and voice packs before serving."""
    logger.info("Loading TTS model and voice packs...")

    # Initialize the main model with warm-up; returns the number of voice packs found
    voicepack_count = await TTSModel.setup()

    # boundary = "█████╗"*9
    boundary = ""  # original was "" * 2*12 — always the empty string

    # NOTE(review): banner lines between the boundaries may have been lost in
    # extraction; with an empty boundary the rendered banner is blank either way.
    startup_msg = f"""
{boundary}

{boundary}
"""
    # TODO: Improve CPU warmup, threads, memory, etc
    startup_msg += f"\nModel warmed up on {TTSModel.get_device()}"
    startup_msg += f"\n{voicepack_count} voice packs loaded\n"
    startup_msg += f"\n{boundary}\n"
    logger.info(startup_msg)

    yield
# Build the FastAPI application; lifespan handles model warm-up on startup
app = FastAPI(
    title=settings.api_title,
    description=settings.api_description,
    version=settings.api_version,
    lifespan=lifespan,
    openapi_url="/openapi.json",  # Explicitly enable OpenAPI schema
)

# Add CORS middleware — wide open: any origin, method, and header is accepted
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# Mount routers
app.include_router(openai_router, prefix="/v1")  # OpenAI-compatible surface
app.include_router(dev_router)  # New development endpoints
# app.include_router(text_router)  # Deprecated but still live for backwards compatibility
# Health check endpoint
@app.get("/health")
async def health_check():
    """Liveness probe: always reports the service as healthy."""
    payload = {"status": "healthy"}
    return payload
@app.get("/v1/test")
async def test_endpoint():
    """Minimal endpoint used to verify that /v1 routing is wired up."""
    payload = {"status": "ok"}
    return payload
if __name__ == "__main__":
    # Dev entry point with auto-reload; production should invoke uvicorn directly
    uvicorn.run(
        "api.src.main:app",
        host=settings.host,
        port=settings.port,
        reload=True,
    )