Added ability to set log level with env vars.

This commit is contained in:
faltiska 2025-07-13 13:47:55 +03:00
parent 45113b6f41
commit 2972c98a59
7 changed files with 23 additions and 18 deletions

View file

@@ -2,10 +2,8 @@
FastAPI OpenAI Compatible API
"""
import os
import sys
from contextlib import asynccontextmanager
from pathlib import Path
import torch
import uvicorn
@@ -31,7 +29,6 @@ def setup_logger():
"<fg #4169E1>{module}:{line}</fg #4169E1> | "
"{message}",
"colorize": True,
"level": "DEBUG",
},
],
}

View file

@@ -40,9 +40,12 @@ ENV PATH="/home/appuser/.cargo/bin:/app/.venv/bin:$PATH" \
PHONEMIZER_ESPEAK_PATH=/usr/bin \
PHONEMIZER_ESPEAK_DATA=/usr/share/espeak-ng-data \
ESPEAK_DATA_PATH=/usr/share/espeak-ng-data \
DEVICE="cpu"
DEVICE="cpu" \
DOWNLOAD_MODEL=true \
UVICORN_LOG_LEVEL=warning \
LOGURU_LEVEL=WARNING \
NUM_WORKERS=2
ENV DOWNLOAD_MODEL=true
# Download model if enabled
RUN if [ "$DOWNLOAD_MODEL" = "true" ]; then \
python download_model.py --output api/src/models/v1_0; \

View file

@@ -39,9 +39,12 @@ ENV PATH="/app/.venv/bin:$PATH" \
PHONEMIZER_ESPEAK_PATH=/usr/bin \
PHONEMIZER_ESPEAK_DATA=/usr/share/espeak-ng-data \
ESPEAK_DATA_PATH=/usr/share/espeak-ng-data \
DEVICE="gpu"
ENV DOWNLOAD_MODEL=true
DEVICE="gpu" \
DOWNLOAD_MODEL=true \
UVICORN_LOG_LEVEL=warning \
LOGURU_LEVEL=WARNING \
NUM_WORKERS=2
# Download model if enabled
RUN if [ "$DOWNLOAD_MODEL" = "true" ]; then \
python download_model.py --output api/src/models/v1_0; \

View file

@@ -1,8 +1,4 @@
#!/bin/bash
set -e
if [ "$DOWNLOAD_MODEL" = "true" ]; then
python download_model.py --output api/src/models/v1_0
fi
exec uv run --extra $DEVICE --no-sync python -m uvicorn api.src.main:app --host 0.0.0.0 --port 8880 --log-level debug
exec uv run --extra $DEVICE --no-sync python -m uvicorn api.src.main:app --host 0.0.0.0 --port 8880 --log-level $UVICORN_LOG_LEVEL --workers $NUM_WORKERS

View file

@@ -1,5 +1,5 @@
$env:PHONEMIZER_ESPEAK_LIBRARY="C:\Program Files\eSpeak NG\libespeak-ng.dll"
$env:PYTHONUTF8=1
$Env:PHONEMIZER_ESPEAK_LIBRARY="C:\Program Files\eSpeak NG\libespeak-ng.dll"
$Env:PYTHONUTF8=1
$Env:PROJECT_ROOT="$pwd"
$Env:USE_GPU="true"
$Env:USE_ONNX="false"
@@ -7,7 +7,9 @@ $Env:PYTHONPATH="$Env:PROJECT_ROOT;$Env:PROJECT_ROOT/api"
$Env:MODEL_DIR="src/models"
$Env:VOICES_DIR="src/voices/v1_0"
$Env:WEB_PLAYER_PATH="$Env:PROJECT_ROOT/web"
$Env:UVICORN_LOG_LEVEL="warning"
$Env:LOGURU_LEVEL="WARNING"
uv pip install -e ".[gpu]"
uv run --no-sync python docker/scripts/download_model.py --output api/src/models/v1_0
uv run --no-sync uvicorn api.src.main:app --host 0.0.0.0 --port 8880
uv run --no-sync uvicorn api.src.main:app --host 0.0.0.0 --port 8880 --log-level $env:UVICORN_LOG_LEVEL --workers 2

View file

@@ -10,9 +10,11 @@ export PYTHONPATH=$PROJECT_ROOT:$PROJECT_ROOT/api
export MODEL_DIR=src/models
export VOICES_DIR=src/voices/v1_0
export WEB_PLAYER_PATH=$PROJECT_ROOT/web
export UVICORN_LOG_LEVEL=debug
export LOGURU_LEVEL=DEBUG
# Run FastAPI with GPU extras using uv run
# Note: espeak may still require manual installation,
uv pip install -e ".[gpu]"
uv run --no-sync python docker/scripts/download_model.py --output api/src/models/v1_0
uv run --no-sync uvicorn api.src.main:app --host 0.0.0.0 --port 8880
uv run --no-sync uvicorn api.src.main:app --host 0.0.0.0 --port 8880 --log-level $UVICORN_LOG_LEVEL --workers 2

View file

@@ -10,6 +10,8 @@ export PYTHONPATH=$PROJECT_ROOT:$PROJECT_ROOT/api
export MODEL_DIR=src/models
export VOICES_DIR=src/voices/v1_0
export WEB_PLAYER_PATH=$PROJECT_ROOT/web
export UVICORN_LOG_LEVEL=debug
export LOGURU_LEVEL=DEBUG
export DEVICE_TYPE=mps
# Enable MPS fallback for unsupported operations
@@ -18,4 +20,4 @@ export PYTORCH_ENABLE_MPS_FALLBACK=1
# Run FastAPI with GPU extras using uv run
uv pip install -e .
uv run --no-sync python docker/scripts/download_model.py --output api/src/models/v1_0
uv run --no-sync uvicorn api.src.main:app --host 0.0.0.0 --port 8880
uv run --no-sync uvicorn api.src.main:app --host 0.0.0.0 --port 8880 --log-level $UVICORN_LOG_LEVEL --workers 2