Added ability to set log level with env vars.

This commit is contained in:
faltiska 2025-07-13 13:47:55 +03:00
parent 45113b6f41
commit 2972c98a59
7 changed files with 23 additions and 18 deletions

View file

@@ -2,10 +2,8 @@
FastAPI OpenAI Compatible API FastAPI OpenAI Compatible API
""" """
import os
import sys import sys
from contextlib import asynccontextmanager from contextlib import asynccontextmanager
from pathlib import Path
import torch import torch
import uvicorn import uvicorn
@@ -31,7 +29,6 @@ def setup_logger():
"<fg #4169E1>{module}:{line}</fg #4169E1> | " "<fg #4169E1>{module}:{line}</fg #4169E1> | "
"{message}", "{message}",
"colorize": True, "colorize": True,
"level": "DEBUG",
}, },
], ],
} }

View file

@@ -40,9 +40,12 @@ ENV PATH="/home/appuser/.cargo/bin:/app/.venv/bin:$PATH" \
PHONEMIZER_ESPEAK_PATH=/usr/bin \ PHONEMIZER_ESPEAK_PATH=/usr/bin \
PHONEMIZER_ESPEAK_DATA=/usr/share/espeak-ng-data \ PHONEMIZER_ESPEAK_DATA=/usr/share/espeak-ng-data \
ESPEAK_DATA_PATH=/usr/share/espeak-ng-data \ ESPEAK_DATA_PATH=/usr/share/espeak-ng-data \
DEVICE="cpu" DEVICE="cpu" \
DOWNLOAD_MODEL=true \
UVICORN_LOG_LEVEL=warning \
LOGURU_LEVEL=WARNING \
NUM_WORKERS=2
ENV DOWNLOAD_MODEL=true
# Download model if enabled # Download model if enabled
RUN if [ "$DOWNLOAD_MODEL" = "true" ]; then \ RUN if [ "$DOWNLOAD_MODEL" = "true" ]; then \
python download_model.py --output api/src/models/v1_0; \ python download_model.py --output api/src/models/v1_0; \

View file

@@ -39,9 +39,12 @@ ENV PATH="/app/.venv/bin:$PATH" \
PHONEMIZER_ESPEAK_PATH=/usr/bin \ PHONEMIZER_ESPEAK_PATH=/usr/bin \
PHONEMIZER_ESPEAK_DATA=/usr/share/espeak-ng-data \ PHONEMIZER_ESPEAK_DATA=/usr/share/espeak-ng-data \
ESPEAK_DATA_PATH=/usr/share/espeak-ng-data \ ESPEAK_DATA_PATH=/usr/share/espeak-ng-data \
DEVICE="gpu" DEVICE="gpu" \
DOWNLOAD_MODEL=true \
UVICORN_LOG_LEVEL=warning \
LOGURU_LEVEL=WARNING \
NUM_WORKERS=2
ENV DOWNLOAD_MODEL=true
# Download model if enabled # Download model if enabled
RUN if [ "$DOWNLOAD_MODEL" = "true" ]; then \ RUN if [ "$DOWNLOAD_MODEL" = "true" ]; then \
python download_model.py --output api/src/models/v1_0; \ python download_model.py --output api/src/models/v1_0; \

View file

@@ -1,8 +1,4 @@
#!/bin/bash #!/bin/bash
set -e set -e
if [ "$DOWNLOAD_MODEL" = "true" ]; then exec uv run --extra $DEVICE --no-sync python -m uvicorn api.src.main:app --host 0.0.0.0 --port 8880 --log-level $UVICORN_LOG_LEVEL --workers $NUM_WORKERS
python download_model.py --output api/src/models/v1_0
fi
exec uv run --extra $DEVICE --no-sync python -m uvicorn api.src.main:app --host 0.0.0.0 --port 8880 --log-level debug

View file

@@ -1,5 +1,5 @@
$env:PHONEMIZER_ESPEAK_LIBRARY="C:\Program Files\eSpeak NG\libespeak-ng.dll" $Env:PHONEMIZER_ESPEAK_LIBRARY="C:\Program Files\eSpeak NG\libespeak-ng.dll"
$env:PYTHONUTF8=1 $Env:PYTHONUTF8=1
$Env:PROJECT_ROOT="$pwd" $Env:PROJECT_ROOT="$pwd"
$Env:USE_GPU="true" $Env:USE_GPU="true"
$Env:USE_ONNX="false" $Env:USE_ONNX="false"
@@ -7,7 +7,9 @@ $Env:PYTHONPATH="$Env:PROJECT_ROOT;$Env:PROJECT_ROOT/api"
$Env:MODEL_DIR="src/models" $Env:MODEL_DIR="src/models"
$Env:VOICES_DIR="src/voices/v1_0" $Env:VOICES_DIR="src/voices/v1_0"
$Env:WEB_PLAYER_PATH="$Env:PROJECT_ROOT/web" $Env:WEB_PLAYER_PATH="$Env:PROJECT_ROOT/web"
$Env:UVICORN_LOG_LEVEL="warning"
$Env:LOGURU_LEVEL="WARNING"
uv pip install -e ".[gpu]" uv pip install -e ".[gpu]"
uv run --no-sync python docker/scripts/download_model.py --output api/src/models/v1_0 uv run --no-sync python docker/scripts/download_model.py --output api/src/models/v1_0
uv run --no-sync uvicorn api.src.main:app --host 0.0.0.0 --port 8880 uv run --no-sync uvicorn api.src.main:app --host 0.0.0.0 --port 8880 --log-level $env:UVICORN_LOG_LEVEL --workers 2

View file

@@ -10,9 +10,11 @@ export PYTHONPATH=$PROJECT_ROOT:$PROJECT_ROOT/api
export MODEL_DIR=src/models export MODEL_DIR=src/models
export VOICES_DIR=src/voices/v1_0 export VOICES_DIR=src/voices/v1_0
export WEB_PLAYER_PATH=$PROJECT_ROOT/web export WEB_PLAYER_PATH=$PROJECT_ROOT/web
export UVICORN_LOG_LEVEL=debug
export LOGURU_LEVEL=DEBUG
# Run FastAPI with GPU extras using uv run # Run FastAPI with GPU extras using uv run
# Note: espeak may still require manual installation, # Note: espeak may still require manual installation,
uv pip install -e ".[gpu]" uv pip install -e ".[gpu]"
uv run --no-sync python docker/scripts/download_model.py --output api/src/models/v1_0 uv run --no-sync python docker/scripts/download_model.py --output api/src/models/v1_0
uv run --no-sync uvicorn api.src.main:app --host 0.0.0.0 --port 8880 uv run --no-sync uvicorn api.src.main:app --host 0.0.0.0 --port 8880 --log-level $UVICORN_LOG_LEVEL --workers 2

View file

@@ -10,6 +10,8 @@ export PYTHONPATH=$PROJECT_ROOT:$PROJECT_ROOT/api
export MODEL_DIR=src/models export MODEL_DIR=src/models
export VOICES_DIR=src/voices/v1_0 export VOICES_DIR=src/voices/v1_0
export WEB_PLAYER_PATH=$PROJECT_ROOT/web export WEB_PLAYER_PATH=$PROJECT_ROOT/web
export UVICORN_LOG_LEVEL=debug
export LOGURU_LEVEL=DEBUG
export DEVICE_TYPE=mps export DEVICE_TYPE=mps
# Enable MPS fallback for unsupported operations # Enable MPS fallback for unsupported operations
@@ -18,4 +20,4 @@ export PYTORCH_ENABLE_MPS_FALLBACK=1
# Run FastAPI with GPU extras using uv run # Run FastAPI with GPU extras using uv run
uv pip install -e . uv pip install -e .
uv run --no-sync python docker/scripts/download_model.py --output api/src/models/v1_0 uv run --no-sync python docker/scripts/download_model.py --output api/src/models/v1_0
uv run --no-sync uvicorn api.src.main:app --host 0.0.0.0 --port 8880 uv run --no-sync uvicorn api.src.main:app --host 0.0.0.0 --port 8880 --log-level $UVICORN_LOG_LEVEL --workers 2