Mirror of https://github.com/remsky/Kokoro-FastAPI.git (synced 2025-08-31 21:59:28 +00:00)

CPU image: 5.6 GB -> 4.9 GB. GPU image: 14.8 GB -> 9.9 GB. Saved 4 GB by using the CUDA base image instead of cudnn-runtime. Saved 700 MB by deleting the UV cache. Improved image rebuild speed by merging the RUN commands into one.
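The GPU-side saving described above comes from the base-image swap. A minimal sketch of that kind of change, with illustrative CUDA tags that are assumptions rather than values taken from the repo's GPU Dockerfile:

# Before (larger image): the cudnn-runtime base bundles cuDNN, which the
# pip/uv-installed PyTorch wheels already ship their own copy of.
# FROM nvidia/cuda:12.1.0-cudnn8-runtime-ubuntu22.04
# After (smaller image): a bare CUDA base layer.
FROM nvidia/cuda:12.1.0-base-ubuntu22.04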
52 lines · 1.7 KiB · Docker
FROM python:3.10-slim

# Install dependencies and check espeak location
# Rust is required to build sudachipy and pyopenjtalk-plus
RUN apt-get update -y && \
    apt-get install -y espeak-ng espeak-ng-data git libsndfile1 curl ffmpeg g++ && \
    apt-get clean && rm -rf /var/lib/apt/lists/* && \
    mkdir -p /usr/share/espeak-ng-data && \
    ln -s /usr/lib/*/espeak-ng-data/* /usr/share/espeak-ng-data/ && \
    curl -LsSf https://astral.sh/uv/install.sh | sh && \
    mv /root/.local/bin/uv /usr/local/bin/ && \
    mv /root/.local/bin/uvx /usr/local/bin/ && \
    curl https://sh.rustup.rs -sSf | sh -s -- -y && \
    useradd -m -u 1000 appuser && \
    mkdir -p /app/api/src/models/v1_0 && \
    chown -R appuser:appuser /app

USER appuser
WORKDIR /app

# Copy dependency files
COPY --chown=appuser:appuser pyproject.toml ./pyproject.toml

# Install dependencies with CPU extras
RUN uv venv --python 3.10 && \
    uv sync --extra cpu --no-cache

# Copy project files including models
COPY --chown=appuser:appuser api ./api
COPY --chown=appuser:appuser web ./web
COPY --chown=appuser:appuser docker/scripts/ ./
RUN chmod +x ./entrypoint.sh

# Set environment variables
ENV PATH="/home/appuser/.cargo/bin:/app/.venv/bin:$PATH" \
    PYTHONUNBUFFERED=1 \
    PYTHONPATH=/app:/app/api \
    UV_LINK_MODE=copy \
    USE_GPU=false \
    PHONEMIZER_ESPEAK_PATH=/usr/bin \
    PHONEMIZER_ESPEAK_DATA=/usr/share/espeak-ng-data \
    ESPEAK_DATA_PATH=/usr/share/espeak-ng-data \
    DEVICE="cpu"

ENV DOWNLOAD_MODEL=true
# Download model if enabled
RUN if [ "$DOWNLOAD_MODEL" = "true" ]; then \
    python download_model.py --output api/src/models/v1_0; \
    fi

# Run FastAPI server through entrypoint.sh
CMD ["./entrypoint.sh"]
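A minimal build-and-run sketch for this CPU image. The Dockerfile path, image tag, and port below are assumptions for illustration (the build context must be the repository root so the COPY paths resolve; 8880 is assumed from upstream defaults), not values taken from this file:

# Build from the repo root so api/, web/, and docker/scripts/ are in the context
docker build -t kokoro-fastapi-cpu -f docker/cpu/Dockerfile .
# Run the container; entrypoint.sh starts the FastAPI server
docker run --rm -p 8880:8880 kokoro-fastapi-cpu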