Mirror of https://github.com/remsky/Kokoro-FastAPI.git (synced 2025-08-05 16:48:53 +00:00)

Commit 90f168f63d (parent bfdb5c0cf5): fixed some problems in the build system and model download system

8 changed files with 28 additions and 23 deletions
@@ -41,7 +41,7 @@ async def _find_file(
         if filter_fn is None or filter_fn(full_path):
             return full_path

-    raise RuntimeError(f"File not found: {filename} in paths: {search_paths}")
+    raise FileNotFoundError(f"File not found: {filename} in paths: {search_paths}")


 async def _scan_directories(
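The switch from RuntimeError to FileNotFoundError makes a missing model file a specifically catchable condition instead of a generic failure, so callers can match on the exception type rather than parsing message strings. A minimal sketch of the pattern, assuming hypothetical names (`find_file`, `search_paths`, `filter_fn`) modeled on the context lines above rather than the project's actual helper:

```python
import asyncio
from pathlib import Path
from typing import Callable, Optional


async def find_file(
    filename: str,
    search_paths: list[str],
    filter_fn: Optional[Callable[[str], bool]] = None,
) -> str:
    """Return the first matching path or raise FileNotFoundError (sketch)."""
    for base in search_paths:
        full_path = str(Path(base) / filename)
        if Path(full_path).exists() and (filter_fn is None or filter_fn(full_path)):
            return full_path
    # FileNotFoundError is an OSError subclass, so callers can handle the
    # "model missing" case separately from unrelated runtime errors.
    raise FileNotFoundError(f"File not found: {filename} in paths: {search_paths}")


if __name__ == "__main__":
    try:
        asyncio.run(find_file("model.pth", ["api/src/models/v1_0"]))  # hypothetical file name
    except FileNotFoundError as e:
        print(e)
```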
@@ -60,7 +60,8 @@ class KokoroV1(BaseModelBackend):
                 model=self._model,  # Pass our model directly
                 device=self._device  # Match our device setting
             )
+        except FileNotFoundError as e:
+            raise e
         except Exception as e:
             raise RuntimeError(f"Failed to load Kokoro model: {e}")

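Handler order is what makes this work: the new `except FileNotFoundError` clause sits above the blanket `except Exception`, so a missing model file escapes with its original type instead of being re-wrapped as a RuntimeError. A small self-contained illustration (hypothetical function names, not project code):

```python
def load(path: str) -> None:
    # Stand-in for the backend's model loading; always "fails" so the
    # example runs without any model files present.
    raise FileNotFoundError(f"File not found: {path}")


def load_with_wrapping(path: str) -> None:
    try:
        load(path)
    except FileNotFoundError:
        # Specific handler first: let the original exception type escape.
        raise
    except Exception as e:
        # Generic handler second: anything else becomes a RuntimeError.
        raise RuntimeError(f"Failed to load Kokoro model: {e}")


try:
    load_with_wrapping("api/src/models/v1_0/model.pth")  # hypothetical path
except FileNotFoundError:
    print("caught FileNotFoundError, not RuntimeError")
```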
@@ -81,7 +81,17 @@ class ModelManager:
             logger.info(f"Warmup completed in {ms}ms")

             return self._device, "kokoro_v1", len(voices)
+        except FileNotFoundError as e:
+            logger.error("""
+Model files not found! You need to download the Kokoro V1 model:
+
+1. Download model using the script:
+   python docker/scripts/download_model.py --output api/src/models/v1_0
+
+2. Or set environment variable in docker-compose:
+   DOWNLOAD_MODEL=true
+""")
+            exit(0)
         except Exception as e:
             raise RuntimeError(f"Warmup failed: {e}")

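Note that this handler logs the download instructions and then calls `exit(0)`, so warmup terminates the process quietly rather than surfacing a traceback. The instructions point at `docker/scripts/download_model.py`; as a usage example, here is a hedged sketch of invoking that script from Python instead of a shell. The `--output` flag and path come from the diff; the working directory and interpreter choice are assumptions:

```python
import subprocess
import sys

# Run the model download script the way the log message suggests.
# Assumes the current working directory is the repository root.
result = subprocess.run(
    [
        sys.executable,
        "docker/scripts/download_model.py",
        "--output",
        "api/src/models/v1_0",
    ],
    check=False,  # inspect the return code instead of raising
)
if result.returncode != 0:
    print("model download failed; see the script output above", file=sys.stderr)
```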
@@ -112,6 +122,8 @@ class ModelManager:

         try:
             await self._backend.load_model(path)
+        except FileNotFoundError as e:
+            raise e
         except Exception as e:
             raise RuntimeError(f"Failed to load model: {e}")

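The same re-raise pattern is applied to the manager's load path. One way to pin the behavior down is a unit test with a stub backend whose `load_model` raises FileNotFoundError; the sketch below uses hypothetical names (`FakeBackend`, `load_or_wrap`) that are not from the project:

```python
import asyncio

import pytest


class FakeBackend:
    async def load_model(self, path: str) -> None:
        raise FileNotFoundError(f"File not found: {path}")


async def load_or_wrap(backend: FakeBackend, path: str) -> None:
    # Mirrors the hunk above: re-raise the specific exception,
    # wrap everything else in RuntimeError.
    try:
        await backend.load_model(path)
    except FileNotFoundError:
        raise
    except Exception as e:
        raise RuntimeError(f"Failed to load model: {e}")


def test_missing_model_is_not_wrapped():
    with pytest.raises(FileNotFoundError):
        asyncio.run(load_or_wrap(FakeBackend(), "api/src/models/v1_0"))
```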
@@ -62,18 +62,7 @@ async def lifespan(app: FastAPI):
         # Initialize model with warmup and get status
         device, model, voicepack_count = await model_manager\
             .initialize_with_warmup(voice_manager)

-    except FileNotFoundError:
-        logger.error("""
-Model files not found! You need to download the Kokoro V1 model:
-
-1. Download model using the script:
-   python docker/scripts/download_model.py --version v1_0 --output api/src/models/v1_0
-
-2. Or set environment variable in docker-compose:
-   DOWNLOAD_MODEL=true
-""")
-        raise
     except Exception as e:
         logger.error(f"Failed to initialize model: {e}")
         raise

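With the guidance message now emitted (and the process exited) inside the model manager's warmup, the FastAPI lifespan no longer needs its own copy of those instructions and keeps only the generic failure path. A minimal sketch of the resulting shape, with stub manager classes standing in for the real ones (assumptions, not the project's actual wiring):

```python
import logging
from contextlib import asynccontextmanager

from fastapi import FastAPI

logger = logging.getLogger(__name__)


class _StubModelManager:
    """Stand-in for the project's model manager (hypothetical)."""

    async def initialize_with_warmup(self, voice_manager):
        return "cpu", "kokoro_v1", 0  # (device, model, voice count)


class _StubVoiceManager:
    """Stand-in for the project's voice manager (hypothetical)."""


@asynccontextmanager
async def lifespan(app: FastAPI):
    model_manager = _StubModelManager()
    voice_manager = _StubVoiceManager()
    try:
        # Initialize model with warmup and get status
        device, model, voicepack_count = await model_manager \
            .initialize_with_warmup(voice_manager)
        logger.info(f"Warmed up {model} on {device} with {voicepack_count} voices")
    except Exception as e:
        # Missing-model guidance now lives in the manager; the lifespan
        # only logs unexpected failures and re-raises.
        logger.error(f"Failed to initialize model: {e}")
        raise
    yield


app = FastAPI(lifespan=lifespan)
```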
@@ -37,7 +37,8 @@ RUN --mount=type=cache,target=/root/.cache/uv \
 # Copy project files including models
 COPY --chown=appuser:appuser api ./api
 COPY --chown=appuser:appuser web ./web
-COPY --chown=appuser:appuser docker/scripts/download_model.* ./
+COPY --chown=appuser:appuser docker/scripts/ ./
+RUN chmod +x ./entrypoint.sh

 # Set environment variables
 ENV PYTHONUNBUFFERED=1 \
@@ -54,5 +55,6 @@ RUN if [ "$DOWNLOAD_MODEL" = "true" ]; then \
     python download_model.py --output api/src/models/v1_0; \
     fi

-# Run FastAPI server
-CMD ["uv", "run", "python", "-m", "uvicorn", "api.src.main:app", "--host", "0.0.0.0", "--port", "8880", "--log-level", "debug"]
+ENV DEVICE="cpu"
+# Run FastAPI server through entrypoint.sh
+CMD ["./entrypoint.sh"]
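This CPU image (and its GPU counterpart below) now copies the whole `docker/scripts/` directory, marks `entrypoint.sh` executable, sets a `DEVICE` environment variable, and starts the server through the entrypoint, which passes `--extra $DEVICE` to `uv run`. If application code ever needed the same setting, reading it could look like the hedged sketch below; the variable name comes from the diff, while the helper and its default are assumptions:

```python
import os


def resolve_device() -> str:
    """Return the device selected at image build time (hypothetical helper).

    Reads the DEVICE environment variable the Dockerfiles set, defaulting
    to "cpu" when running outside the container.
    """
    return os.environ.get("DEVICE", "cpu").lower()


if __name__ == "__main__":
    print(f"Selected device extra: {resolve_device()}")
```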
@@ -38,7 +38,8 @@ RUN --mount=type=cache,target=/root/.cache/uv \
 # Copy project files including models and sync again
 COPY --chown=appuser:appuser api ./api
 COPY --chown=appuser:appuser web ./web
-COPY --chown=appuser:appuser docker/scripts/download_model.* ./
+COPY --chown=appuser:appuser docker/scripts/ ./
+RUN chmod +x ./entrypoint.sh
 RUN --mount=type=cache,target=/root/.cache/uv \
     uv sync --extra gpu

@@ -57,5 +58,6 @@ RUN if [ "$DOWNLOAD_MODEL" = "true" ]; then \
     python download_model.py --output api/src/models/v1_0; \
     fi

-# Run FastAPI server
-CMD ["uv", "run", "python", "-m", "uvicorn", "api.src.main:app", "--host", "0.0.0.0", "--port", "8880", "--log-level", "debug"]
+ENV DEVICE="gpu"
+# Run FastAPI server through entrypoint.sh
+CMD ["./entrypoint.sh"]

@@ -3,7 +3,6 @@

 import json
 import os
-import shutil
 from pathlib import Path
 from urllib.request import urlretrieve

@@ -2,7 +2,7 @@
 set -e

 if [ "$DOWNLOAD_MODEL" = "true" ]; then
-    python docker/scripts/download_model.py --output api/src/models/v1_0
+    python download_model.py --output api/src/models/v1_0
 fi

-exec uv run python -m uvicorn api.src.main:app --host 0.0.0.0 --port 8880 --log-level debug
+exec uv run --extra $DEVICE python -m uvicorn api.src.main:app --host 0.0.0.0 --port 8880 --log-level debug