diff --git a/README.md b/README.md
index eb90b4d..6859163 100644
--- a/README.md
+++ b/README.md
@@ -91,11 +91,19 @@ docker run --gpus all -p 8880:8880 ghcr.io/remsky/kokoro-fastapi-gpu:v0.2.2 #NV
 
 Run the [model download script](https://github.com/remsky/Kokoro-FastAPI/blob/master/docker/scripts/download_model.py) if you haven't already
 
 Start directly via UV (with hot-reload)
+
+Linux and macOS
 ```bash
 ./start-cpu.sh OR
 ./start-gpu.sh
 ```
+Windows
+```powershell
+.\start-cpu.ps1 OR
+.\start-gpu.ps1
+```
+
 
 
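Once one of the start scripts above is running, a quick smoke test from Python confirms the server came up. This is a minimal sketch, not part of the repo: it assumes the default port 8880 set in the scripts, the OpenAI-compatible `/v1/audio/speech` route, and the `kokoro` model / `af_heart` voice names; adjust the payload if your deployment differs.

```python
# Minimal smoke test for a locally running Kokoro-FastAPI instance (hypothetical helper).
# Assumes the OpenAI-compatible /v1/audio/speech endpoint on port 8880; requires `requests`.
import requests

resp = requests.post(
    "http://localhost:8880/v1/audio/speech",
    json={
        "model": "kokoro",              # assumed model name
        "input": "Hello from Kokoro!",  # text to synthesize
        "voice": "af_heart",            # assumed v1.0 voice
    },
    timeout=120,
)
resp.raise_for_status()

# The response body is the raw audio stream; write it out and check it plays.
with open("smoke_test.mp3", "wb") as f:
    f.write(resp.content)
print("wrote", len(resp.content), "bytes")
```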
diff --git a/start-cpu.ps1 b/start-cpu.ps1
new file mode 100644
index 0000000..5a5df26
--- /dev/null
+++ b/start-cpu.ps1
@@ -0,0 +1,13 @@
+$env:PHONEMIZER_ESPEAK_LIBRARY="C:\Program Files\eSpeak NG\libespeak-ng.dll"
+$env:PYTHONUTF8=1
+$Env:PROJECT_ROOT="$pwd"
+$Env:USE_GPU="false"
+$Env:USE_ONNX="false"
+$Env:PYTHONPATH="$Env:PROJECT_ROOT;$Env:PROJECT_ROOT/api"
+$Env:MODEL_DIR="src/models"
+$Env:VOICES_DIR="src/voices/v1_0"
+$Env:WEB_PLAYER_PATH="$Env:PROJECT_ROOT/web"
+
+uv pip install -e ".[cpu]"
+uv run --no-sync python docker/scripts/download_model.py --output api/src/models/v1_0
+uv run --no-sync uvicorn api.src.main:app --host 0.0.0.0 --port 8880
\ No newline at end of file
diff --git a/start-cpu.sh b/start-cpu.sh
index 651f645..4438ff9 100755
--- a/start-cpu.sh
+++ b/start-cpu.sh
@@ -14,4 +14,5 @@ export WEB_PLAYER_PATH=$PROJECT_ROOT/web
 # Run FastAPI with CPU extras using uv run
 # Note: espeak may still require manual installation,
 uv pip install -e ".[cpu]"
+uv run --no-sync python docker/scripts/download_model.py --output api/src/models/v1_0
 uv run --no-sync uvicorn api.src.main:app --host 0.0.0.0 --port 8880
diff --git a/start-gpu.ps1 b/start-gpu.ps1
new file mode 100644
index 0000000..7b161a5
--- /dev/null
+++ b/start-gpu.ps1
@@ -0,0 +1,13 @@
+$env:PHONEMIZER_ESPEAK_LIBRARY="C:\Program Files\eSpeak NG\libespeak-ng.dll"
+$env:PYTHONUTF8=1
+$Env:PROJECT_ROOT="$pwd"
+$Env:USE_GPU="true"
+$Env:USE_ONNX="false"
+$Env:PYTHONPATH="$Env:PROJECT_ROOT;$Env:PROJECT_ROOT/api"
+$Env:MODEL_DIR="src/models"
+$Env:VOICES_DIR="src/voices/v1_0"
+$Env:WEB_PLAYER_PATH="$Env:PROJECT_ROOT/web"
+
+uv pip install -e ".[gpu]"
+uv run --no-sync python docker/scripts/download_model.py --output api/src/models/v1_0
+uv run --no-sync uvicorn api.src.main:app --host 0.0.0.0 --port 8880
\ No newline at end of file
diff --git a/start-gpu.sh b/start-gpu.sh
index b079978..a3a2e68 100755
--- a/start-gpu.sh
+++ b/start-gpu.sh
@@ -12,5 +12,7 @@ export VOICES_DIR=src/voices/v1_0
 export WEB_PLAYER_PATH=$PROJECT_ROOT/web
 
 # Run FastAPI with GPU extras using uv run
+# Note: espeak may still require manual installation,
 uv pip install -e ".[gpu]"
+uv run --no-sync python docker/scripts/download_model.py --output api/src/models/v1_0
 uv run --no-sync uvicorn api.src.main:app --host 0.0.0.0 --port 8880
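The PowerShell scripts hard-code the eSpeak NG library path via `PHONEMIZER_ESPEAK_LIBRARY`, and the shell scripts note that espeak may still require manual installation. A small pre-flight check like the one below (a hypothetical helper, not part of the repo) catches a missing or mislocated libespeak-ng before uvicorn starts, rather than failing later during phonemization.

```python
# Hypothetical pre-flight check: verify the espeak-ng library the start
# scripts point phonemizer at actually exists on this machine.
import os
import sys
from pathlib import Path

lib = os.environ.get("PHONEMIZER_ESPEAK_LIBRARY", "")
if not lib:
    sys.exit("PHONEMIZER_ESPEAK_LIBRARY is not set; run one of the start scripts first")
if not Path(lib).is_file():
    sys.exit(f"espeak-ng library not found at {lib!r}; install eSpeak NG or fix the path")
print(f"espeak-ng library found at {lib}")
```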