-Adjust testing framework for new model

-Add web player support: include static file serving and HTML interface for TTS
remsky 2025-01-22 21:11:47 -07:00
parent 66f46e82f9
commit df4cc5b4b2
30 changed files with 2611 additions and 1369 deletions


@@ -0,0 +1,206 @@
"""Tests for API endpoints"""
import pytest
import torch
from fastapi.testclient import TestClient
from ..src.main import app
# Create test client for non-async tests
client = TestClient(app)
def test_health_check():
"""Test the health check endpoint"""
response = client.get("/health")
assert response.status_code == 200
assert response.json() == {"status": "healthy"}
@pytest.mark.asyncio
async def test_openai_speech_endpoint(async_client, mock_tts_service):
"""Test the OpenAI-compatible speech endpoint"""
# Setup mocks
mock_tts_service._voice_manager.list_voices.return_value = ["bm_lewis"]
mock_tts_service.generate_audio.return_value = (torch.zeros(48000).numpy(), 1.0)
mock_tts_service._voice_manager.load_voice.return_value = torch.zeros(192)
# Mock voice validation
mock_tts_service._voice_manager.get_voice_path.return_value = "/mock/voices/bm_lewis.pt"
test_request = {
"model": "kokoro",
"input": "Hello world",
"voice": "bm_lewis",
"response_format": "wav",
"speed": 1.0,
"stream": False,
}
response = await async_client.post("/v1/audio/speech", json=test_request)
assert response.status_code == 200
assert response.headers["content-type"] == "audio/wav"
assert response.headers["content-disposition"] == "attachment; filename=speech.wav"
mock_tts_service.generate_audio.assert_called_once()
@pytest.mark.asyncio
async def test_openai_speech_invalid_voice(async_client, mock_tts_service):
"""Test the OpenAI-compatible speech endpoint with invalid voice"""
# Setup mocks
mock_tts_service._voice_manager.list_voices.return_value = ["af", "bm_lewis"]
mock_tts_service._voice_manager.get_voice_path.return_value = None
test_request = {
"model": "kokoro",
"input": "Hello world",
"voice": "invalid_voice",
"response_format": "wav",
"speed": 1.0,
"stream": False,
}
response = await async_client.post("/v1/audio/speech", json=test_request)
assert response.status_code == 400
assert "not found" in response.json()["detail"]["message"]
@pytest.mark.asyncio
async def test_openai_speech_generation_error(async_client, mock_tts_service):
"""Test error handling in speech generation"""
# Setup mocks
mock_tts_service._voice_manager.list_voices.return_value = ["af"]
mock_tts_service.generate_audio.side_effect = RuntimeError("Generation failed")
mock_tts_service._voice_manager.load_voice.return_value = torch.zeros(192)
mock_tts_service._voice_manager.get_voice_path.return_value = "/mock/voices/af.pt"
test_request = {
"model": "kokoro",
"input": "Hello world",
"voice": "af",
"response_format": "wav",
"speed": 1.0,
"stream": False,
}
response = await async_client.post("/v1/audio/speech", json=test_request)
assert response.status_code == 500
assert "Generation failed" in response.json()["detail"]["message"]
@pytest.mark.asyncio
async def test_combine_voices_list_success(async_client, mock_tts_service):
"""Test successful voice combination using list format"""
# Setup mocks
mock_tts_service._voice_manager.list_voices.return_value = ["af_bella", "af_sarah"]
mock_tts_service._voice_manager.combine_voices.return_value = "af_bella_af_sarah"
mock_tts_service._voice_manager.load_voice.return_value = torch.zeros(192)
mock_tts_service._voice_manager.get_voice_path.return_value = "/mock/voices/af_bella.pt"
test_voices = ["af_bella", "af_sarah"]
response = await async_client.post("/v1/audio/voices/combine", json=test_voices)
assert response.status_code == 200
assert response.json()["voice"] == "af_bella_af_sarah"
mock_tts_service._voice_manager.combine_voices.assert_called_once()
@pytest.mark.asyncio
async def test_combine_voices_empty_list(async_client, mock_tts_service):
"""Test combining empty voice list returns error"""
test_voices = []
response = await async_client.post("/v1/audio/voices/combine", json=test_voices)
assert response.status_code == 400
assert "No voices provided" in response.json()["detail"]["message"]
@pytest.mark.asyncio
async def test_speech_streaming_with_combined_voice(async_client, mock_tts_service):
"""Test streaming speech with combined voice using + syntax"""
# Setup mocks
mock_tts_service._voice_manager.list_voices.return_value = ["af_bella", "af_sarah"]
mock_tts_service._voice_manager.combine_voices.return_value = "af_bella_af_sarah"
mock_tts_service._voice_manager.load_voice.return_value = torch.zeros(192)
mock_tts_service._voice_manager.get_voice_path.return_value = "/mock/voices/af_bella.pt"
async def mock_stream():
yield b"chunk1"
yield b"chunk2"
mock_tts_service.generate_audio_stream.return_value = mock_stream()
test_request = {
"model": "kokoro",
"input": "Hello world",
"voice": "af_bella+af_sarah",
"response_format": "mp3",
"stream": True,
}
headers = {"x-raw-response": "stream"}
response = await async_client.post(
"/v1/audio/speech", json=test_request, headers=headers
)
assert response.status_code == 200
assert response.headers["content-type"] == "audio/mpeg"
assert response.headers["content-disposition"] == "attachment; filename=speech.mp3"
@pytest.mark.asyncio
async def test_openai_speech_pcm_streaming(async_client, mock_tts_service):
"""Test streaming PCM audio for real-time playback"""
# Setup mocks
mock_tts_service._voice_manager.list_voices.return_value = ["af"]
mock_tts_service._voice_manager.load_voice.return_value = torch.zeros(192)
mock_tts_service._voice_manager.get_voice_path.return_value = "/mock/voices/af.pt"
async def mock_stream():
yield b"chunk1"
yield b"chunk2"
mock_tts_service.generate_audio_stream.return_value = mock_stream()
test_request = {
"model": "kokoro",
"input": "Hello world",
"voice": "af",
"response_format": "pcm",
"stream": True,
}
headers = {"x-raw-response": "stream"}
response = await async_client.post(
"/v1/audio/speech", json=test_request, headers=headers
)
assert response.status_code == 200
assert response.headers["content-type"] == "audio/pcm"
@pytest.mark.asyncio
async def test_openai_speech_streaming_mp3(async_client, mock_tts_service):
"""Test streaming MP3 audio to file"""
# Setup mocks
mock_tts_service._voice_manager.list_voices.return_value = ["af"]
mock_tts_service._voice_manager.load_voice.return_value = torch.zeros(192)
mock_tts_service._voice_manager.get_voice_path.return_value = "/mock/voices/af.pt"
async def mock_stream():
yield b"chunk1"
yield b"chunk2"
mock_tts_service.generate_audio_stream.return_value = mock_stream()
test_request = {
"model": "kokoro",
"input": "Hello world",
"voice": "af",
"response_format": "mp3",
"stream": True,
}
headers = {"x-raw-response": "stream"}
response = await async_client.post(
"/v1/audio/speech", json=test_request, headers=headers
)
assert response.status_code == 200
assert response.headers["content-type"] == "audio/mpeg"
assert response.headers["content-disposition"] == "attachment; filename=speech.mp3"

api/depr_tests/test_main.py (new file, 104 lines)

@@ -0,0 +1,104 @@
"""Tests for FastAPI application"""
from unittest.mock import AsyncMock, MagicMock, patch
import pytest
import torch
from fastapi.testclient import TestClient
from api.src.main import app, lifespan
@pytest.fixture
def test_client():
"""Create a test client"""
return TestClient(app)
def test_health_check(test_client):
"""Test health check endpoint"""
response = test_client.get("/health")
assert response.status_code == 200
assert response.json() == {"status": "healthy"}
@pytest.mark.asyncio
async def test_lifespan_successful_warmup():
"""Test successful model warmup in lifespan"""
with patch("api.src.inference.model_manager.get_manager") as mock_get_model, \
patch("api.src.inference.voice_manager.get_manager") as mock_get_voice, \
patch("api.src.main.logger") as mock_logger, \
patch("os.path.exists") as mock_exists, \
patch("torch.cuda.is_available") as mock_cuda:
# Setup mocks
mock_model = AsyncMock()
mock_voice = AsyncMock()
mock_get_model.return_value = mock_model
mock_get_voice.return_value = mock_voice
mock_exists.return_value = True
mock_cuda.return_value = False
# Setup model manager
mock_backend = MagicMock()
mock_backend.device = "cpu"
mock_model.get_backend.return_value = mock_backend
mock_model.load_model = AsyncMock()
# Setup voice manager
mock_voice_tensor = torch.zeros(192)
mock_voice.load_voice = AsyncMock(return_value=mock_voice_tensor)
mock_voice.list_voices = AsyncMock(return_value=["af", "af_bella", "af_sarah"])
# Create an async generator from the lifespan context manager
async_gen = lifespan(MagicMock())
# Start the context manager
await async_gen.__aenter__()
# Verify managers were initialized
mock_get_model.assert_called_once()
mock_get_voice.assert_called_once()
mock_model.load_model.assert_called_once()
# Clean up
await async_gen.__aexit__(None, None, None)
@pytest.mark.asyncio
async def test_lifespan_failed_warmup():
"""Test failed model warmup in lifespan"""
with patch("api.src.inference.model_manager.get_manager") as mock_get_model:
# Mock the model manager to fail
mock_get_model.side_effect = RuntimeError("Failed to initialize model")
# Create an async generator from the lifespan context manager
async_gen = lifespan(MagicMock())
# Verify the exception is raised
with pytest.raises(RuntimeError, match="Failed to initialize model"):
await async_gen.__aenter__()
# Clean up
await async_gen.__aexit__(None, None, None)
@pytest.mark.asyncio
async def test_lifespan_voice_manager_failure():
"""Test failure when voice manager fails to initialize"""
with patch("api.src.inference.model_manager.get_manager") as mock_get_model, \
patch("api.src.inference.voice_manager.get_manager") as mock_get_voice:
# Setup model manager success but voice manager failure
mock_model = AsyncMock()
mock_get_model.return_value = mock_model
mock_get_voice.side_effect = RuntimeError("Failed to initialize voice manager")
# Create an async generator from the lifespan context manager
async_gen = lifespan(MagicMock())
# Verify the exception is raised
with pytest.raises(RuntimeError, match="Failed to initialize voice manager"):
await async_gen.__aenter__()
# Clean up
await async_gen.__aexit__(None, None, None)
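These lifespan tests call `__aenter__`/`__aexit__` by hand; an equivalent sketch using `async with`, which guarantees the exit hook runs even if an assertion fails mid-block, would be:

```python
# Equivalent to the manual __aenter__/__aexit__ calls above, with cleanup
# guaranteed; MagicMock stands in for the FastAPI app instance.
from unittest.mock import MagicMock

from api.src.main import lifespan

async def run_under_lifespan():
    async with lifespan(MagicMock()):
        pass  # assertions against the mocked managers would go here
```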


@@ -0,0 +1,190 @@
"""Tests for model and voice managers"""
import os
import numpy as np
import pytest
import torch
from unittest.mock import AsyncMock, MagicMock, Mock, patch
from api.src.inference.model_manager import get_manager as get_model_manager
from api.src.inference.voice_manager import get_manager as get_voice_manager
# Get project root path
PROJECT_ROOT = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
MOCK_VOICES_DIR = os.path.join(PROJECT_ROOT, "api", "src", "voices")
MOCK_MODEL_DIR = os.path.join(PROJECT_ROOT, "api", "src", "models")
@pytest.mark.asyncio
async def test_model_manager_initialization():
"""Test model manager initialization"""
with patch("api.src.inference.model_manager.settings") as mock_settings, \
patch("api.src.core.paths.get_model_path") as mock_get_path:
mock_settings.model_dir = MOCK_MODEL_DIR
mock_settings.onnx_model_path = "model.onnx"
mock_get_path.return_value = os.path.join(MOCK_MODEL_DIR, "model.onnx")
manager = await get_model_manager()
assert manager is not None
backend = manager.get_backend()
assert backend is not None
@pytest.mark.asyncio
async def test_model_manager_generate():
"""Test model generation"""
with patch("api.src.inference.model_manager.settings") as mock_settings, \
patch("api.src.core.paths.get_model_path") as mock_get_path, \
patch("torch.load") as mock_torch_load:
mock_settings.model_dir = MOCK_MODEL_DIR
mock_settings.onnx_model_path = "model.onnx"
mock_settings.use_onnx = True
mock_settings.use_gpu = False
mock_get_path.return_value = os.path.join(MOCK_MODEL_DIR, "model.onnx")
# Mock torch load to return a tensor
mock_torch_load.return_value = torch.zeros(192)
manager = await get_model_manager()
# Set up mock backend
mock_backend = AsyncMock()
mock_backend.is_loaded = True
mock_backend.device = "cpu"
# Create audio tensor and ensure it's properly mocked
audio_data = torch.zeros(48000, dtype=torch.float32)
async def mock_generate(*args, **kwargs):
return audio_data
mock_backend.generate.side_effect = mock_generate
# Set up manager with mock backend
manager._backends['onnx_cpu'] = mock_backend
manager._current_backend = 'onnx_cpu'
# Generate audio
tokens = [1, 2, 3]
voice_tensor = torch.zeros(192)
audio = await manager.generate(tokens, voice_tensor, speed=1.0)
assert isinstance(audio, torch.Tensor), "Generated audio should be torch tensor"
assert audio.dtype == torch.float32, "Audio should be 32-bit float"
assert audio.shape == (48000,), "Audio should have 48000 samples"
assert mock_backend.generate.call_count == 1
@pytest.mark.asyncio
async def test_voice_manager_initialization():
"""Test voice manager initialization"""
with patch("api.src.inference.voice_manager.settings") as mock_settings, \
patch("os.path.exists") as mock_exists:
mock_settings.voices_dir = MOCK_VOICES_DIR
mock_exists.return_value = True
manager = await get_voice_manager()
assert manager is not None
@pytest.mark.asyncio
async def test_voice_manager_list_voices():
"""Test listing available voices"""
with patch("api.src.inference.voice_manager.settings") as mock_settings, \
patch("os.listdir") as mock_listdir, \
patch("os.makedirs") as mock_makedirs, \
patch("os.path.exists") as mock_exists:
mock_settings.voices_dir = MOCK_VOICES_DIR
mock_listdir.return_value = ["af_bella.pt", "af_sarah.pt", "bm_lewis.pt"]
mock_exists.return_value = True
manager = await get_voice_manager()
voices = await manager.list_voices()
assert isinstance(voices, list)
assert len(voices) == 3, f"Expected 3 voices but got {len(voices)}"
assert sorted(voices) == ["af_bella", "af_sarah", "bm_lewis"]
mock_listdir.assert_called_once()
@pytest.mark.asyncio
async def test_voice_manager_load_voice():
"""Test loading a voice"""
with patch("api.src.inference.voice_manager.settings") as mock_settings, \
patch("torch.load") as mock_torch_load, \
patch("os.path.exists") as mock_exists:
mock_settings.voices_dir = MOCK_VOICES_DIR
mock_exists.return_value = True
# Create a mock tensor
mock_tensor = torch.zeros(192)
mock_torch_load.return_value = mock_tensor
manager = await get_voice_manager()
voice_tensor = await manager.load_voice("af_bella", device="cpu")
assert isinstance(voice_tensor, torch.Tensor)
assert voice_tensor.shape == (192,)
mock_torch_load.assert_called_once()
@pytest.mark.asyncio
async def test_voice_manager_combine_voices():
"""Test combining voices"""
with patch("api.src.inference.voice_manager.settings") as mock_settings, \
patch("torch.load") as mock_load, \
patch("torch.save") as mock_save, \
patch("os.makedirs") as mock_makedirs, \
patch("os.path.exists") as mock_exists:
mock_settings.voices_dir = MOCK_VOICES_DIR
mock_exists.return_value = True
# Create mock tensors
mock_tensor1 = torch.ones(192)
mock_tensor2 = torch.ones(192) * 2
mock_load.side_effect = [mock_tensor1, mock_tensor2]
manager = await get_voice_manager()
combined_name = await manager.combine_voices(["af_bella", "af_sarah"])
assert combined_name == "af_bella_af_sarah"
assert mock_load.call_count == 2
mock_save.assert_called_once()
# Verify the combined tensor was saved
saved_tensor = mock_save.call_args[0][0]
assert isinstance(saved_tensor, torch.Tensor)
assert saved_tensor.shape == (192,)
# Should be average of the two tensors
assert torch.allclose(saved_tensor, torch.ones(192) * 1.5)
@pytest.mark.asyncio
async def test_voice_manager_invalid_voice():
"""Test loading invalid voice"""
with patch("api.src.inference.voice_manager.settings") as mock_settings, \
patch("os.path.exists") as mock_exists:
mock_settings.voices_dir = MOCK_VOICES_DIR
mock_exists.return_value = False
manager = await get_voice_manager()
with pytest.raises(RuntimeError, match="Voice not found"):
await manager.load_voice("invalid_voice", device="cpu")
@pytest.mark.asyncio
async def test_voice_manager_combine_invalid_voices():
"""Test combining with invalid voices"""
with patch("api.src.inference.voice_manager.settings") as mock_settings, \
patch("os.path.exists") as mock_exists:
mock_settings.voices_dir = MOCK_VOICES_DIR
mock_exists.return_value = False
manager = await get_voice_manager()
with pytest.raises(RuntimeError, match="Voice not found"):
await manager.combine_voices(["invalid_voice1", "invalid_voice2"])


@@ -0,0 +1,139 @@
"""Tests for text processing endpoints"""
import os
import pytest
import torch
from fastapi.testclient import TestClient
from ..src.main import app
# Get project root path
PROJECT_ROOT = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
MOCK_VOICES_DIR = os.path.join(PROJECT_ROOT, "api", "src", "voices")
client = TestClient(app)
@pytest.mark.asyncio
async def test_generate_from_phonemes(async_client, mock_tts_service):
"""Test generating audio from phonemes"""
# Setup mocks
mock_tts_service._voice_manager.list_voices.return_value = ["af_bella"]
mock_tts_service.generate_audio.return_value = (torch.zeros(48000).numpy(), 1.0)
mock_tts_service._voice_manager.load_voice.return_value = torch.zeros(192)
mock_tts_service._voice_manager.get_voice_path.return_value = "/mock/voices/af_bella.pt"
test_request = {
"model": "kokoro",
"input": "h @ l oU w r= l d",
"voice": "af_bella",
"response_format": "wav",
"speed": 1.0,
"stream": False,
}
response = await async_client.post("/v1/audio/speech", json=test_request)
assert response.status_code == 200
assert response.headers["content-type"] == "audio/wav"
assert response.headers["content-disposition"] == "attachment; filename=speech.wav"
mock_tts_service.generate_audio.assert_called_once()
@pytest.mark.asyncio
async def test_generate_from_phonemes_invalid_voice(async_client, mock_tts_service):
"""Test generating audio from phonemes with invalid voice"""
# Setup mocks
mock_tts_service._voice_manager.list_voices.return_value = ["af_bella"]
mock_tts_service._voice_manager.get_voice_path.return_value = None
test_request = {
"model": "kokoro",
"input": "h @ l oU w r= l d",
"voice": "invalid_voice",
"response_format": "wav",
"speed": 1.0,
"stream": False,
}
response = await async_client.post("/v1/audio/speech", json=test_request)
assert response.status_code == 400
assert "Voice not found" in response.json()["detail"]["message"]
@pytest.mark.asyncio
async def test_generate_from_phonemes_chunked(async_client, mock_tts_service):
"""Test generating chunked audio from phonemes"""
# Setup mocks
mock_tts_service._voice_manager.list_voices.return_value = ["af_bella"]
mock_tts_service._voice_manager.load_voice.return_value = torch.zeros(192)
mock_tts_service._voice_manager.get_voice_path.return_value = "/mock/voices/af_bella.pt"
async def mock_stream():
yield b"chunk1"
yield b"chunk2"
mock_tts_service.generate_audio_stream.return_value = mock_stream()
test_request = {
"model": "kokoro",
"input": "h @ l oU w r= l d",
"voice": "af_bella",
"response_format": "mp3",
"stream": True,
}
headers = {"x-raw-response": "stream"}
response = await async_client.post(
"/v1/audio/speech", json=test_request, headers=headers
)
assert response.status_code == 200
assert response.headers["content-type"] == "audio/mpeg"
assert response.headers["content-disposition"] == "attachment; filename=speech.mp3"
@pytest.mark.asyncio
async def test_invalid_phonemes(async_client, mock_tts_service):
"""Test handling invalid phonemes"""
# Setup mocks
mock_tts_service._voice_manager.list_voices.return_value = ["af_bella"]
mock_tts_service._voice_manager.load_voice.return_value = torch.zeros(192)
mock_tts_service._voice_manager.get_voice_path.return_value = "/mock/voices/af_bella.pt"
test_request = {
"model": "kokoro",
"input": "", # Empty input
"voice": "af_bella",
"response_format": "wav",
"speed": 1.0,
"stream": False,
}
response = await async_client.post("/v1/audio/speech", json=test_request)
assert response.status_code == 400
assert "Text is empty" in response.json()["detail"]["message"]
@pytest.mark.asyncio
async def test_phonemes_with_combined_voice(async_client, mock_tts_service):
"""Test generating audio from phonemes with combined voice"""
# Setup mocks
mock_tts_service._voice_manager.list_voices.return_value = ["af_bella", "af_sarah"]
mock_tts_service._voice_manager.combine_voices.return_value = "af_bella_af_sarah"
mock_tts_service._voice_manager.load_voice.return_value = torch.zeros(192)
mock_tts_service._voice_manager.get_voice_path.return_value = "/mock/voices/af_bella_af_sarah.pt"
mock_tts_service.generate_audio.return_value = (torch.zeros(48000).numpy(), 1.0)
test_request = {
"model": "kokoro",
"input": "h @ l oU w r= l d",
"voice": "af_bella+af_sarah",
"response_format": "wav",
"speed": 1.0,
"stream": False,
}
response = await async_client.post("/v1/audio/speech", json=test_request)
assert response.status_code == 200
assert response.headers["content-type"] == "audio/wav"
mock_tts_service._voice_manager.combine_voices.assert_called_once()
mock_tts_service.generate_audio.assert_called_once()
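Outside the test suite, the same endpoint can be exercised against a running server. A sketch, where the host and port are assumptions to adjust for your deployment:

```python
# Standalone request to the speech endpoint; localhost:8880 is an assumed
# default and may differ in your deployment.
import httpx

resp = httpx.post(
    "http://localhost:8880/v1/audio/speech",
    json={
        "model": "kokoro",
        "input": "Hello world",
        "voice": "af_bella",
        "response_format": "wav",
        "speed": 1.0,
        "stream": False,
    },
)
resp.raise_for_status()
with open("speech.wav", "wb") as f:
    f.write(resp.content)
```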


@@ -0,0 +1,118 @@
"""Tests for TTSService"""
import os
import numpy as np
import pytest
import torch
from unittest.mock import AsyncMock, MagicMock, Mock, patch
from api.src.services.tts_service import TTSService
# Get project root path
PROJECT_ROOT = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
MOCK_VOICES_DIR = os.path.join(PROJECT_ROOT, "api", "src", "voices")
MOCK_MODEL_DIR = os.path.join(PROJECT_ROOT, "api", "src", "models")
@pytest.mark.asyncio
async def test_service_initialization(mock_model_manager, mock_voice_manager):
"""Test TTSService initialization"""
# Create service using factory method
with patch("api.src.services.tts_service.get_model_manager", return_value=mock_model_manager), \
patch("api.src.services.tts_service.get_voice_manager", return_value=mock_voice_manager):
service = await TTSService.create()
assert service is not None
assert service.model_manager == mock_model_manager
assert service._voice_manager == mock_voice_manager
@pytest.mark.asyncio
async def test_generate_audio_basic(mock_tts_service):
"""Test basic audio generation"""
text = "Hello world"
voice = "af"
audio, duration = await mock_tts_service.generate_audio(text, voice)
assert isinstance(audio, np.ndarray)
assert duration > 0
@pytest.mark.asyncio
async def test_generate_audio_empty_text(mock_tts_service):
"""Test handling empty text input"""
with pytest.raises(ValueError, match="Text is empty after preprocessing"):
await mock_tts_service.generate_audio("", "af")
@pytest.mark.asyncio
async def test_generate_audio_stream(mock_tts_service):
"""Test streaming audio generation"""
text = "Hello world"
voice = "af"
# Setup mock stream
async def mock_stream():
yield b"chunk1"
yield b"chunk2"
mock_tts_service.generate_audio_stream.return_value = mock_stream()
# Test streaming
stream = mock_tts_service.generate_audio_stream(text, voice)
chunks = []
async for chunk in await stream:
chunks.append(chunk)
assert len(chunks) > 0
assert all(isinstance(chunk, bytes) for chunk in chunks)
@pytest.mark.asyncio
async def test_list_voices(mock_tts_service):
"""Test listing available voices"""
with patch("api.src.inference.voice_manager.settings") as mock_settings:
mock_settings.voices_dir = MOCK_VOICES_DIR
voices = await mock_tts_service.list_voices()
assert isinstance(voices, list)
assert len(voices) == 4 # ["af", "af_bella", "af_sarah", "bm_lewis"]
assert all(isinstance(voice, str) for voice in voices)
@pytest.mark.asyncio
async def test_combine_voices(mock_tts_service):
"""Test combining voices"""
with patch("api.src.inference.voice_manager.settings") as mock_settings:
mock_settings.voices_dir = MOCK_VOICES_DIR
voices = ["af_bella", "af_sarah"]
result = await mock_tts_service.combine_voices(voices)
assert isinstance(result, str)
assert result == "af_bella_af_sarah"
@pytest.mark.asyncio
async def test_audio_to_bytes(mock_tts_service):
"""Test converting audio to bytes"""
audio = np.zeros(48000, dtype=np.float32)
audio_bytes = mock_tts_service._audio_to_bytes(audio)
assert isinstance(audio_bytes, bytes)
assert len(audio_bytes) > 0
@pytest.mark.asyncio
async def test_voice_loading(mock_tts_service):
"""Test voice loading"""
with patch("api.src.inference.voice_manager.settings") as mock_settings, \
patch("os.path.exists", return_value=True), \
patch("torch.load", return_value=torch.zeros(192)):
mock_settings.voices_dir = MOCK_VOICES_DIR
voice = await mock_tts_service._voice_manager.load_voice("af", device="cpu")
assert isinstance(voice, torch.Tensor)
assert voice.shape == (192,)
@pytest.mark.asyncio
async def test_model_generation(mock_tts_service):
"""Test model generation"""
tokens = [1, 2, 3]
voice_tensor = torch.zeros(192)
audio = await mock_tts_service.model_manager.generate(tokens, voice_tensor)
assert isinstance(audio, torch.Tensor)
assert audio.shape == (48000,)
assert audio.dtype == torch.float32
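`test_audio_to_bytes` only checks that the conversion produces non-empty bytes. One plausible implementation, an assumption since the real service may delegate to a library such as soundfile:

```python
# Hypothetical float32 -> 16-bit PCM conversion consistent with the test:
# 48000 float samples in, 96000 bytes (2 per sample) out.
import numpy as np

def audio_to_bytes(audio: np.ndarray) -> bytes:
    pcm = (np.clip(audio, -1.0, 1.0) * 32767).astype(np.int16)
    return pcm.tobytes()

assert len(audio_to_bytes(np.zeros(48000, dtype=np.float32))) == 96000
```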


@@ -15,6 +15,7 @@ class Settings(BaseSettings):
default_voice: str = "af"
use_gpu: bool = False # Whether to use GPU acceleration if available
use_onnx: bool = True # Whether to use ONNX runtime
allow_local_voice_saving: bool = True # Whether to allow saving combined voices locally
# Container absolute paths
model_dir: str = "/app/api/src/models" # Absolute path in container
@@ -25,6 +26,12 @@ class Settings(BaseSettings):
max_chunk_size: int = 300 # Maximum size of text chunks for processing
gap_trim_ms: int = 250 # Amount to trim from streaming chunk ends in milliseconds
# Web Player Settings
enable_web_player: bool = True # Whether to serve the web player UI
web_player_path: str = "web" # Path to web player static files
cors_origins: list[str] = ["*"] # CORS origins for web player
cors_enabled: bool = True # Whether to enable CORS
class Config:
env_file = ".env"


@@ -265,4 +265,76 @@ async def read_file(path: str) -> str:
async with aiofiles.open(path, 'r', encoding='utf-8') as f:
return await f.read()
except Exception as e:
raise RuntimeError(f"Failed to read file {path}: {e}")
raise RuntimeError(f"Failed to read file {path}: {e}")
async def read_bytes(path: str) -> bytes:
"""Read file as bytes asynchronously.
Args:
path: Path to file
Returns:
File contents as bytes
Raises:
RuntimeError: If file cannot be read
"""
try:
async with aiofiles.open(path, 'rb') as f:
return await f.read()
except Exception as e:
raise RuntimeError(f"Failed to read file {path}: {e}")
async def get_web_file_path(filename: str) -> str:
"""Get path to web static file.
Args:
filename: Name of file in web directory
Returns:
Absolute path to file
Raises:
RuntimeError: If file not found
"""
# Get project root directory (four levels up from core to get to project root)
root_dir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
# Construct web directory path relative to project root
web_dir = os.path.join("/app", settings.web_player_path)
# Search in web directory
search_paths = [web_dir]
logger.debug(f"Searching for web file in path: {web_dir}")
return await _find_file(filename, search_paths)
async def get_content_type(path: str) -> str:
"""Get content type for file.
Args:
path: Path to file
Returns:
Content type string
"""
ext = os.path.splitext(path)[1].lower()
return {
'.html': 'text/html',
'.js': 'application/javascript',
'.css': 'text/css',
'.png': 'image/png',
'.jpg': 'image/jpeg',
'.jpeg': 'image/jpeg',
'.gif': 'image/gif',
'.svg': 'image/svg+xml',
'.ico': 'image/x-icon',
}.get(ext, 'application/octet-stream')
async def verify_model_path(model_path: str) -> bool:
"""Verify model file exists at path."""
return await aiofiles.os.path.exists(model_path)
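A short usage sketch tying the new helpers together (the file name is illustrative):

```python
# Serving flow in miniature: resolve a web asset, read its bytes, and
# pick a MIME type -- the same three calls the web router makes below.
import asyncio

from api.src.core.paths import get_content_type, get_web_file_path, read_bytes

async def fetch_asset(name: str) -> tuple[bytes, str]:
    path = await get_web_file_path(name)  # raises RuntimeError if missing
    return await read_bytes(path), await get_content_type(path)

content, mime = asyncio.run(fetch_asset("index.html"))
```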


@@ -1,7 +1,7 @@
"""Model management and caching."""
import asyncio
from typing import Dict, Optional
from typing import Dict, Optional, Tuple
import torch
from loguru import logger
@@ -23,17 +23,11 @@ _manager_lock = asyncio.Lock()
class ModelManager:
"""Manages model loading and inference across backends."""
# Class-level state for shared resources
_loaded_models = {}
_backends = {}
def __init__(self, config: Optional[ModelConfig] = None):
"""Initialize model manager.
Args:
config: Optional configuration
Note:
This should not be called directly. Use get_manager() instead.
"""
@@ -90,16 +84,70 @@ class ModelManager:
logger.error(f"Failed to initialize backend: {e}")
raise RuntimeError("Failed to initialize backend")
async def initialize_with_warmup(self, voice_manager) -> tuple[str, str, int]:
"""Initialize model with warmup and pre-cache voices.
Args:
voice_manager: Voice manager instance for loading voices
Returns:
Tuple of (device type, model type, number of loaded voices)
Raises:
RuntimeError: If initialization fails
"""
try:
# Determine backend type based on settings
if settings.use_gpu and torch.cuda.is_available():
backend_type = 'pytorch_gpu' if not settings.use_onnx else 'onnx_gpu'
else:
backend_type = 'pytorch_cpu' if not settings.use_onnx else 'onnx_cpu'
# Get backend
backend = self.get_backend(backend_type)
# Get and verify model path
model_file = model_config.pytorch_model_file if not settings.use_onnx else model_config.onnx_model_file
model_path = await paths.get_model_path(model_file)
if not await paths.verify_model_path(model_path):
raise RuntimeError(f"Model file not found: {model_path}")
# Pre-cache default voice and use for warmup
warmup_voice = await voice_manager.load_voice(
settings.default_voice, device=backend.device)
logger.info(f"Pre-cached voice {settings.default_voice} for warmup")
# Initialize model with warmup voice
await self.load_model(model_path, warmup_voice, backend_type)
# Pre-cache common voices in background
common_voices = ['af', 'af_bella', 'af_sky', 'af_nicole']
for voice_name in common_voices:
try:
await voice_manager.load_voice(voice_name, device=backend.device)
logger.debug(f"Pre-cached voice {voice_name}")
except Exception as e:
logger.warning(f"Failed to pre-cache voice {voice_name}: {e}")
# Get available voices count
voices = await voice_manager.list_voices()
voicepack_count = len(voices)
# Get device info for return
device = "GPU" if settings.use_gpu else "CPU"
model = "ONNX" if settings.use_onnx else "PyTorch"
return device, model, voicepack_count
except Exception as e:
logger.error(f"Failed to initialize model with warmup: {e}")
raise RuntimeError(f"Failed to initialize model with warmup: {e}")
def get_backend(self, backend_type: Optional[str] = None) -> BaseModelBackend:
"""Get specified backend.
Args:
backend_type: Backend type ('pytorch_cpu', 'pytorch_gpu', 'onnx_cpu', 'onnx_gpu'),
uses default if None
Returns:
Model backend instance
Raises:
ValueError: If backend type is invalid
RuntimeError: If no backends are available
@@ -120,10 +168,8 @@ class ModelManager:
def _determine_backend(self, model_path: str) -> str:
"""Determine appropriate backend based on model file and settings.
Args:
model_path: Path to model file
Returns:
Backend type to use
"""
@@ -142,12 +188,10 @@ class ModelManager:
backend_type: Optional[str] = None
) -> None:
"""Load model on specified backend.
Args:
model_path: Path to model file
warmup_voice: Optional voice tensor for warmup, skips warmup if None
backend_type: Backend to load on, uses default if None
Raises:
RuntimeError: If model loading fails
"""
@@ -270,34 +314,24 @@ class ModelManager:
@property
def available_backends(self) -> list[str]:
"""Get list of available backends.
Returns:
List of backend names
"""
return list(self._backends.keys())
@property
def current_backend(self) -> str:
"""Get current default backend.
Returns:
Backend name
"""
return self._current_backend
async def get_manager(config: Optional[ModelConfig] = None) -> ModelManager:
"""Get global model manager instance.
Args:
config: Optional model configuration
Returns:
ModelManager instance
Thread Safety:
This function is thread-safe and ensures only one instance is created
even under concurrent access.
This function should be thread-safe. Let me know if it unravels on you
"""
global _manager_instance
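The new `initialize_with_warmup` consolidates the startup logic that `main.py` previously inlined (see the lifespan diff below). Its intended call shape, as used by the rewritten lifespan:

```python
# Consolidated startup path: both managers are created, then the model
# manager warms the model and pre-caches voices in one call.
from api.src.inference.model_manager import get_manager
from api.src.inference.voice_manager import get_manager as get_voice_manager

async def warm_start():
    model_manager = await get_manager()
    voice_manager = await get_voice_manager()
    return await model_manager.initialize_with_warmup(voice_manager)
```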


@@ -117,14 +117,18 @@ class VoiceManager:
voices_dir = os.path.join(api_dir, settings.voices_dir)
os.makedirs(voices_dir, exist_ok=True)
# Save combined voice
combined_path = os.path.join(voices_dir, f"{combined_name}.pt")
try:
torch.save(combined_tensor, combined_path)
# Cache the new combined voice
self._voice_cache[f"{combined_path}_{device}"] = combined_tensor
except Exception as e:
raise RuntimeError(f"Failed to save combined voice: {e}")
# Only save to disk if local voice saving is allowed
if settings.allow_local_voice_saving:
combined_path = os.path.join(voices_dir, f"{combined_name}.pt")
try:
torch.save(combined_tensor, combined_path)
# Cache the new combined voice with disk path
self._voice_cache[f"{combined_path}_{device}"] = combined_tensor
except Exception as e:
raise RuntimeError(f"Failed to save combined voice: {e}")
else:
# Just cache the combined voice in memory without saving to disk
self._voice_cache[f"{combined_name}_{device}"] = combined_tensor
return combined_name
@@ -135,20 +139,30 @@ class VoiceManager:
"""List available voices.
Returns:
List of voice names
List of voice names, including both disk-saved and in-memory combined voices
"""
voices = []
voices = set() # Use set to avoid duplicates
try:
# Get voices from disk
api_dir = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
voices_dir = os.path.join(api_dir, settings.voices_dir)
os.makedirs(voices_dir, exist_ok=True)
for entry in os.listdir(voices_dir):
if entry.endswith(".pt"):
voices.append(entry[:-3])
voices.add(entry[:-3])
# Add in-memory combined voices from cache
for cache_key in self._voice_cache:
# Extract voice name from cache key (format: "name_device" or "path_device");
# split on the last underscore so voice names containing "_" stay intact
voice_name = cache_key.rsplit("_", 1)[0]
if "/" in voice_name: # It's a path
voice_name = os.path.basename(voice_name)[:-3] # Remove .pt extension
voices.add(voice_name)
except Exception as e:
logger.error(f"Error listing voices: {e}")
return sorted(voices)
return sorted(list(voices))
def validate_voice(self, voice_path: str) -> bool:
"""Validate voice file.


@@ -13,6 +13,7 @@ from fastapi.middleware.cors import CORSMiddleware
from loguru import logger
from .core.config import settings
from .routers.web_player import router as web_router
from .core.model_config import model_config
from .routers.development import router as dev_router
from .routers.openai_compatible import router as openai_router
@@ -54,46 +55,8 @@ async def lifespan(app: FastAPI):
model_manager = await get_manager()
voice_manager = await get_voice_manager()
# Determine backend type based on settings
if settings.use_gpu and torch.cuda.is_available():
backend_type = 'pytorch_gpu' if not settings.use_onnx else 'onnx_gpu'
else:
backend_type = 'pytorch_cpu' if not settings.use_onnx else 'onnx_cpu'
# Get backend and initialize model
backend = model_manager.get_backend(backend_type)
# Use model path from model_config
model_file = model_config.pytorch_model_file if not settings.use_onnx else model_config.onnx_model_file
model_path = os.path.join(settings.model_dir, model_file)
if not os.path.exists(model_path):
raise RuntimeError(f"Model file not found: {model_path}")
# Pre-cache default voice and use for warmup
warmup_voice = await voice_manager.load_voice(settings.default_voice, device=backend.device)
logger.info(f"Pre-cached voice {settings.default_voice} for warmup")
# Initialize model with warmup voice
await model_manager.load_model(model_path, warmup_voice, backend_type)
# Pre-cache common voices in background
common_voices = ['af', 'af_bella', 'af_sarah', 'af_nicole']
for voice_name in common_voices:
try:
await voice_manager.load_voice(voice_name, device=backend.device)
logger.debug(f"Pre-cached voice {voice_name}")
except Exception as e:
logger.warning(f"Failed to pre-cache voice {voice_name}: {e}")
# Get available voices for startup message
voices = await voice_manager.list_voices()
voicepack_count = len(voices)
# Get device info for startup message
device = "GPU" if settings.use_gpu else "CPU"
model = "ONNX" if settings.use_onnx else "PyTorch"
# Initialize model with warmup and get status
device, model, voicepack_count = await model_manager.initialize_with_warmup(voice_manager)
except Exception as e:
logger.error(f"Failed to initialize model: {e}")
raise
@@ -112,7 +75,14 @@ async def lifespan(app: FastAPI):
{boundary}
"""
startup_msg += f"\nModel warmed up on {device}: {model}"
startup_msg += f"\n{voicepack_count} voice packs loaded\n"
startup_msg += f"\n{voicepack_count} voice packs loaded"
# Add web player info if enabled
if settings.enable_web_player:
startup_msg += f"\n\nWeb Player: http://{settings.host}:{settings.port}/web/"
else:
startup_msg += "\n\nWeb Player: disabled"
startup_msg += f"\n{boundary}\n"
logger.info(startup_msg)
@@ -128,19 +98,21 @@ app = FastAPI(
openapi_url="/openapi.json", # Explicitly enable OpenAPI schema
)
# Add CORS middleware
app.add_middleware(
CORSMiddleware,
allow_origins=["*"],
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
# Add CORS middleware if enabled
if settings.cors_enabled:
app.add_middleware(
CORSMiddleware,
allow_origins=settings.cors_origins,
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
# Include routers
app.include_router(openai_router, prefix="/v1")
app.include_router(dev_router) # New development endpoints
# app.include_router(text_router) # Deprecated but still live for backwards compatibility
app.include_router(dev_router) # Development endpoints
if settings.enable_web_player:
app.include_router(web_router, prefix="/web") # Web player static files
# Health check endpoint


@@ -118,9 +118,32 @@ async def create_speech(
# Check if streaming is requested (default for OpenAI client)
if request.stream:
# Create generator but don't start it yet
generator = stream_audio_chunks(tts_service, request, client_request)
# Test the generator by attempting to get first chunk
try:
first_chunk = await anext(generator)
except StopAsyncIteration:
first_chunk = b"" # Empty audio case
except Exception as e:
# Re-raise any errors to be caught by the outer try-except
raise RuntimeError(f"Failed to initialize audio stream: {str(e)}") from e
# If we got here, streaming can begin
async def safe_stream():
yield first_chunk
try:
async for chunk in generator:
yield chunk
except Exception as e:
# Log the error but don't yield anything - the connection will close
logger.error(f"Error during streaming: {str(e)}")
raise
# Stream audio chunks as they're generated
return StreamingResponse(
stream_audio_chunks(tts_service, request, client_request),
safe_stream(),
media_type=content_type,
headers={
"Content-Disposition": f"attachment; filename=speech.{request.response_format}",


@@ -0,0 +1,48 @@
"""Web player router with async file serving."""
from fastapi import APIRouter, HTTPException
from fastapi.responses import Response
from loguru import logger
from ..core.config import settings
from ..core.paths import get_web_file_path, read_bytes, get_content_type
router = APIRouter(
tags=["Web Player"],
responses={404: {"description": "Not found"}},
)
@router.get("/{filename:path}")
async def serve_web_file(filename: str):
"""Serve web player static files asynchronously."""
if not settings.enable_web_player:
raise HTTPException(status_code=404, detail="Web player is disabled")
try:
# Default to index.html for root path
if filename == "" or filename == "/":
filename = "index.html"
# Get file path
file_path = await get_web_file_path(filename)
# Read file content
content = await read_bytes(file_path)
# Get content type
content_type = await get_content_type(file_path)
return Response(
content=content,
media_type=content_type,
headers={
"Cache-Control": "no-cache", # Prevent caching during development
}
)
except RuntimeError as e:
logger.warning(f"Web file not found: {filename}")
raise HTTPException(status_code=404, detail=str(e))
except Exception as e:
logger.error(f"Error serving web file {filename}: {e}")
raise HTTPException(status_code=500, detail="Internal server error")
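With the router mounted under `/web` in `main.py`, the route can be smoke-tested against a running server; a sketch (host and port are assumptions):

```python
# Smoke test for the static file route; localhost:8880 is assumed.
import httpx

r = httpx.get("http://localhost:8880/web/index.html")
assert r.status_code == 200
assert r.headers["content-type"].startswith("text/html")
```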


@@ -1,7 +1,7 @@
from pydantic import BaseModel, Field
from pydantic import validator
from pydantic import BaseModel, Field, field_validator
from typing import List, Union, Optional
class PhonemeRequest(BaseModel):
text: str
language: str = "a" # Default to American English
@@ -24,12 +24,14 @@ class StitchOptions(BaseModel):
description="Milliseconds to trim from chunk boundaries when using static_trim"
)
@validator('gap_method')
def validate_gap_method(cls, v):
@field_validator('gap_method')
@classmethod
def validate_gap_method(cls, v: str) -> str:
if v != 'static_trim':
raise ValueError("Currently only 'static_trim' gap method is supported")
return v
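The migrated validator behaves as before; a usage sketch, where the import path is an assumption:

```python
# pydantic v2 field_validator in action (ValidationError subclasses
# ValueError, so the except clause below catches it).
from api.src.structures.text_schemas import StitchOptions  # path assumed

StitchOptions(gap_method="static_trim")  # accepted
try:
    StitchOptions(gap_method="crossfade")
except ValueError as err:
    print(err)  # only 'static_trim' is currently supported
```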
class GenerateFromPhonemesRequest(BaseModel):
phonemes: Union[str, List[str]] = Field(
...,


@@ -1,126 +1,64 @@
import os
import shutil
import sys
from unittest.mock import MagicMock, Mock, patch
import aiofiles.threadpool
import numpy as np
import pytest
import pytest_asyncio
from unittest.mock import AsyncMock, MagicMock, patch
import numpy as np
import torch
from pathlib import Path
def cleanup_mock_dirs():
"""Clean up any MagicMock directories created during tests"""
mock_dir = "MagicMock"
if os.path.exists(mock_dir):
shutil.rmtree(mock_dir)
@pytest.fixture(autouse=True)
def setup_aiofiles():
"""Setup aiofiles mock wrapper"""
aiofiles.threadpool.wrap.register(MagicMock)(
lambda *args, **kwargs: aiofiles.threadpool.AsyncBufferedIOBase(*args, **kwargs)
)
yield
@pytest.fixture(autouse=True)
def cleanup():
"""Automatically clean up before and after each test"""
cleanup_mock_dirs()
yield
cleanup_mock_dirs()
# Mock modules before they're imported
sys.modules["transformers"] = Mock()
sys.modules["phonemizer"] = Mock()
sys.modules["models"] = Mock()
sys.modules["models.build_model"] = Mock()
sys.modules["kokoro"] = Mock()
sys.modules["kokoro.generate"] = Mock()
sys.modules["kokoro.phonemize"] = Mock()
sys.modules["kokoro.tokenize"] = Mock()
# Mock ONNX runtime
mock_onnx = Mock()
mock_onnx.InferenceSession = Mock()
mock_onnx.SessionOptions = Mock()
mock_onnx.GraphOptimizationLevel = Mock()
mock_onnx.ExecutionMode = Mock()
sys.modules["onnxruntime"] = mock_onnx
# Create mock settings module
mock_settings_module = Mock()
mock_settings = Mock()
mock_settings.model_dir = "/mock/model/dir"
mock_settings.onnx_model_path = "mock.onnx"
mock_settings_module.settings = mock_settings
sys.modules["api.src.core.config"] = mock_settings_module
class MockTTSModel:
_instance = None
_onnx_session = None
VOICES_DIR = "/mock/voices/dir"
def __init__(self):
self._initialized = False
@classmethod
def get_instance(cls):
if cls._instance is None:
cls._instance = cls()
return cls._instance
@classmethod
def initialize(cls, model_dir):
cls._onnx_session = Mock()
cls._onnx_session.run = Mock(return_value=[np.zeros(48000)])
cls._instance._initialized = True
return cls._onnx_session
@classmethod
def setup(cls):
if not cls._instance._initialized:
cls.initialize("/mock/model/dir")
return cls._instance
@classmethod
def generate_from_tokens(cls, tokens, voicepack, speed):
if not cls._instance._initialized:
raise RuntimeError("Model not initialized. Call setup() first.")
return np.zeros(48000)
@classmethod
def process_text(cls, text, language):
return "mock phonemes", [1, 2, 3]
@staticmethod
def get_device():
return "cpu"
from api.src.services.tts_service import TTSService
from api.src.inference.voice_manager import VoiceManager
from api.src.inference.model_manager import ModelManager
from api.src.structures.model_schemas import VoiceConfig
@pytest.fixture
def mock_tts_service(monkeypatch):
"""Mock TTSService for testing"""
mock_service = Mock()
mock_service._get_voice_path.return_value = "/mock/path/voice.pt"
mock_service._load_voice.return_value = np.zeros((1, 192))
# Mock TTSModel.generate_from_tokens since we call it directly
mock_generate = Mock(return_value=np.zeros(48000))
monkeypatch.setattr(
"api.src.routers.development.TTSModel.generate_from_tokens", mock_generate
)
return mock_service
def mock_voice_tensor():
"""Mock voice tensor for testing."""
return torch.randn(1, 128) # Dummy tensor
@pytest.fixture
def mock_audio_service(monkeypatch):
"""Mock AudioService"""
mock_service = Mock()
mock_service.convert_audio.return_value = b"mock audio data"
monkeypatch.setattr("api.src.routers.development.AudioService", mock_service)
return mock_service
def mock_audio_output():
"""Mock audio output for testing."""
return np.random.rand(16000) # 1 second of random audio
@pytest_asyncio.fixture
async def mock_model_manager(mock_audio_output):
"""Mock model manager for testing."""
manager = AsyncMock(spec=ModelManager)
manager.get_backend = MagicMock()
manager.generate = AsyncMock(return_value=mock_audio_output)
return manager
@pytest_asyncio.fixture
async def mock_voice_manager(mock_voice_tensor):
"""Mock voice manager for testing."""
manager = AsyncMock(spec=VoiceManager)
manager.get_voice_path = MagicMock(return_value="/mock/path/voice.pt")
manager.load_voice = AsyncMock(return_value=mock_voice_tensor)
manager.list_voices = AsyncMock(return_value=["voice1", "voice2"])
manager.combine_voices = AsyncMock(return_value="voice1_voice2")
return manager
@pytest_asyncio.fixture
async def tts_service(mock_model_manager, mock_voice_manager):
"""Get mocked TTS service instance."""
service = TTSService()
service.model_manager = mock_model_manager
service._voice_manager = mock_voice_manager
return service
@pytest.fixture
def test_voice():
"""Return a test voice name."""
return "voice1"
@pytest.fixture(scope="session")
def event_loop():
"""Create an instance of the default event loop for the test session."""
import asyncio
try:
loop = asyncio.get_event_loop()
except RuntimeError:
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
yield loop
loop.close()


@@ -1,402 +0,0 @@
import asyncio
from unittest.mock import AsyncMock, Mock
import pytest
import pytest_asyncio
from fastapi.testclient import TestClient
from httpx import AsyncClient
from ..src.main import app
# Create test client
client = TestClient(app)
# Create async client fixture
@pytest_asyncio.fixture
async def async_client():
async with AsyncClient(app=app, base_url="http://test") as ac:
yield ac
# Mock services
@pytest.fixture
def mock_tts_service(monkeypatch):
mock_service = Mock()
mock_service._generate_audio.return_value = (bytes([0, 1, 2, 3]), 1.0)
# Create proper async generator mock
async def mock_stream(*args, **kwargs):
for chunk in [b"chunk1", b"chunk2"]:
yield chunk
mock_service.generate_audio_stream = mock_stream
# Create async mocks
mock_service.list_voices = AsyncMock(
return_value=[
"af",
"bm_lewis",
"bf_isabella",
"bf_emma",
"af_sarah",
"af_bella",
"am_adam",
"am_michael",
"bm_george",
]
)
mock_service.combine_voices = AsyncMock()
monkeypatch.setattr(
"api.src.routers.openai_compatible.TTSService",
lambda *args, **kwargs: mock_service,
)
return mock_service
@pytest.fixture
def mock_audio_service(monkeypatch):
mock_service = Mock()
mock_service.convert_audio.return_value = b"converted mock audio data"
monkeypatch.setattr("api.src.routers.openai_compatible.AudioService", mock_service)
return mock_service
def test_health_check():
"""Test the health check endpoint"""
response = client.get("/health")
assert response.status_code == 200
assert response.json() == {"status": "healthy"}
@pytest.mark.asyncio
async def test_openai_speech_endpoint(
mock_tts_service, mock_audio_service, async_client
):
"""Test the OpenAI-compatible speech endpoint"""
test_request = {
"model": "kokoro",
"input": "Hello world",
"voice": "bm_lewis",
"response_format": "wav",
"speed": 1.0,
"stream": False, # Explicitly disable streaming
}
response = await async_client.post("/v1/audio/speech", json=test_request)
assert response.status_code == 200
assert response.headers["content-type"] == "audio/wav"
assert response.headers["content-disposition"] == "attachment; filename=speech.wav"
mock_tts_service._generate_audio.assert_called_once_with(
text="Hello world", voice="bm_lewis", speed=1.0, stitch_long_output=True
)
assert response.content == b"converted mock audio data"
@pytest.mark.asyncio
async def test_openai_speech_invalid_voice(mock_tts_service, async_client):
"""Test the OpenAI-compatible speech endpoint with invalid voice"""
test_request = {
"model": "kokoro",
"input": "Hello world",
"voice": "invalid_voice",
"response_format": "wav",
"speed": 1.0,
"stream": False, # Explicitly disable streaming
}
response = await async_client.post("/v1/audio/speech", json=test_request)
assert response.status_code == 400 # Bad request
assert "not found" in response.json()["detail"]["message"]
@pytest.mark.asyncio
async def test_openai_speech_invalid_speed(mock_tts_service, async_client):
"""Test the OpenAI-compatible speech endpoint with invalid speed"""
test_request = {
"model": "kokoro",
"input": "Hello world",
"voice": "af",
"response_format": "wav",
"speed": -1.0, # Invalid speed
"stream": False, # Explicitly disable streaming
}
response = await async_client.post("/v1/audio/speech", json=test_request)
assert response.status_code == 422 # Validation error
@pytest.mark.asyncio
async def test_openai_speech_generation_error(mock_tts_service, async_client):
"""Test error handling in speech generation"""
mock_tts_service._generate_audio.side_effect = Exception("Generation failed")
test_request = {
"model": "kokoro",
"input": "Hello world",
"voice": "af",
"response_format": "wav",
"speed": 1.0,
"stream": False, # Explicitly disable streaming
}
response = await async_client.post("/v1/audio/speech", json=test_request)
assert response.status_code == 500
assert "Generation failed" in response.json()["detail"]["message"]
@pytest.mark.asyncio
async def test_combine_voices_list_success(mock_tts_service, async_client):
"""Test successful voice combination using list format"""
test_voices = ["af_bella", "af_sarah"]
mock_tts_service.combine_voices = AsyncMock(return_value="af_bella_af_sarah")
response = await async_client.post("/v1/audio/voices/combine", json=test_voices)
assert response.status_code == 200
assert response.json()["voice"] == "af_bella_af_sarah"
mock_tts_service.combine_voices.assert_called_once_with(voices=test_voices)
@pytest.mark.asyncio
async def test_combine_voices_string_success(mock_tts_service, async_client):
"""Test successful voice combination using string format with +"""
test_voices = "af_bella+af_sarah"
mock_tts_service.combine_voices = AsyncMock(return_value="af_bella_af_sarah")
response = await async_client.post("/v1/audio/voices/combine", json=test_voices)
assert response.status_code == 200
assert response.json()["voice"] == "af_bella_af_sarah"
mock_tts_service.combine_voices.assert_called_once_with(
voices=["af_bella", "af_sarah"]
)
@pytest.mark.asyncio
async def test_combine_voices_single_voice(mock_tts_service, async_client):
"""Test combining single voice returns same voice"""
test_voices = ["af_bella"]
response = await async_client.post("/v1/audio/voices/combine", json=test_voices)
assert response.status_code == 200
assert response.json()["voice"] == "af_bella"
@pytest.mark.asyncio
async def test_combine_voices_empty_list(mock_tts_service, async_client):
"""Test combining empty voice list returns error"""
test_voices = []
response = await async_client.post("/v1/audio/voices/combine", json=test_voices)
assert response.status_code == 400
assert "No voices provided" in response.json()["detail"]["message"]
@pytest.mark.asyncio
async def test_combine_voices_error(mock_tts_service, async_client):
"""Test error handling in voice combination"""
test_voices = ["af_bella", "af_sarah"]
mock_tts_service.combine_voices = AsyncMock(
side_effect=Exception("Combination failed")
)
response = await async_client.post("/v1/audio/voices/combine", json=test_voices)
assert response.status_code == 500
assert "Server error" in response.json()["detail"]["message"]
@pytest.mark.asyncio
async def test_speech_with_combined_voice(
mock_tts_service, mock_audio_service, async_client
):
"""Test speech generation with combined voice using + syntax"""
mock_tts_service.combine_voices = AsyncMock(return_value="af_bella_af_sarah")
test_request = {
"model": "kokoro",
"input": "Hello world",
"voice": "af_bella+af_sarah",
"response_format": "wav",
"speed": 1.0,
"stream": False,
}
response = await async_client.post("/v1/audio/speech", json=test_request)
assert response.status_code == 200
assert response.headers["content-type"] == "audio/wav"
mock_tts_service._generate_audio.assert_called_once_with(
text="Hello world",
voice="af_bella_af_sarah",
speed=1.0,
stitch_long_output=True,
)
@pytest.mark.asyncio
async def test_speech_with_whitespace_in_voice(
mock_tts_service, mock_audio_service, async_client
):
"""Test speech generation with whitespace in voice combination"""
mock_tts_service.combine_voices = AsyncMock(return_value="af_bella_af_sarah")
test_request = {
"model": "kokoro",
"input": "Hello world",
"voice": " af_bella + af_sarah ",
"response_format": "wav",
"speed": 1.0,
"stream": False,
}
response = await async_client.post("/v1/audio/speech", json=test_request)
assert response.status_code == 200
assert response.headers["content-type"] == "audio/wav"
mock_tts_service.combine_voices.assert_called_once_with(
voices=["af_bella", "af_sarah"]
)
@pytest.mark.asyncio
async def test_speech_with_empty_voice_combination(mock_tts_service, async_client):
"""Test speech generation with empty voice combination"""
test_request = {
"model": "kokoro",
"input": "Hello world",
"voice": "+",
"response_format": "wav",
"speed": 1.0,
"stream": False,
}
response = await async_client.post("/v1/audio/speech", json=test_request)
assert response.status_code == 400
assert "No voices provided" in response.json()["detail"]["message"]
@pytest.mark.asyncio
async def test_speech_with_invalid_combined_voice(mock_tts_service, async_client):
"""Test speech generation with invalid voice combination"""
test_request = {
"model": "kokoro",
"input": "Hello world",
"voice": "invalid+combination",
"response_format": "wav",
"speed": 1.0,
"stream": False,
}
response = await async_client.post("/v1/audio/speech", json=test_request)
assert response.status_code == 400
assert "not found" in response.json()["detail"]["message"]
@pytest.mark.asyncio
async def test_speech_streaming_with_combined_voice(mock_tts_service, async_client):
"""Test streaming speech with combined voice using + syntax"""
mock_tts_service.combine_voices = AsyncMock(return_value="af_bella_af_sarah")
test_request = {
"model": "kokoro",
"input": "Hello world",
"voice": "af_bella+af_sarah",
"response_format": "mp3",
"stream": True,
}
# Create streaming mock
async def mock_stream(*args, **kwargs):
for chunk in [b"mp3header", b"mp3data"]:
yield chunk
mock_tts_service.generate_audio_stream = mock_stream
# Add streaming header
headers = {"x-raw-response": "stream"}
response = await async_client.post(
"/v1/audio/speech", json=test_request, headers=headers
)
assert response.status_code == 200
assert response.headers["content-type"] == "audio/mpeg"
assert response.headers["content-disposition"] == "attachment; filename=speech.mp3"
@pytest.mark.asyncio
async def test_openai_speech_pcm_streaming(mock_tts_service, async_client):
"""Test streaming PCM audio for real-time playback"""
test_request = {
"model": "kokoro",
"input": "Hello world",
"voice": "af",
"response_format": "pcm",
"stream": True,
}
# Create streaming mock for this test
async def mock_stream(*args, **kwargs):
for chunk in [b"chunk1", b"chunk2"]:
yield chunk
mock_tts_service.generate_audio_stream = mock_stream
# Add streaming header
headers = {"x-raw-response": "stream"}
response = await async_client.post(
"/v1/audio/speech", json=test_request, headers=headers
)
assert response.status_code == 200
assert response.headers["content-type"] == "audio/pcm"
@pytest.mark.asyncio
async def test_openai_speech_streaming_mp3(mock_tts_service, async_client):
"""Test streaming MP3 audio to file"""
test_request = {
"model": "kokoro",
"input": "Hello world",
"voice": "af",
"response_format": "mp3",
"stream": True,
}
# Create streaming mock for this test
async def mock_stream(*args, **kwargs):
for chunk in [b"mp3header", b"mp3data"]:
yield chunk
mock_tts_service.generate_audio_stream = mock_stream
# Add streaming header
headers = {"x-raw-response": "stream"}
response = await async_client.post(
"/v1/audio/speech", json=test_request, headers=headers
)
assert response.status_code == 200
assert response.headers["content-type"] == "audio/mpeg"
assert response.headers["content-disposition"] == "attachment; filename=speech.mp3"
@pytest.mark.asyncio
async def test_openai_speech_streaming_generator(mock_tts_service, async_client):
"""Test streaming with async generator"""
test_request = {
"model": "kokoro",
"input": "Hello world",
"voice": "af",
"response_format": "pcm",
"stream": True,
}
# Create streaming mock for this test
async def mock_stream(*args, **kwargs):
for chunk in [b"chunk1", b"chunk2"]:
yield chunk
mock_tts_service.generate_audio_stream = mock_stream
# Add streaming header
headers = {"x-raw-response": "stream"}
response = await async_client.post(
"/v1/audio/speech", json=test_request, headers=headers
)
assert response.status_code == 200
assert response.headers["content-type"] == "audio/pcm"
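
# Illustrative sketch, not part of the test suite: how a real client might
# consume the chunked PCM stream exercised above. The base URL is an
# assumption; the endpoint, payload, and header mirror the tests.
def example_consume_pcm_stream(base_url: str = "http://localhost:8880") -> bytes:
    import httpx

    payload = {
        "model": "kokoro",
        "input": "Hello world",
        "voice": "af",
        "response_format": "pcm",
        "stream": True,
    }
    audio = b""
    with httpx.stream(
        "POST",
        f"{base_url}/v1/audio/speech",
        json=payload,
        headers={"x-raw-response": "stream"},
    ) as response:
        for chunk in response.iter_bytes():
            audio += chunk  # hand each raw PCM chunk to an audio sink instead
    return audio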

View file

@@ -1,108 +0,0 @@
"""Tests for FastAPI application"""
from unittest.mock import MagicMock, call, patch
import pytest
from fastapi.testclient import TestClient
from api.src.main import app, lifespan
@pytest.fixture
def test_client():
"""Create a test client"""
return TestClient(app)
def test_health_check(test_client):
"""Test health check endpoint"""
response = test_client.get("/health")
assert response.status_code == 200
assert response.json() == {"status": "healthy"}
@pytest.mark.asyncio
@patch("api.src.main.TTSModel")
@patch("api.src.main.logger")
async def test_lifespan_successful_warmup(mock_logger, mock_tts_model):
"""Test successful model warmup in lifespan"""
# Mock file system for voice counting
mock_tts_model.VOICES_DIR = "/mock/voices"
# Create async mock
async def async_setup():
return 3
mock_tts_model.setup = MagicMock()
mock_tts_model.setup.side_effect = async_setup
mock_tts_model.get_device.return_value = "cuda"
with patch("os.listdir", return_value=["voice1.pt", "voice2.pt", "voice3.pt"]):
# Create an async generator from the lifespan context manager
async_gen = lifespan(MagicMock())
# Start the context manager
await async_gen.__aenter__()
# Verify the expected logging sequence
mock_logger.info.assert_any_call("Loading TTS model and voice packs...")
# Check for the startup message containing the required info
startup_calls = [call[0][0] for call in mock_logger.info.call_args_list]
startup_msg = next(msg for msg in startup_calls if "Model warmed up on" in msg)
assert "Model warmed up on" in startup_msg
assert "3 voice packs loaded" in startup_msg
# Verify model setup was called
mock_tts_model.setup.assert_called_once()
# Clean up
await async_gen.__aexit__(None, None, None)
@pytest.mark.asyncio
@patch("api.src.main.TTSModel")
@patch("api.src.main.logger")
async def test_lifespan_failed_warmup(mock_logger, mock_tts_model):
"""Test failed model warmup in lifespan"""
# Mock the model setup to fail
mock_tts_model.setup.side_effect = RuntimeError("Failed to initialize model")
# Create an async generator from the lifespan context manager
async_gen = lifespan(MagicMock())
# Verify the exception is raised
with pytest.raises(RuntimeError, match="Failed to initialize model"):
await async_gen.__aenter__()
# Verify the expected logging sequence
mock_logger.info.assert_called_with("Loading TTS model and voice packs...")
# Clean up
await async_gen.__aexit__(None, None, None)
@pytest.mark.asyncio
@patch("api.src.main.TTSModel")
async def test_lifespan_cuda_warmup(mock_tts_model):
"""Test model warmup specifically on CUDA"""
# Mock file system for voice counting
mock_tts_model.VOICES_DIR = "/mock/voices"
# Create async mock
async def async_setup():
return 2
mock_tts_model.setup = MagicMock()
mock_tts_model.setup.side_effect = async_setup
mock_tts_model.get_device.return_value = "cuda"
with patch("os.listdir", return_value=["voice1.pt", "voice2.pt"]):
# Create an async generator from the lifespan context manager
async_gen = lifespan(MagicMock())
await async_gen.__aenter__()
# Verify model setup was called
mock_tts_model.setup.assert_called_once()
# Clean up
await async_gen.__aexit__(None, None, None)

View file

@@ -0,0 +1,215 @@
import pytest
from unittest.mock import AsyncMock, patch, MagicMock
from fastapi.testclient import TestClient
import numpy as np
import asyncio
from typing import AsyncGenerator
from api.src.main import app
from api.src.services.tts_service import TTSService
client = TestClient(app)
@pytest.fixture
def mock_audio_bytes():
"""Mock audio bytes for testing."""
return b"mock audio data"
@pytest.fixture
def mock_tts_service(mock_audio_bytes):
"""Mock TTS service for testing."""
with patch("api.src.routers.openai_compatible.get_tts_service") as mock_get:
service = AsyncMock(spec=TTSService)
service.generate_audio.return_value = (np.zeros(1000), 0.1)
# Create a proper async generator for streaming
async def mock_stream(*args, **kwargs) -> AsyncGenerator[bytes, None]:
yield mock_audio_bytes
service.generate_audio_stream = mock_stream
service.list_voices.return_value = ["voice1", "voice2"]
service.combine_voices.return_value = "voice1_voice2"
# Return the same instance for all calls
mock_get.return_value = service
mock_get.side_effect = None
yield service
def test_openai_speech_endpoint(mock_tts_service, test_voice):
"""Test the OpenAI-compatible speech endpoint with basic MP3 generation"""
response = client.post(
"/v1/audio/speech",
json={
"model": "kokoro",
"input": "Hello world",
"voice": test_voice,
"response_format": "mp3",
"stream": False
}
)
assert response.status_code == 200
assert response.headers["content-type"] == "audio/mpeg"
assert len(response.content) > 0
def test_openai_speech_streaming(mock_tts_service, test_voice, mock_audio_bytes):
"""Test the OpenAI-compatible speech endpoint with streaming"""
response = client.post(
"/v1/audio/speech",
json={
"model": "kokoro",
"input": "Hello world",
"voice": test_voice,
"response_format": "mp3",
"stream": True
}
)
assert response.status_code == 200
assert response.headers["content-type"] == "audio/mpeg"
assert "Transfer-Encoding" in response.headers
assert response.headers["Transfer-Encoding"] == "chunked"
# For streaming responses, we need to read the content in chunks
content = b""
for chunk in response.iter_bytes():
content += chunk
assert content == mock_audio_bytes
def test_openai_speech_pcm_streaming(mock_tts_service, test_voice, mock_audio_bytes):
"""Test PCM streaming format"""
response = client.post(
"/v1/audio/speech",
json={
"model": "kokoro",
"input": "Hello world",
"voice": test_voice,
"response_format": "pcm",
"stream": True
}
)
assert response.status_code == 200
assert response.headers["content-type"] == "audio/pcm"
# For streaming responses, we need to read the content in chunks
content = b""
for chunk in response.iter_bytes():
content += chunk
assert content == mock_audio_bytes
def test_openai_speech_invalid_voice(mock_tts_service):
"""Test error handling for invalid voice"""
mock_tts_service.generate_audio.side_effect = ValueError("Voice 'invalid_voice' not found")
response = client.post(
"/v1/audio/speech",
json={
"model": "kokoro",
"input": "Hello world",
"voice": "invalid_voice",
"response_format": "mp3",
"stream": False
}
)
assert response.status_code == 400
error_response = response.json()
assert error_response["detail"]["error"] == "validation_error"
assert "Voice 'invalid_voice' not found" in error_response["detail"]["message"]
assert error_response["detail"]["type"] == "invalid_request_error"
def test_openai_speech_empty_text(mock_tts_service, test_voice):
"""Test error handling for empty text"""
mock_tts_service.generate_audio.side_effect = ValueError("Text is empty after preprocessing")
response = client.post(
"/v1/audio/speech",
json={
"model": "kokoro",
"input": "",
"voice": test_voice,
"response_format": "mp3",
"stream": False
}
)
assert response.status_code == 400
error_response = response.json()
assert error_response["detail"]["error"] == "validation_error"
assert "Text is empty after preprocessing" in error_response["detail"]["message"]
assert error_response["detail"]["type"] == "invalid_request_error"
def test_openai_speech_invalid_format(mock_tts_service, test_voice):
"""Test error handling for invalid format"""
response = client.post(
"/v1/audio/speech",
json={
"model": "kokoro",
"input": "Hello world",
"voice": test_voice,
"response_format": "invalid_format",
"stream": False
}
)
assert response.status_code == 422 # Validation error from Pydantic
def test_list_voices(mock_tts_service):
"""Test listing available voices"""
response = client.get("/v1/audio/voices")
assert response.status_code == 200
data = response.json()
assert "voices" in data
assert len(data["voices"]) == 2
assert "voice1" in data["voices"]
assert "voice2" in data["voices"]
def test_combine_voices(mock_tts_service):
"""Test combining voices endpoint"""
response = client.post(
"/v1/audio/voices/combine",
json="voice1+voice2"
)
assert response.status_code == 200
data = response.json()
assert "voice" in data
assert data["voice"] == "voice1_voice2"
def test_server_error(mock_tts_service, test_voice):
"""Test handling of server errors"""
mock_tts_service.generate_audio.side_effect = RuntimeError("Internal server error")
response = client.post(
"/v1/audio/speech",
json={
"model": "kokoro",
"input": "Hello world",
"voice": test_voice,
"response_format": "mp3",
"stream": False
}
)
assert response.status_code == 500
error_response = response.json()
assert error_response["detail"]["error"] == "processing_error"
assert error_response["detail"]["type"] == "server_error"
def test_streaming_error(mock_tts_service, test_voice):
"""Test handling streaming errors"""
# Create a proper async generator that raises an error
async def mock_error_stream(*args, **kwargs) -> AsyncGenerator[bytes, None]:
if False: # This makes it a proper generator
yield b""
raise RuntimeError("Streaming failed")
mock_tts_service.generate_audio_stream = mock_error_stream
response = client.post(
"/v1/audio/speech",
json={
"model": "kokoro",
"input": "Hello world",
"voice": test_voice,
"response_format": "mp3",
"stream": True
}
)
assert response.status_code == 500
error_response = response.json()
assert error_response["detail"]["error"] == "processing_error"
assert error_response["detail"]["type"] == "server_error"

View file

@@ -1,122 +0,0 @@
"""Tests for text processing endpoints"""
from unittest.mock import Mock, patch
import numpy as np
import pytest
import pytest_asyncio
from httpx import AsyncClient
from ..src.main import app
from .conftest import MockTTSModel
@pytest_asyncio.fixture
async def async_client():
async with AsyncClient(app=app, base_url="http://test") as ac:
yield ac
@pytest.mark.asyncio
async def test_phonemize_endpoint(async_client):
"""Test phoneme generation endpoint"""
with patch("api.src.routers.development.phonemize") as mock_phonemize, patch(
"api.src.routers.development.tokenize"
) as mock_tokenize:
# Setup mocks
        mock_phonemize.return_value = "həlˈoʊ"
mock_tokenize.return_value = [1, 2, 3]
# Test request
response = await async_client.post(
"/text/phonemize", json={"text": "hello", "language": "a"}
)
# Verify response
assert response.status_code == 200
result = response.json()
assert result["phonemes"] == "həlˈ"
assert result["tokens"] == [0, 1, 2, 3, 0] # Should add start/end tokens
@pytest.mark.asyncio
async def test_phonemize_empty_text(async_client):
"""Test phoneme generation with empty text"""
response = await async_client.post(
"/text/phonemize", json={"text": "", "language": "a"}
)
assert response.status_code == 500
assert "error" in response.json()["detail"]
@pytest.mark.asyncio
async def test_generate_from_phonemes(
async_client, mock_tts_service, mock_audio_service
):
"""Test audio generation from phonemes"""
with patch(
"api.src.routers.development.TTSService", return_value=mock_tts_service
):
response = await async_client.post(
"/text/generate_from_phonemes",
json={"phonemes": "həlˈ", "voice": "af_bella", "speed": 1.0},
)
assert response.status_code == 200
assert response.headers["content-type"] == "audio/wav"
assert (
response.headers["content-disposition"] == "attachment; filename=speech.wav"
)
assert response.content == b"mock audio data"
@pytest.mark.asyncio
async def test_generate_from_phonemes_invalid_voice(async_client, mock_tts_service):
"""Test audio generation with invalid voice"""
mock_tts_service._get_voice_path.return_value = None
with patch(
"api.src.routers.development.TTSService", return_value=mock_tts_service
):
response = await async_client.post(
"/text/generate_from_phonemes",
json={"phonemes": "həlˈ", "voice": "invalid_voice", "speed": 1.0},
)
assert response.status_code == 400
assert "Voice not found" in response.json()["detail"]["message"]
@pytest.mark.asyncio
async def test_generate_from_phonemes_invalid_speed(async_client, monkeypatch):
"""Test audio generation with invalid speed"""
# Mock TTSModel initialization
mock_model = Mock()
mock_model.generate_from_tokens = Mock(return_value=np.zeros(48000))
monkeypatch.setattr("api.src.services.tts_model.TTSModel._instance", mock_model)
monkeypatch.setattr(
"api.src.services.tts_model.TTSModel.get_instance",
Mock(return_value=mock_model),
)
response = await async_client.post(
"/text/generate_from_phonemes",
json={"phonemes": "həlˈ", "voice": "af_bella", "speed": -1.0},
)
assert response.status_code == 422 # Validation error
@pytest.mark.asyncio
async def test_generate_from_phonemes_empty_phonemes(async_client, mock_tts_service):
"""Test audio generation with empty phonemes"""
with patch(
"api.src.routers.development.TTSService", return_value=mock_tts_service
):
response = await async_client.post(
"/text/generate_from_phonemes",
json={"phonemes": "", "voice": "af_bella", "speed": 1.0},
)
assert response.status_code == 400
assert "Invalid request" in response.json()["detail"]["error"]

View file

@@ -1,201 +0,0 @@
"""Tests for TTS model implementations"""
import os
from unittest.mock import AsyncMock, MagicMock, patch
import numpy as np
import pytest
import torch
from api.src.services.tts_base import TTSBaseModel
from api.src.services.tts_cpu import TTSCPUModel
from api.src.services.tts_gpu import TTSGPUModel, length_to_mask
# Base Model Tests
def test_get_device_error():
"""Test get_device() raises error when not initialized"""
TTSBaseModel._device = None
with pytest.raises(RuntimeError, match="Model not initialized"):
TTSBaseModel.get_device()
@pytest.mark.asyncio
@patch("torch.cuda.is_available")
@patch("os.path.exists")
@patch("os.path.join")
@patch("os.listdir")
@patch("torch.load")
@patch("torch.save")
@patch("api.src.services.tts_base.settings")
@patch("api.src.services.warmup.WarmupService")
async def test_setup_cuda_available(
mock_warmup_class, mock_settings, mock_save, mock_load, mock_listdir, mock_join, mock_exists, mock_cuda_available
):
"""Test setup with CUDA available"""
TTSBaseModel._device = None
# Mock CUDA as unavailable since we're using CPU PyTorch
mock_cuda_available.return_value = False
mock_exists.return_value = True
mock_load.return_value = torch.zeros(1)
mock_listdir.return_value = ["voice1.pt", "voice2.pt"]
mock_join.return_value = "/mocked/path"
# Configure mock settings
mock_settings.model_dir = "/mock/model/dir"
mock_settings.onnx_model_path = "model.onnx"
mock_settings.voices_dir = "voices"
# Configure mock warmup service
mock_warmup = MagicMock()
mock_warmup.load_voices.return_value = [torch.zeros(1)]
mock_warmup.warmup_voices = AsyncMock()
mock_warmup_class.return_value = mock_warmup
# Create mock model
mock_model = MagicMock()
mock_model.bert = MagicMock()
mock_model.process_text = MagicMock(return_value=("dummy", [1, 2, 3]))
mock_model.generate_from_tokens = MagicMock(return_value=np.zeros(1000))
# Mock initialize to return our mock model
TTSBaseModel.initialize = MagicMock(return_value=mock_model)
TTSBaseModel._instance = mock_model
voice_count = await TTSBaseModel.setup()
assert TTSBaseModel._device == "cpu"
assert voice_count == 2
@pytest.mark.asyncio
@patch("torch.cuda.is_available")
@patch("os.path.exists")
@patch("os.path.join")
@patch("os.listdir")
@patch("torch.load")
@patch("torch.save")
@patch("api.src.services.tts_base.settings")
@patch("api.src.services.warmup.WarmupService")
async def test_setup_cuda_unavailable(
mock_warmup_class, mock_settings, mock_save, mock_load, mock_listdir, mock_join, mock_exists, mock_cuda_available
):
"""Test setup with CUDA unavailable"""
TTSBaseModel._device = None
mock_cuda_available.return_value = False
mock_exists.return_value = True
mock_load.return_value = torch.zeros(1)
mock_listdir.return_value = ["voice1.pt", "voice2.pt"]
mock_join.return_value = "/mocked/path"
# Configure mock settings
mock_settings.model_dir = "/mock/model/dir"
mock_settings.onnx_model_path = "model.onnx"
mock_settings.voices_dir = "voices"
# Configure mock warmup service
mock_warmup = MagicMock()
mock_warmup.load_voices.return_value = [torch.zeros(1)]
mock_warmup.warmup_voices = AsyncMock()
mock_warmup_class.return_value = mock_warmup
# Create mock model
mock_model = MagicMock()
mock_model.bert = MagicMock()
mock_model.process_text = MagicMock(return_value=("dummy", [1, 2, 3]))
mock_model.generate_from_tokens = MagicMock(return_value=np.zeros(1000))
# Mock initialize to return our mock model
TTSBaseModel.initialize = MagicMock(return_value=mock_model)
TTSBaseModel._instance = mock_model
voice_count = await TTSBaseModel.setup()
assert TTSBaseModel._device == "cpu"
assert voice_count == 2
# CPU Model Tests
def test_cpu_initialize_missing_model():
"""Test CPU initialize with missing model"""
TTSCPUModel._onnx_session = None # Reset the session
with patch("os.path.exists", return_value=False), patch(
"onnxruntime.InferenceSession", return_value=None
):
result = TTSCPUModel.initialize("dummy_dir")
assert result is None
def test_cpu_generate_uninitialized():
"""Test CPU generate methods with uninitialized model"""
TTSCPUModel._onnx_session = None
with pytest.raises(RuntimeError, match="ONNX model not initialized"):
TTSCPUModel.generate_from_text("test", torch.zeros(1), "en", 1.0)
with pytest.raises(RuntimeError, match="ONNX model not initialized"):
TTSCPUModel.generate_from_tokens([1, 2, 3], torch.zeros(1), 1.0)
def test_cpu_process_text():
"""Test CPU process_text functionality"""
with patch("api.src.services.tts_cpu.phonemize") as mock_phonemize, patch(
"api.src.services.tts_cpu.tokenize"
) as mock_tokenize:
mock_phonemize.return_value = "test phonemes"
mock_tokenize.return_value = [1, 2, 3]
phonemes, tokens = TTSCPUModel.process_text("test", "en")
assert phonemes == "test phonemes"
assert tokens == [0, 1, 2, 3, 0] # Should add start/end tokens
# GPU Model Tests
@patch("torch.cuda.is_available")
def test_gpu_initialize_cuda_unavailable(mock_cuda_available):
"""Test GPU initialize with CUDA unavailable"""
mock_cuda_available.return_value = False
TTSGPUModel._instance = None
result = TTSGPUModel.initialize("dummy_dir", "dummy_path")
assert result is None
@patch("api.src.services.tts_gpu.length_to_mask")
def test_gpu_length_to_mask(mock_length_to_mask):
"""Test length_to_mask function"""
# Setup mock return value
expected_mask = torch.tensor(
[[False, False, False, True, True], [False, False, False, False, False]]
)
mock_length_to_mask.return_value = expected_mask
# Call function with test input
lengths = torch.tensor([3, 5])
mask = mock_length_to_mask(lengths)
# Verify mock was called with correct input
mock_length_to_mask.assert_called_once()
assert torch.equal(mask, expected_mask)
def test_gpu_generate_uninitialized():
"""Test GPU generate methods with uninitialized model"""
TTSGPUModel._instance = None
with pytest.raises(RuntimeError, match="GPU model not initialized"):
TTSGPUModel.generate_from_text("test", torch.zeros(1), "en", 1.0)
with pytest.raises(RuntimeError, match="GPU model not initialized"):
TTSGPUModel.generate_from_tokens([1, 2, 3], torch.zeros(1), 1.0)
def test_gpu_process_text():
"""Test GPU process_text functionality"""
with patch("api.src.services.tts_gpu.phonemize") as mock_phonemize, patch(
"api.src.services.tts_gpu.tokenize"
) as mock_tokenize:
mock_phonemize.return_value = "test phonemes"
mock_tokenize.return_value = [1, 2, 3]
phonemes, tokens = TTSGPUModel.process_text("test", "en")
assert phonemes == "test phonemes"
assert tokens == [1, 2, 3] # GPU implementation doesn't add start/end tokens

View file

@@ -1,260 +0,0 @@
"""Tests for TTSService"""
import os
from unittest.mock import MagicMock, call, patch
import numpy as np
import pytest
import torch
from onnxruntime import InferenceSession
from api.src.core.config import settings
from api.src.services.tts_cpu import TTSCPUModel
from api.src.services.tts_gpu import TTSGPUModel
from api.src.services.tts_model import TTSModel
from api.src.services.tts_service import TTSService
@pytest.fixture
def tts_service(monkeypatch):
"""Create a TTSService instance for testing"""
# Mock TTSModel initialization
mock_model = MagicMock()
mock_model.generate_from_tokens = MagicMock(return_value=np.zeros(48000))
mock_model.process_text = MagicMock(return_value=("mock phonemes", [1, 2, 3]))
# Set up model instance
monkeypatch.setattr("api.src.services.tts_model.TTSModel._instance", mock_model)
monkeypatch.setattr(
"api.src.services.tts_model.TTSModel.get_instance",
MagicMock(return_value=mock_model),
)
monkeypatch.setattr(
"api.src.services.tts_model.TTSModel.get_device", MagicMock(return_value="cpu")
)
return TTSService()
@pytest.fixture
def sample_audio():
"""Generate a simple sine wave for testing"""
sample_rate = 24000
duration = 0.1 # 100ms
t = np.linspace(0, duration, int(sample_rate * duration))
frequency = 440 # A4 note
return np.sin(2 * np.pi * frequency * t).astype(np.float32)
def test_audio_to_bytes(tts_service, sample_audio):
"""Test converting audio tensor to bytes"""
audio_bytes = tts_service._audio_to_bytes(sample_audio)
assert isinstance(audio_bytes, bytes)
assert len(audio_bytes) > 0
@pytest.mark.asyncio
async def test_list_voices(tts_service):
"""Test listing available voices"""
# Override list_voices for testing
    # TODO: the aiofiles path handling (aiofiles vs aiofiles.os) is confusing;
    # overriding the method here since the real implementation works in
    # practice (for now)
async def mock_list_voices():
return ["voice1", "voice2"]
tts_service.list_voices = mock_list_voices
voices = await tts_service.list_voices()
assert len(voices) == 2
assert "voice1" in voices
assert "voice2" in voices
@pytest.mark.asyncio
async def test_list_voices_error(tts_service):
"""Test error handling in list_voices"""
# Override list_voices for testing
# TODO: See above.
async def mock_list_voices():
return []
tts_service.list_voices = mock_list_voices
voices = await tts_service.list_voices()
assert voices == []
def mock_model_setup(cuda_available=False):
"""Helper function to mock model setup"""
# Reset model state
TTSModel._instance = None
TTSModel._device = None
TTSModel._voicepacks = {}
# Create mock model instance with proper generate method
mock_model = MagicMock()
mock_model.generate.return_value = np.zeros(24000, dtype=np.float32)
TTSModel._instance = mock_model
# Set device based on CUDA availability
TTSModel._device = "cuda" if cuda_available else "cpu"
return 3 # Return voice count (including af.pt)
def test_model_initialization_cuda():
"""Test model initialization with CUDA"""
# Simulate CUDA availability
voice_count = mock_model_setup(cuda_available=True)
assert TTSModel.get_device() == "cuda"
assert voice_count == 3 # voice1.pt, voice2.pt, af.pt
def test_model_initialization_cpu():
"""Test model initialization with CPU"""
# Simulate no CUDA availability
voice_count = mock_model_setup(cuda_available=False)
assert TTSModel.get_device() == "cpu"
assert voice_count == 3 # voice1.pt, voice2.pt, af.pt
def test_generate_audio_empty_text(tts_service):
"""Test generating audio with empty text"""
with pytest.raises(ValueError, match="Text is empty after preprocessing"):
tts_service._generate_audio("", "af", 1.0)
@pytest.fixture(autouse=True)
def mock_settings():
"""Mock settings for all tests"""
with patch("api.src.services.text_processing.chunker.settings") as mock_settings:
mock_settings.max_chunk_size = 300
yield mock_settings
@patch("api.src.services.tts_model.TTSModel.get_instance")
@patch("api.src.services.tts_model.TTSModel.get_device")
@patch("os.path.exists")
@patch("kokoro.normalize_text")
@patch("kokoro.phonemize")
@patch("kokoro.tokenize")
@patch("kokoro.generate")
@patch("torch.load")
def test_generate_audio_phonemize_error(
mock_torch_load,
mock_generate,
mock_tokenize,
mock_phonemize,
mock_normalize,
mock_exists,
mock_get_device,
mock_instance,
tts_service,
):
"""Test handling phonemization error"""
mock_normalize.return_value = "Test text"
mock_phonemize.side_effect = Exception("Phonemization failed")
mock_instance.return_value = (
mock_generate,
"cpu",
) # Use the same mock for consistency
mock_get_device.return_value = "cpu"
mock_exists.return_value = True
mock_torch_load.return_value = torch.zeros((10, 24000))
mock_generate.return_value = (None, None)
with pytest.raises(ValueError, match="No chunks were processed successfully"):
tts_service._generate_audio("Test text", "af", 1.0)
@patch("api.src.services.tts_model.TTSModel.get_instance")
@patch("api.src.services.tts_model.TTSModel.get_device")
@patch("os.path.exists")
@patch("kokoro.normalize_text")
@patch("kokoro.phonemize")
@patch("kokoro.tokenize")
@patch("kokoro.generate")
@patch("torch.load")
def test_generate_audio_error(
mock_torch_load,
mock_generate,
mock_tokenize,
mock_phonemize,
mock_normalize,
mock_exists,
mock_get_device,
mock_instance,
tts_service,
):
"""Test handling generation error"""
mock_normalize.return_value = "Test text"
mock_phonemize.return_value = "Test text"
mock_tokenize.return_value = [1, 2] # Return integers instead of strings
mock_generate.side_effect = Exception("Generation failed")
mock_instance.return_value = (
mock_generate,
"cpu",
) # Use the same mock for consistency
mock_get_device.return_value = "cpu"
mock_exists.return_value = True
mock_torch_load.return_value = torch.zeros((10, 24000))
with pytest.raises(ValueError, match="No chunks were processed successfully"):
tts_service._generate_audio("Test text", "af", 1.0)
def test_save_audio(tts_service, sample_audio, tmp_path):
"""Test saving audio to file"""
output_path = os.path.join(tmp_path, "test_output.wav")
tts_service._save_audio(sample_audio, output_path)
assert os.path.exists(output_path)
assert os.path.getsize(output_path) > 0
@pytest.mark.asyncio
async def test_combine_voices(tts_service):
"""Test combining multiple voices"""
# Setup mocks for torch operations
with patch("torch.load", return_value=torch.tensor([1.0, 2.0])), patch(
"torch.stack", return_value=torch.tensor([[1.0, 2.0], [3.0, 4.0]])
), patch("torch.mean", return_value=torch.tensor([2.0, 3.0])), patch(
"torch.save"
), patch("os.path.exists", return_value=True):
# Test combining two voices
result = await tts_service.combine_voices(["voice1", "voice2"])
assert result == "voice1_voice2"
@pytest.mark.asyncio
async def test_combine_voices_invalid_input(tts_service):
"""Test combining voices with invalid input"""
# Test with empty list
with pytest.raises(ValueError, match="At least 2 voices are required"):
await tts_service.combine_voices([])
# Test with single voice
with pytest.raises(ValueError, match="At least 2 voices are required"):
await tts_service.combine_voices(["voice1"])
@patch("api.src.services.tts_service.TTSService._get_voice_path")
@patch("api.src.services.tts_model.TTSModel.get_instance")
def test_voicepack_loading_error(mock_get_instance, mock_get_voice_path):
"""Test voicepack loading error handling"""
mock_get_voice_path.return_value = None
mock_instance = MagicMock()
mock_instance.generate.return_value = np.zeros(24000, dtype=np.float32)
mock_get_instance.return_value = (mock_instance, "cpu")
TTSModel._voicepacks = {} # Reset voicepacks
service = TTSService()
with pytest.raises(ValueError, match="Voice not found: nonexistent_voice"):
service._generate_audio("test", "nonexistent_voice", 1.0)

View file

@@ -0,0 +1,140 @@
import pytest
import numpy as np
from unittest.mock import AsyncMock, patch
@pytest.mark.asyncio
async def test_generate_audio(tts_service, mock_audio_output, test_voice):
"""Test basic audio generation"""
audio, processing_time = await tts_service.generate_audio(
text="Hello world",
voice=test_voice,
speed=1.0
)
assert isinstance(audio, np.ndarray)
assert np.array_equal(audio, mock_audio_output)
assert processing_time > 0
tts_service.model_manager.generate.assert_called_once()
@pytest.mark.asyncio
async def test_generate_audio_with_combined_voice(tts_service, mock_audio_output):
"""Test audio generation with a combined voice"""
test_voices = ["voice1", "voice2"]
combined_id = await tts_service._voice_manager.combine_voices(test_voices)
audio, processing_time = await tts_service.generate_audio(
text="Hello world",
voice=combined_id,
speed=1.0
)
assert isinstance(audio, np.ndarray)
assert np.array_equal(audio, mock_audio_output)
assert processing_time > 0
@pytest.mark.asyncio
async def test_generate_audio_stream(tts_service, mock_audio_output, test_voice):
"""Test streaming audio generation"""
tts_service.model_manager.generate.return_value = mock_audio_output
chunks = []
async for chunk in tts_service.generate_audio_stream(
text="Hello world",
voice=test_voice,
speed=1.0,
output_format="pcm"
):
assert isinstance(chunk, bytes)
chunks.append(chunk)
assert len(chunks) > 0
tts_service.model_manager.generate.assert_called()
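
# Sketch (an assumption, not the service's actual code) of the float-to-PCM
# conversion implied by output_format="pcm" above: float32 samples in [-1, 1]
# scaled to signed 16-bit little-endian bytes.
def example_float_to_pcm16(samples: np.ndarray) -> bytes:
    clipped = np.clip(samples, -1.0, 1.0)
    return (clipped * 32767).astype("<i2").tobytes()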
@pytest.mark.asyncio
async def test_empty_text(tts_service, test_voice):
"""Test handling empty text"""
with pytest.raises(ValueError) as exc_info:
await tts_service.generate_audio(
text="",
voice=test_voice,
speed=1.0
)
assert "Text is empty after preprocessing" in str(exc_info.value)
@pytest.mark.asyncio
async def test_invalid_voice(tts_service):
"""Test handling invalid voice"""
tts_service._voice_manager.load_voice.side_effect = ValueError("Voice not found")
with pytest.raises(ValueError) as exc_info:
await tts_service.generate_audio(
text="Hello world",
voice="invalid_voice",
speed=1.0
)
assert "Voice not found" in str(exc_info.value)
@pytest.mark.asyncio
async def test_model_generation_error(tts_service, test_voice):
"""Test handling model generation error"""
# Make generate return None to simulate failed generation
tts_service.model_manager.generate.return_value = None
with pytest.raises(ValueError) as exc_info:
await tts_service.generate_audio(
text="Hello world",
voice=test_voice,
speed=1.0
)
assert "No audio chunks were generated successfully" in str(exc_info.value)
@pytest.mark.asyncio
async def test_streaming_generation_error(tts_service, test_voice):
"""Test handling streaming generation error"""
# Make generate return None to simulate failed generation
tts_service.model_manager.generate.return_value = None
chunks = []
async for chunk in tts_service.generate_audio_stream(
text="Hello world",
voice=test_voice,
speed=1.0,
output_format="pcm"
):
chunks.append(chunk)
# Should get no chunks if generation fails
assert len(chunks) == 0
@pytest.mark.asyncio
async def test_list_voices(tts_service):
"""Test listing available voices"""
voices = await tts_service.list_voices()
assert len(voices) == 2
assert "voice1" in voices
assert "voice2" in voices
tts_service._voice_manager.list_voices.assert_called_once()
@pytest.mark.asyncio
async def test_combine_voices(tts_service):
"""Test combining voices"""
test_voices = ["voice1", "voice2"]
combined_id = await tts_service.combine_voices(test_voices)
assert combined_id == "voice1_voice2"
tts_service._voice_manager.combine_voices.assert_called_once_with(test_voices)
@pytest.mark.asyncio
async def test_chunked_text_processing(tts_service, test_voice, mock_audio_output):
"""Test processing chunked text"""
long_text = "First sentence. Second sentence. Third sentence."
audio, processing_time = await tts_service.generate_audio(
text=long_text,
voice=test_voice,
speed=1.0,
stitch_long_output=True
)
assert tts_service.model_manager.generate.call_count > 1
assert isinstance(audio, np.ndarray)
assert processing_time > 0
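
# Why generate is called more than once above: long input is split into
# sentence-sized chunks that are synthesized separately and stitched back
# together. A minimal sketch of such splitting (an assumption; the real
# chunker lives in api.src.services.text_processing):
def example_chunk_sentences(text: str) -> list:
    import re

    return [s.strip() for s in re.split(r"(?<=[.!?])\s+", text) if s.strip()]

assert example_chunk_sentences(
    "First sentence. Second sentence. Third sentence."
) == ["First sentence.", "Second sentence.", "Third sentence."]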

View file

@@ -0,0 +1,82 @@
import pytest
from unittest.mock import AsyncMock, patch
import torch
from pathlib import Path
@pytest.mark.asyncio
async def test_list_available_voices(mock_voice_manager):
"""Test listing available voices"""
voices = await mock_voice_manager.list_voices()
assert len(voices) == 2
assert "voice1" in voices
assert "voice2" in voices
@pytest.mark.asyncio
async def test_get_voice_path(mock_voice_manager):
"""Test getting path for a specific voice"""
voice_path = mock_voice_manager.get_voice_path("voice1")
assert voice_path == "/mock/path/voice.pt"
# Test invalid voice
mock_voice_manager.get_voice_path.return_value = None
assert mock_voice_manager.get_voice_path("invalid_voice") is None
@pytest.mark.asyncio
async def test_load_voice(mock_voice_manager, mock_voice_tensor):
"""Test loading a voice tensor"""
voice_tensor = await mock_voice_manager.load_voice("voice1")
assert torch.equal(voice_tensor, mock_voice_tensor)
mock_voice_manager.load_voice.assert_called_once_with("voice1")
@pytest.mark.asyncio
async def test_load_voice_not_found(mock_voice_manager):
"""Test loading non-existent voice"""
mock_voice_manager.get_voice_path.return_value = None
mock_voice_manager.load_voice.side_effect = ValueError("Voice not found: invalid_voice")
with pytest.raises(ValueError, match="Voice not found: invalid_voice"):
await mock_voice_manager.load_voice("invalid_voice")
@pytest.mark.asyncio
async def test_combine_voices(mock_voice_manager):
"""Test combining two voices"""
voices = ["voice1", "voice2"]
weights = [0.5, 0.5]
combined_id = await mock_voice_manager.combine_voices(voices, weights)
assert combined_id == "voice1_voice2"
mock_voice_manager.combine_voices.assert_called_once_with(voices, weights)
@pytest.mark.asyncio
async def test_combine_voices_invalid_weights(mock_voice_manager):
"""Test combining voices with invalid weights"""
voices = ["voice1", "voice2"]
weights = [0.3, 0.3] # Doesn't sum to 1
mock_voice_manager.combine_voices.side_effect = ValueError("Weights must sum to 1")
with pytest.raises(ValueError, match="Weights must sum to 1"):
await mock_voice_manager.combine_voices(voices, weights)
@pytest.mark.asyncio
async def test_combine_voices_single_voice(mock_voice_manager):
"""Test combining with single voice"""
voices = ["voice1"]
weights = [1.0]
mock_voice_manager.combine_voices.side_effect = ValueError("At least 2 voices are required")
with pytest.raises(ValueError, match="At least 2 voices are required"):
await mock_voice_manager.combine_voices(voices, weights)
@pytest.mark.asyncio
async def test_cache_management(mock_voice_manager, mock_voice_tensor):
"""Test voice cache management"""
# Mock cache info
mock_voice_manager.cache_info = {"size": 1, "max_size": 10}
# Load voice to test caching
await mock_voice_manager.load_voice("voice1")
# Check cache info
cache_info = mock_voice_manager.cache_info
assert cache_info["size"] == 1
assert cache_info["max_size"] == 10

View file

@@ -33,6 +33,7 @@ RUN --mount=type=cache,target=/root/.cache/uv \
# Copy project files
COPY --chown=appuser:appuser api ./api
COPY --chown=appuser:appuser web ./web
# Install project
RUN --mount=type=cache,target=/root/.cache/uv \

View file

@@ -37,6 +37,7 @@ RUN --mount=type=cache,target=/root/.cache/uv \
# Copy project files
COPY --chown=appuser:appuser api ./api
COPY --chown=appuser:appuser web ./web
# Install project with GPU extras
RUN --mount=type=cache,target=/root/.cache/uv \

View file

@@ -33,6 +33,7 @@ dependencies = [
"ebooklib>=0.18",
"html2text>=2024.2.26",
"pydub>=0.25.1",
"matplotlib>=3.10.0",
]
[project.optional-dependencies]

View file

@@ -1,9 +1,46 @@
import gradio as gr
import pytest
from unittest.mock import AsyncMock, Mock
from api.src.services.tts_service import TTSService
@pytest.fixture
def mock_gr_context():
"""Provides a context for testing Gradio components"""
with gr.Blocks():
yield
@pytest.fixture
async def mock_model_manager():
"""Mock model manager for UI tests"""
manager = AsyncMock()
manager.get_backend = Mock(return_value=Mock(device="cpu"))
return manager
@pytest.fixture
async def mock_voice_manager():
"""Mock voice manager for UI tests"""
manager = AsyncMock()
manager.list_voices = AsyncMock(return_value=["af", "bm_lewis", "af_sarah"])
return manager
@pytest.fixture
async def mock_tts_service(mock_model_manager, mock_voice_manager):
"""Mock TTSService for UI tests"""
service = AsyncMock()
service.model_manager = mock_model_manager
service._voice_manager = mock_voice_manager
return service
@pytest.fixture(autouse=True)
async def setup_mocks(monkeypatch, mock_model_manager, mock_voice_manager, mock_tts_service):
"""Setup global mocks for UI tests"""
async def mock_get_model():
return mock_model_manager
async def mock_get_voice():
return mock_voice_manager
async def mock_create_service():
return mock_tts_service
monkeypatch.setattr("api.src.inference.model_manager.get_manager", mock_get_model)
monkeypatch.setattr("api.src.inference.voice_manager.get_manager", mock_get_voice)
monkeypatch.setattr("api.src.services.tts_service.TTSService.create", mock_create_service)

675
uv.lock generated
View file

@@ -301,6 +301,70 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/e3/51/9b208e85196941db2f0654ad0357ca6388ab3ed67efdbfc799f35d1f83aa/colorlog-6.9.0-py3-none-any.whl", hash = "sha256:5906e71acd67cb07a71e779c47c4bcb45fb8c2993eebe9e5adcd6a6f1b283eff", size = 11424 },
]
[[package]]
name = "contourpy"
version = "1.3.1"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "numpy" },
]
sdist = { url = "https://files.pythonhosted.org/packages/25/c2/fc7193cc5383637ff390a712e88e4ded0452c9fbcf84abe3de5ea3df1866/contourpy-1.3.1.tar.gz", hash = "sha256:dfd97abd83335045a913e3bcc4a09c0ceadbe66580cf573fe961f4a825efa699", size = 13465753 }
wheels = [
{ url = "https://files.pythonhosted.org/packages/b2/a3/80937fe3efe0edacf67c9a20b955139a1a622730042c1ea991956f2704ad/contourpy-1.3.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:a045f341a77b77e1c5de31e74e966537bba9f3c4099b35bf4c2e3939dd54cdab", size = 268466 },
{ url = "https://files.pythonhosted.org/packages/82/1d/e3eaebb4aa2d7311528c048350ca8e99cdacfafd99da87bc0a5f8d81f2c2/contourpy-1.3.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:500360b77259914f7805af7462e41f9cb7ca92ad38e9f94d6c8641b089338124", size = 253314 },
{ url = "https://files.pythonhosted.org/packages/de/f3/d796b22d1a2b587acc8100ba8c07fb7b5e17fde265a7bb05ab967f4c935a/contourpy-1.3.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b2f926efda994cdf3c8d3fdb40b9962f86edbc4457e739277b961eced3d0b4c1", size = 312003 },
{ url = "https://files.pythonhosted.org/packages/bf/f5/0e67902bc4394daee8daa39c81d4f00b50e063ee1a46cb3938cc65585d36/contourpy-1.3.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:adce39d67c0edf383647a3a007de0a45fd1b08dedaa5318404f1a73059c2512b", size = 351896 },
{ url = "https://files.pythonhosted.org/packages/1f/d6/e766395723f6256d45d6e67c13bb638dd1fa9dc10ef912dc7dd3dcfc19de/contourpy-1.3.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:abbb49fb7dac584e5abc6636b7b2a7227111c4f771005853e7d25176daaf8453", size = 320814 },
{ url = "https://files.pythonhosted.org/packages/a9/57/86c500d63b3e26e5b73a28b8291a67c5608d4aa87ebd17bd15bb33c178bc/contourpy-1.3.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a0cffcbede75c059f535725c1680dfb17b6ba8753f0c74b14e6a9c68c29d7ea3", size = 324969 },
{ url = "https://files.pythonhosted.org/packages/b8/62/bb146d1289d6b3450bccc4642e7f4413b92ebffd9bf2e91b0404323704a7/contourpy-1.3.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:ab29962927945d89d9b293eabd0d59aea28d887d4f3be6c22deaefbb938a7277", size = 1265162 },
{ url = "https://files.pythonhosted.org/packages/18/04/9f7d132ce49a212c8e767042cc80ae390f728060d2eea47058f55b9eff1c/contourpy-1.3.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:974d8145f8ca354498005b5b981165b74a195abfae9a8129df3e56771961d595", size = 1324328 },
{ url = "https://files.pythonhosted.org/packages/46/23/196813901be3f97c83ababdab1382e13e0edc0bb4e7b49a7bff15fcf754e/contourpy-1.3.1-cp310-cp310-win32.whl", hash = "sha256:ac4578ac281983f63b400f7fe6c101bedc10651650eef012be1ccffcbacf3697", size = 173861 },
{ url = "https://files.pythonhosted.org/packages/e0/82/c372be3fc000a3b2005061ca623a0d1ecd2eaafb10d9e883a2fc8566e951/contourpy-1.3.1-cp310-cp310-win_amd64.whl", hash = "sha256:174e758c66bbc1c8576992cec9599ce8b6672b741b5d336b5c74e35ac382b18e", size = 218566 },
{ url = "https://files.pythonhosted.org/packages/12/bb/11250d2906ee2e8b466b5f93e6b19d525f3e0254ac8b445b56e618527718/contourpy-1.3.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:3e8b974d8db2c5610fb4e76307e265de0edb655ae8169e8b21f41807ccbeec4b", size = 269555 },
{ url = "https://files.pythonhosted.org/packages/67/71/1e6e95aee21a500415f5d2dbf037bf4567529b6a4e986594d7026ec5ae90/contourpy-1.3.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:20914c8c973f41456337652a6eeca26d2148aa96dd7ac323b74516988bea89fc", size = 254549 },
{ url = "https://files.pythonhosted.org/packages/31/2c/b88986e8d79ac45efe9d8801ae341525f38e087449b6c2f2e6050468a42c/contourpy-1.3.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:19d40d37c1c3a4961b4619dd9d77b12124a453cc3d02bb31a07d58ef684d3d86", size = 313000 },
{ url = "https://files.pythonhosted.org/packages/c4/18/65280989b151fcf33a8352f992eff71e61b968bef7432fbfde3a364f0730/contourpy-1.3.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:113231fe3825ebf6f15eaa8bc1f5b0ddc19d42b733345eae0934cb291beb88b6", size = 352925 },
{ url = "https://files.pythonhosted.org/packages/f5/c7/5fd0146c93220dbfe1a2e0f98969293b86ca9bc041d6c90c0e065f4619ad/contourpy-1.3.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4dbbc03a40f916a8420e420d63e96a1258d3d1b58cbdfd8d1f07b49fcbd38e85", size = 323693 },
{ url = "https://files.pythonhosted.org/packages/85/fc/7fa5d17daf77306840a4e84668a48ddff09e6bc09ba4e37e85ffc8e4faa3/contourpy-1.3.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3a04ecd68acbd77fa2d39723ceca4c3197cb2969633836ced1bea14e219d077c", size = 326184 },
{ url = "https://files.pythonhosted.org/packages/ef/e7/104065c8270c7397c9571620d3ab880558957216f2b5ebb7e040f85eeb22/contourpy-1.3.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:c414fc1ed8ee1dbd5da626cf3710c6013d3d27456651d156711fa24f24bd1291", size = 1268031 },
{ url = "https://files.pythonhosted.org/packages/e2/4a/c788d0bdbf32c8113c2354493ed291f924d4793c4a2e85b69e737a21a658/contourpy-1.3.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:31c1b55c1f34f80557d3830d3dd93ba722ce7e33a0b472cba0ec3b6535684d8f", size = 1325995 },
{ url = "https://files.pythonhosted.org/packages/a6/e6/a2f351a90d955f8b0564caf1ebe4b1451a3f01f83e5e3a414055a5b8bccb/contourpy-1.3.1-cp311-cp311-win32.whl", hash = "sha256:f611e628ef06670df83fce17805c344710ca5cde01edfdc72751311da8585375", size = 174396 },
{ url = "https://files.pythonhosted.org/packages/a8/7e/cd93cab453720a5d6cb75588cc17dcdc08fc3484b9de98b885924ff61900/contourpy-1.3.1-cp311-cp311-win_amd64.whl", hash = "sha256:b2bdca22a27e35f16794cf585832e542123296b4687f9fd96822db6bae17bfc9", size = 219787 },
{ url = "https://files.pythonhosted.org/packages/37/6b/175f60227d3e7f5f1549fcb374592be311293132207e451c3d7c654c25fb/contourpy-1.3.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:0ffa84be8e0bd33410b17189f7164c3589c229ce5db85798076a3fa136d0e509", size = 271494 },
{ url = "https://files.pythonhosted.org/packages/6b/6a/7833cfae2c1e63d1d8875a50fd23371394f540ce809d7383550681a1fa64/contourpy-1.3.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:805617228ba7e2cbbfb6c503858e626ab528ac2a32a04a2fe88ffaf6b02c32bc", size = 255444 },
{ url = "https://files.pythonhosted.org/packages/7f/b3/7859efce66eaca5c14ba7619791b084ed02d868d76b928ff56890d2d059d/contourpy-1.3.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ade08d343436a94e633db932e7e8407fe7de8083967962b46bdfc1b0ced39454", size = 307628 },
{ url = "https://files.pythonhosted.org/packages/48/b2/011415f5e3f0a50b1e285a0bf78eb5d92a4df000553570f0851b6e309076/contourpy-1.3.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:47734d7073fb4590b4a40122b35917cd77be5722d80683b249dac1de266aac80", size = 347271 },
{ url = "https://files.pythonhosted.org/packages/84/7d/ef19b1db0f45b151ac78c65127235239a8cf21a59d1ce8507ce03e89a30b/contourpy-1.3.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2ba94a401342fc0f8b948e57d977557fbf4d515f03c67682dd5c6191cb2d16ec", size = 318906 },
{ url = "https://files.pythonhosted.org/packages/ba/99/6794142b90b853a9155316c8f470d2e4821fe6f086b03e372aca848227dd/contourpy-1.3.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:efa874e87e4a647fd2e4f514d5e91c7d493697127beb95e77d2f7561f6905bd9", size = 323622 },
{ url = "https://files.pythonhosted.org/packages/3c/0f/37d2c84a900cd8eb54e105f4fa9aebd275e14e266736778bb5dccbf3bbbb/contourpy-1.3.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:1bf98051f1045b15c87868dbaea84f92408337d4f81d0e449ee41920ea121d3b", size = 1266699 },
{ url = "https://files.pythonhosted.org/packages/3a/8a/deb5e11dc7d9cc8f0f9c8b29d4f062203f3af230ba83c30a6b161a6effc9/contourpy-1.3.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:61332c87493b00091423e747ea78200659dc09bdf7fd69edd5e98cef5d3e9a8d", size = 1326395 },
{ url = "https://files.pythonhosted.org/packages/1a/35/7e267ae7c13aaf12322ccc493531f1e7f2eb8fba2927b9d7a05ff615df7a/contourpy-1.3.1-cp312-cp312-win32.whl", hash = "sha256:e914a8cb05ce5c809dd0fe350cfbb4e881bde5e2a38dc04e3afe1b3e58bd158e", size = 175354 },
{ url = "https://files.pythonhosted.org/packages/a1/35/c2de8823211d07e8a79ab018ef03960716c5dff6f4d5bff5af87fd682992/contourpy-1.3.1-cp312-cp312-win_amd64.whl", hash = "sha256:08d9d449a61cf53033612cb368f3a1b26cd7835d9b8cd326647efe43bca7568d", size = 220971 },
{ url = "https://files.pythonhosted.org/packages/9a/e7/de62050dce687c5e96f946a93546910bc67e483fe05324439e329ff36105/contourpy-1.3.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:a761d9ccfc5e2ecd1bf05534eda382aa14c3e4f9205ba5b1684ecfe400716ef2", size = 271548 },
{ url = "https://files.pythonhosted.org/packages/78/4d/c2a09ae014ae984c6bdd29c11e74d3121b25eaa117eca0bb76340efd7e1c/contourpy-1.3.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:523a8ee12edfa36f6d2a49407f705a6ef4c5098de4f498619787e272de93f2d5", size = 255576 },
{ url = "https://files.pythonhosted.org/packages/ab/8a/915380ee96a5638bda80cd061ccb8e666bfdccea38d5741cb69e6dbd61fc/contourpy-1.3.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ece6df05e2c41bd46776fbc712e0996f7c94e0d0543af1656956d150c4ca7c81", size = 306635 },
{ url = "https://files.pythonhosted.org/packages/29/5c/c83ce09375428298acd4e6582aeb68b1e0d1447f877fa993d9bf6cd3b0a0/contourpy-1.3.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:573abb30e0e05bf31ed067d2f82500ecfdaec15627a59d63ea2d95714790f5c2", size = 345925 },
{ url = "https://files.pythonhosted.org/packages/29/63/5b52f4a15e80c66c8078a641a3bfacd6e07106835682454647aca1afc852/contourpy-1.3.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a9fa36448e6a3a1a9a2ba23c02012c43ed88905ec80163f2ffe2421c7192a5d7", size = 318000 },
{ url = "https://files.pythonhosted.org/packages/9a/e2/30ca086c692691129849198659bf0556d72a757fe2769eb9620a27169296/contourpy-1.3.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3ea9924d28fc5586bf0b42d15f590b10c224117e74409dd7a0be3b62b74a501c", size = 322689 },
{ url = "https://files.pythonhosted.org/packages/6b/77/f37812ef700f1f185d348394debf33f22d531e714cf6a35d13d68a7003c7/contourpy-1.3.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:5b75aa69cb4d6f137b36f7eb2ace9280cfb60c55dc5f61c731fdf6f037f958a3", size = 1268413 },
{ url = "https://files.pythonhosted.org/packages/3f/6d/ce84e79cdd128542ebeb268f84abb4b093af78e7f8ec504676673d2675bc/contourpy-1.3.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:041b640d4ec01922083645a94bb3b2e777e6b626788f4095cf21abbe266413c1", size = 1326530 },
{ url = "https://files.pythonhosted.org/packages/72/22/8282f4eae20c73c89bee7a82a19c4e27af9b57bb602ecaa00713d5bdb54d/contourpy-1.3.1-cp313-cp313-win32.whl", hash = "sha256:36987a15e8ace5f58d4d5da9dca82d498c2bbb28dff6e5d04fbfcc35a9cb3a82", size = 175315 },
{ url = "https://files.pythonhosted.org/packages/e3/d5/28bca491f65312b438fbf076589dcde7f6f966b196d900777f5811b9c4e2/contourpy-1.3.1-cp313-cp313-win_amd64.whl", hash = "sha256:a7895f46d47671fa7ceec40f31fae721da51ad34bdca0bee83e38870b1f47ffd", size = 220987 },
{ url = "https://files.pythonhosted.org/packages/2f/24/a4b285d6adaaf9746e4700932f579f1a7b6f9681109f694cfa233ae75c4e/contourpy-1.3.1-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:9ddeb796389dadcd884c7eb07bd14ef12408aaae358f0e2ae24114d797eede30", size = 285001 },
{ url = "https://files.pythonhosted.org/packages/48/1d/fb49a401b5ca4f06ccf467cd6c4f1fd65767e63c21322b29b04ec40b40b9/contourpy-1.3.1-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:19c1555a6801c2f084c7ddc1c6e11f02eb6a6016ca1318dd5452ba3f613a1751", size = 268553 },
{ url = "https://files.pythonhosted.org/packages/79/1e/4aef9470d13fd029087388fae750dccb49a50c012a6c8d1d634295caa644/contourpy-1.3.1-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:841ad858cff65c2c04bf93875e384ccb82b654574a6d7f30453a04f04af71342", size = 310386 },
{ url = "https://files.pythonhosted.org/packages/b0/34/910dc706ed70153b60392b5305c708c9810d425bde12499c9184a1100888/contourpy-1.3.1-cp313-cp313t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4318af1c925fb9a4fb190559ef3eec206845f63e80fb603d47f2d6d67683901c", size = 349806 },
{ url = "https://files.pythonhosted.org/packages/31/3c/faee6a40d66d7f2a87f7102236bf4780c57990dd7f98e5ff29881b1b1344/contourpy-1.3.1-cp313-cp313t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:14c102b0eab282427b662cb590f2e9340a9d91a1c297f48729431f2dcd16e14f", size = 321108 },
{ url = "https://files.pythonhosted.org/packages/17/69/390dc9b20dd4bb20585651d7316cc3054b7d4a7b4f8b710b2b698e08968d/contourpy-1.3.1-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:05e806338bfeaa006acbdeba0ad681a10be63b26e1b17317bfac3c5d98f36cda", size = 327291 },
{ url = "https://files.pythonhosted.org/packages/ef/74/7030b67c4e941fe1e5424a3d988080e83568030ce0355f7c9fc556455b01/contourpy-1.3.1-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:4d76d5993a34ef3df5181ba3c92fabb93f1eaa5729504fb03423fcd9f3177242", size = 1263752 },
{ url = "https://files.pythonhosted.org/packages/f0/ed/92d86f183a8615f13f6b9cbfc5d4298a509d6ce433432e21da838b4b63f4/contourpy-1.3.1-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:89785bb2a1980c1bd87f0cb1517a71cde374776a5f150936b82580ae6ead44a1", size = 1318403 },
{ url = "https://files.pythonhosted.org/packages/b3/0e/c8e4950c77dcfc897c71d61e56690a0a9df39543d2164040301b5df8e67b/contourpy-1.3.1-cp313-cp313t-win32.whl", hash = "sha256:8eb96e79b9f3dcadbad2a3891672f81cdcab7f95b27f28f1c67d75f045b6b4f1", size = 185117 },
{ url = "https://files.pythonhosted.org/packages/c1/31/1ae946f11dfbd229222e6d6ad8e7bd1891d3d48bde5fbf7a0beb9491f8e3/contourpy-1.3.1-cp313-cp313t-win_amd64.whl", hash = "sha256:287ccc248c9e0d0566934e7d606201abd74761b5703d804ff3df8935f523d546", size = 236668 },
{ url = "https://files.pythonhosted.org/packages/3e/4f/e56862e64b52b55b5ddcff4090085521fc228ceb09a88390a2b103dccd1b/contourpy-1.3.1-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:b457d6430833cee8e4b8e9b6f07aa1c161e5e0d52e118dc102c8f9bd7dd060d6", size = 265605 },
{ url = "https://files.pythonhosted.org/packages/b0/2e/52bfeeaa4541889f23d8eadc6386b442ee2470bd3cff9baa67deb2dd5c57/contourpy-1.3.1-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cb76c1a154b83991a3cbbf0dfeb26ec2833ad56f95540b442c73950af2013750", size = 315040 },
{ url = "https://files.pythonhosted.org/packages/52/94/86bfae441707205634d80392e873295652fc313dfd93c233c52c4dc07874/contourpy-1.3.1-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:44a29502ca9c7b5ba389e620d44f2fbe792b1fb5734e8b931ad307071ec58c53", size = 218221 },
]
[[package]]
name = "coverage"
version = "7.6.10"
@@ -387,6 +451,15 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/13/84/069db1325f5a6dd034524c0099da3978de8fdb6242ce63223ead188da940/csvw-3.5.1-py2.py3-none-any.whl", hash = "sha256:8dd3864aae51bfd943713a62ec2c6688d3f406a9627b5f16de2479b1281febe5", size = 59881 },
]
[[package]]
name = "cycler"
version = "0.12.1"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/a9/95/a3dbbb5028f35eafb79008e7522a75244477d2838f38cbb722248dabc2a8/cycler-0.12.1.tar.gz", hash = "sha256:88bb128f02ba341da8ef447245a9e138fae777f6a23943da4540077d3601eb1c", size = 7615 }
wheels = [
{ url = "https://files.pythonhosted.org/packages/e7/05/c19819d5e3d95294a6f5947fb9b9629efb316b96de511b418c53d245aae6/cycler-0.12.1-py3-none-any.whl", hash = "sha256:85cef7cff222d8644161529808465972e51340599459b8ac3ccbac5a854e0d30", size = 8321 },
]
[[package]]
name = "distro"
version = "1.9.0"
@@ -465,6 +538,47 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/fb/b4/31c461eef98b96b8ab736d97274548eaf2b2e349bf09e4de3902f7d53084/flatbuffers-24.12.23-py2.py3-none-any.whl", hash = "sha256:c418e0d48890f4142b92fd3e343e73a48f194e1f80075ddcc5793779b3585444", size = 30962 },
]
[[package]]
name = "fonttools"
version = "4.55.4"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/13/8d/8912cdde6a2b4c19ced69ea5790cd17d1c095a3c0104c1c936a1de804a64/fonttools-4.55.4.tar.gz", hash = "sha256:9598af0af85073659facbe9612fcc56b071ef2f26e3819ebf9bd8c5d35f958c5", size = 3498560 }
wheels = [
{ url = "https://files.pythonhosted.org/packages/06/84/da14576ce30bbed3c882bfc4de84d2e4348c65b1382688812357cb21416a/fonttools-4.55.4-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:3b332ea7b7f5f3d99f9bc5a28a23c3824ae72711abf7c4e1d62fa21699fdebe7", size = 2774346 },
{ url = "https://files.pythonhosted.org/packages/50/1d/3da7148a5552871c5dbe368de755602a0df0672e339edc133ed3e9704f2a/fonttools-4.55.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:5d8f925909256e62152e7c3e192655dbca3ab8c3cdef7d7b436732727e80feb6", size = 2302782 },
{ url = "https://files.pythonhosted.org/packages/1b/9d/6b5be027fbfc8eab302d89608fc158b37531f3116506062e0d7183546465/fonttools-4.55.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1a58af9b98e39bcd773aa352b4512be79b472830b799cb1d3cafb2b4796b71cd", size = 4584269 },
{ url = "https://files.pythonhosted.org/packages/53/6f/c5ccd4c8f90fd7f6964a1b8981e58f5cc6361acedb0a473a8dae4e1ac3c6/fonttools-4.55.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:736d750d2ab4523067d8058e5294b40b01f2eee521e0fd401bec0d5e21e80b12", size = 4626917 },
{ url = "https://files.pythonhosted.org/packages/c7/ea/53c4c75212b30d257e0865d6905eb6747ec7450b414caff742ff031eb758/fonttools-4.55.4-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:1a9a2e7e8a9d3bfa9589db3e6c4e4c127fec252493924b2f87a67a25f9430057", size = 4581220 },
{ url = "https://files.pythonhosted.org/packages/04/4f/05d9bf9595d75ece4d65e52bd994431cff575e11f00a9444ac8b2781091e/fonttools-4.55.4-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:87824368e994af34a95cb4279a8c711e51974b3c28d052d39d768531cc9e8e59", size = 4750636 },
{ url = "https://files.pythonhosted.org/packages/43/21/d91c8d4583e0f6ee8e08868d5ab3de44f78af8da37d47e265f5b433bd0e2/fonttools-4.55.4-cp310-cp310-win32.whl", hash = "sha256:6c36dcbfe64bce38c4d4f1d436cdc6445e969eee96eb98d98be603b5abf8c3f2", size = 2177793 },
{ url = "https://files.pythonhosted.org/packages/b8/33/c26363a57f5e766f38c84fb4e34d26d32a26398804f72e12a00c007166a1/fonttools-4.55.4-cp310-cp310-win_amd64.whl", hash = "sha256:3c53a467e5cf629acdbefc98b0f554859539fb6447bbeae4117b9ab51464ccc5", size = 2222313 },
{ url = "https://files.pythonhosted.org/packages/5c/22/cf0707f681486bf91f998c3a6a6492d806d1cf09445ce01b26a724917439/fonttools-4.55.4-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:1605b28165c785bf26c2cbd205dc0822463e3f9f56f187049eb214dc5f4a59cb", size = 2775483 },
{ url = "https://files.pythonhosted.org/packages/09/79/11a07753a7b9ef46eaaa5e85b72558095713060aeca1393057a081fb21e3/fonttools-4.55.4-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:d851d8b2fdb676507365d1430c3285d62c4039d0d7760d8cf2f2e5ea3aa19d73", size = 2303701 },
{ url = "https://files.pythonhosted.org/packages/93/67/173994471ddb0ff8cd45b0a2ff9fa03416152ca90bd14d1cbe1ff75fb66c/fonttools-4.55.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3fb3cf1cddf08cec0338f238f950cb76fabab23a324a579e3e1f9b2ef2578329", size = 4891469 },
{ url = "https://files.pythonhosted.org/packages/16/b9/22e8be0fceaed86187ba35a1035b309e47575c68ee6ace3b66f146300f43/fonttools-4.55.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ddd3208b06186ca00fbd329c0d0fed5ba209c99017cc46e2c4ea42233c2fbd00", size = 4920672 },
{ url = "https://files.pythonhosted.org/packages/cc/15/ed0f0a9d303419e7c885b3a71bfe70bb71c8f964e5b1d515056e38551c69/fonttools-4.55.4-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:9bd98819cb585a894dda9dcb337afeb2601abf17da17de7bfbfc1bc2e4a062c7", size = 4899903 },
{ url = "https://files.pythonhosted.org/packages/b5/02/bd0da57dac3f44f37898b058659cf3beedbfd89b7d0f4b10761c9602dc1b/fonttools-4.55.4-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:4877376c10541e8dccf14876c8476d5082338fa5d21103894894382cc245144b", size = 5067979 },
{ url = "https://files.pythonhosted.org/packages/a0/b9/c232b07c0ecaba9e522695780ca8d711b099bf87889a19a6b35a4ebfde90/fonttools-4.55.4-cp311-cp311-win32.whl", hash = "sha256:3a5e466894ec6d8a009b0eb8e02a6eb26959a318d5b7a906280c26bdadce6423", size = 2176681 },
{ url = "https://files.pythonhosted.org/packages/e3/50/2aa1cf2492e6aded4320122aed690268e97076aba1f418c0b4c68fb11a50/fonttools-4.55.4-cp311-cp311-win_amd64.whl", hash = "sha256:f595129e6f9c6402965d6295fe8c18c1945d27af0f90bdb52ff426226e647afc", size = 2223239 },
{ url = "https://files.pythonhosted.org/packages/7a/ee/c7f06da45f60c076677291470599eb9f8aae6605cbfbebbcb8ee12428e26/fonttools-4.55.4-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:b3db72ad2d26a0e9ec694cbfb4485a8da9c095d29f66561cf935dbd19f3efcea", size = 2769913 },
{ url = "https://files.pythonhosted.org/packages/d9/a9/19aa6a9685d0bb285678850bfa22365a8376c590a7aaacc9f03d3a43beaa/fonttools-4.55.4-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:87717808fd5953588c3ffaf512e8cab0e43c09c1da04e42ba87fa4c07d8170c7", size = 2301168 },
{ url = "https://files.pythonhosted.org/packages/00/63/88740f4333008336844aadbc9f7ef85d50e2eed779a5c33e13907a2439eb/fonttools-4.55.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f49dac626ad5bc1a0147b88e6157e3211fd440d00007f0da6c9e5f91dd5cb88e", size = 4806195 },
{ url = "https://files.pythonhosted.org/packages/7b/fa/1d103fe6e9bf174afd1c04772ca4f88e8f577f44d37b7cc8644fe5ff2620/fonttools-4.55.4-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a2d0ac8656ada8b604ae5da15d9aa075232f2181b95b51a3a2a55195222df7e7", size = 4877282 },
{ url = "https://files.pythonhosted.org/packages/b8/53/1cdd447f30598950e4bf8a2de8cd1f6573e6cb34b726cf23713a3cd8fb1e/fonttools-4.55.4-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:013c8b45873fa77a4ff6d25e43fecf1046cb7e8c6b32f1843117f98f3f8eac60", size = 4784688 },
{ url = "https://files.pythonhosted.org/packages/71/21/edfdcd85c1cce918d410909759a8db667f95bf3faed88141b1abfa2cefe1/fonttools-4.55.4-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:94caad375d254a0332926512f06791f5e66c24a913ebecd6178b14f61d27c62f", size = 5012253 },
{ url = "https://files.pythonhosted.org/packages/7d/e7/7c16717b75e40f735e01d899ee152a0573e90be0e6b8fc2d47c16ba8239c/fonttools-4.55.4-cp312-cp312-win32.whl", hash = "sha256:cb3eb4bf3a0c4e431e1ccab7a33ef4f1bb32657133fff4a61dc4fcbd54b94d29", size = 2165283 },
{ url = "https://files.pythonhosted.org/packages/50/ff/85d1c1d396a3ceaabcf7cb543da56d2223d9b76429bafd6c87f4a4e880df/fonttools-4.55.4-cp312-cp312-win_amd64.whl", hash = "sha256:6914269f6ff6b20c6b5a9b19d0b752880bd8ee218d9a7d6afe9960bbf1922d98", size = 2212080 },
{ url = "https://files.pythonhosted.org/packages/09/9b/e7505e7f08c291ab28e6b5c7ae9fe92aab10f5c4b3666fc67eb59f6e454b/fonttools-4.55.4-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:699dd32da7258a89939567a3d71b3f8decf84da54488a2526693f0d981a76479", size = 2757277 },
{ url = "https://files.pythonhosted.org/packages/7c/15/a26ae0e5be690038cf1d62277f1007282d4d355dc30dbf0a95224fe69b0e/fonttools-4.55.4-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:0f374b18ac04fbf78f20940418aee7882be3cdcb328ded80e16c3356499f64cf", size = 2294678 },
{ url = "https://files.pythonhosted.org/packages/71/6a/20863c8ddf4dc7fd290b5ffddfc83d5918447523001b67a2dc81a0899b0d/fonttools-4.55.4-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b18792529ca3c24259090b6faa60bd0bdfcc4a06312e8f06d6fccab007f07193", size = 4784624 },
{ url = "https://files.pythonhosted.org/packages/94/2f/c74fa21fddd6a4c22c80f2f86820a0c960a5c0f8f46407bc9c1e1c9b9f50/fonttools-4.55.4-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8e91d25261ebc9ff2143b95e6272f46b9f28e260b8f40feda07c80b66ff7e61d", size = 4856618 },
{ url = "https://files.pythonhosted.org/packages/00/37/1e9f1cb3b2454adac0b5fe85e940ea8d4eb174a9338e47020ec7d3cf1057/fonttools-4.55.4-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:2695781a897395d03504fd24b60c944726b5e7b7af9ea3d922f7319d70c6fc37", size = 4765002 },
{ url = "https://files.pythonhosted.org/packages/ff/fa/36b815132a71b9df13e9c52cd198194b48eb31f9a6d041f3ec6476d8b74f/fonttools-4.55.4-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:21de3ef5b8e5361fd01d6aef2c09dda4ede139d6b3a1f5cf621d6bea48840dfd", size = 4985906 },
{ url = "https://files.pythonhosted.org/packages/73/9a/51eb1cdc08d0883c40a3ea6d9a8ecd862bac587371bc92e0f35315688994/fonttools-4.55.4-cp313-cp313-win32.whl", hash = "sha256:0ef33fda14e39aabb892a18ed16805b0b5b4e8a801fd1815a694be9dc7f30024", size = 2163286 },
{ url = "https://files.pythonhosted.org/packages/2b/a0/6fdeb063dfb401e3efc342ef8ff6cd9e290d9895c3777dbc3850842eb1ad/fonttools-4.55.4-cp313-cp313-win_amd64.whl", hash = "sha256:e953b1614e32b6da828ae7659c8f330a593b6c4b7a4a31f8f63c01b12f0d3680", size = 2209022 },
{ url = "https://files.pythonhosted.org/packages/f3/5d/29b126e12df844432e188d19e74f47c2578fa5a72a122b4f41819e1e0923/fonttools-4.55.4-py3-none-any.whl", hash = "sha256:d07ad8f31038c6394a0945752458313367a0ef8125d284ee59f99e68393a3c2d", size = 1111964 },
]
[[package]]
name = "fsspec"
version = "2024.12.0"
@@ -647,7 +761,7 @@ name = "humanfriendly"
version = "10.0"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "pyreadline3", marker = "sys_platform == 'win32' or (extra == 'extra-14-kokoro-fastapi-cpu' and extra == 'extra-14-kokoro-fastapi-gpu')" },
{ name = "pyreadline3", marker = "sys_platform == 'win32'" },
]
sdist = { url = "https://files.pythonhosted.org/packages/cc/3f/2c29224acb2e2df4d2046e4c73ee2662023c58ff5b113c4c1adac0886c43/humanfriendly-10.0.tar.gz", hash = "sha256:6b0b831ce8f15f7300721aa49829fc4e83921a9a301cc7f606be6686a2288ddc", size = 360702 }
wheels = [
@@ -788,6 +902,93 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/d1/0f/8910b19ac0670a0f80ce1008e5e751c4a57e14d2c4c13a482aa6079fa9d6/jsonschema_specifications-2024.10.1-py3-none-any.whl", hash = "sha256:a09a0680616357d9a0ecf05c12ad234479f549239d0f5b55f3deea67475da9bf", size = 18459 },
]
[[package]]
name = "kiwisolver"
version = "1.4.8"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/82/59/7c91426a8ac292e1cdd53a63b6d9439abd573c875c3f92c146767dd33faf/kiwisolver-1.4.8.tar.gz", hash = "sha256:23d5f023bdc8c7e54eb65f03ca5d5bb25b601eac4d7f1a042888a1f45237987e", size = 97538 }
wheels = [
{ url = "https://files.pythonhosted.org/packages/47/5f/4d8e9e852d98ecd26cdf8eaf7ed8bc33174033bba5e07001b289f07308fd/kiwisolver-1.4.8-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:88c6f252f6816a73b1f8c904f7bbe02fd67c09a69f7cb8a0eecdbf5ce78e63db", size = 124623 },
{ url = "https://files.pythonhosted.org/packages/1d/70/7f5af2a18a76fe92ea14675f8bd88ce53ee79e37900fa5f1a1d8e0b42998/kiwisolver-1.4.8-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:c72941acb7b67138f35b879bbe85be0f6c6a70cab78fe3ef6db9c024d9223e5b", size = 66720 },
{ url = "https://files.pythonhosted.org/packages/c6/13/e15f804a142353aefd089fadc8f1d985561a15358c97aca27b0979cb0785/kiwisolver-1.4.8-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:ce2cf1e5688edcb727fdf7cd1bbd0b6416758996826a8be1d958f91880d0809d", size = 65413 },
{ url = "https://files.pythonhosted.org/packages/ce/6d/67d36c4d2054e83fb875c6b59d0809d5c530de8148846b1370475eeeece9/kiwisolver-1.4.8-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:c8bf637892dc6e6aad2bc6d4d69d08764166e5e3f69d469e55427b6ac001b19d", size = 1650826 },
{ url = "https://files.pythonhosted.org/packages/de/c6/7b9bb8044e150d4d1558423a1568e4f227193662a02231064e3824f37e0a/kiwisolver-1.4.8-cp310-cp310-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:034d2c891f76bd3edbdb3ea11140d8510dca675443da7304205a2eaa45d8334c", size = 1628231 },
{ url = "https://files.pythonhosted.org/packages/b6/38/ad10d437563063eaaedbe2c3540a71101fc7fb07a7e71f855e93ea4de605/kiwisolver-1.4.8-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d47b28d1dfe0793d5e96bce90835e17edf9a499b53969b03c6c47ea5985844c3", size = 1408938 },
{ url = "https://files.pythonhosted.org/packages/52/ce/c0106b3bd7f9e665c5f5bc1e07cc95b5dabd4e08e3dad42dbe2faad467e7/kiwisolver-1.4.8-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:eb158fe28ca0c29f2260cca8c43005329ad58452c36f0edf298204de32a9a3ed", size = 1422799 },
{ url = "https://files.pythonhosted.org/packages/d0/87/efb704b1d75dc9758087ba374c0f23d3254505edaedd09cf9d247f7878b9/kiwisolver-1.4.8-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d5536185fce131780ebd809f8e623bf4030ce1b161353166c49a3c74c287897f", size = 1354362 },
{ url = "https://files.pythonhosted.org/packages/eb/b3/fd760dc214ec9a8f208b99e42e8f0130ff4b384eca8b29dd0efc62052176/kiwisolver-1.4.8-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:369b75d40abedc1da2c1f4de13f3482cb99e3237b38726710f4a793432b1c5ff", size = 2222695 },
{ url = "https://files.pythonhosted.org/packages/a2/09/a27fb36cca3fc01700687cc45dae7a6a5f8eeb5f657b9f710f788748e10d/kiwisolver-1.4.8-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:641f2ddf9358c80faa22e22eb4c9f54bd3f0e442e038728f500e3b978d00aa7d", size = 2370802 },
{ url = "https://files.pythonhosted.org/packages/3d/c3/ba0a0346db35fe4dc1f2f2cf8b99362fbb922d7562e5f911f7ce7a7b60fa/kiwisolver-1.4.8-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:d561d2d8883e0819445cfe58d7ddd673e4015c3c57261d7bdcd3710d0d14005c", size = 2334646 },
{ url = "https://files.pythonhosted.org/packages/41/52/942cf69e562f5ed253ac67d5c92a693745f0bed3c81f49fc0cbebe4d6b00/kiwisolver-1.4.8-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:1732e065704b47c9afca7ffa272f845300a4eb959276bf6970dc07265e73b605", size = 2467260 },
{ url = "https://files.pythonhosted.org/packages/32/26/2d9668f30d8a494b0411d4d7d4ea1345ba12deb6a75274d58dd6ea01e951/kiwisolver-1.4.8-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:bcb1ebc3547619c3b58a39e2448af089ea2ef44b37988caf432447374941574e", size = 2288633 },
{ url = "https://files.pythonhosted.org/packages/98/99/0dd05071654aa44fe5d5e350729961e7bb535372935a45ac89a8924316e6/kiwisolver-1.4.8-cp310-cp310-win_amd64.whl", hash = "sha256:89c107041f7b27844179ea9c85d6da275aa55ecf28413e87624d033cf1f6b751", size = 71885 },
{ url = "https://files.pythonhosted.org/packages/6c/fc/822e532262a97442989335394d441cd1d0448c2e46d26d3e04efca84df22/kiwisolver-1.4.8-cp310-cp310-win_arm64.whl", hash = "sha256:b5773efa2be9eb9fcf5415ea3ab70fc785d598729fd6057bea38d539ead28271", size = 65175 },
{ url = "https://files.pythonhosted.org/packages/da/ed/c913ee28936c371418cb167b128066ffb20bbf37771eecc2c97edf8a6e4c/kiwisolver-1.4.8-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:a4d3601908c560bdf880f07d94f31d734afd1bb71e96585cace0e38ef44c6d84", size = 124635 },
{ url = "https://files.pythonhosted.org/packages/4c/45/4a7f896f7467aaf5f56ef093d1f329346f3b594e77c6a3c327b2d415f521/kiwisolver-1.4.8-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:856b269c4d28a5c0d5e6c1955ec36ebfd1651ac00e1ce0afa3e28da95293b561", size = 66717 },
{ url = "https://files.pythonhosted.org/packages/5f/b4/c12b3ac0852a3a68f94598d4c8d569f55361beef6159dce4e7b624160da2/kiwisolver-1.4.8-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:c2b9a96e0f326205af81a15718a9073328df1173a2619a68553decb7097fd5d7", size = 65413 },
{ url = "https://files.pythonhosted.org/packages/a9/98/1df4089b1ed23d83d410adfdc5947245c753bddfbe06541c4aae330e9e70/kiwisolver-1.4.8-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c5020c83e8553f770cb3b5fc13faac40f17e0b205bd237aebd21d53d733adb03", size = 1343994 },
{ url = "https://files.pythonhosted.org/packages/8d/bf/b4b169b050c8421a7c53ea1ea74e4ef9c335ee9013216c558a047f162d20/kiwisolver-1.4.8-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dace81d28c787956bfbfbbfd72fdcef014f37d9b48830829e488fdb32b49d954", size = 1434804 },
{ url = "https://files.pythonhosted.org/packages/66/5a/e13bd341fbcf73325ea60fdc8af752addf75c5079867af2e04cc41f34434/kiwisolver-1.4.8-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:11e1022b524bd48ae56c9b4f9296bce77e15a2e42a502cceba602f804b32bb79", size = 1450690 },
{ url = "https://files.pythonhosted.org/packages/9b/4f/5955dcb376ba4a830384cc6fab7d7547bd6759fe75a09564910e9e3bb8ea/kiwisolver-1.4.8-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3b9b4d2892fefc886f30301cdd80debd8bb01ecdf165a449eb6e78f79f0fabd6", size = 1376839 },
{ url = "https://files.pythonhosted.org/packages/3a/97/5edbed69a9d0caa2e4aa616ae7df8127e10f6586940aa683a496c2c280b9/kiwisolver-1.4.8-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3a96c0e790ee875d65e340ab383700e2b4891677b7fcd30a699146f9384a2bb0", size = 1435109 },
{ url = "https://files.pythonhosted.org/packages/13/fc/e756382cb64e556af6c1809a1bbb22c141bbc2445049f2da06b420fe52bf/kiwisolver-1.4.8-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:23454ff084b07ac54ca8be535f4174170c1094a4cff78fbae4f73a4bcc0d4dab", size = 2245269 },
{ url = "https://files.pythonhosted.org/packages/76/15/e59e45829d7f41c776d138245cabae6515cb4eb44b418f6d4109c478b481/kiwisolver-1.4.8-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:87b287251ad6488e95b4f0b4a79a6d04d3ea35fde6340eb38fbd1ca9cd35bbbc", size = 2393468 },
{ url = "https://files.pythonhosted.org/packages/e9/39/483558c2a913ab8384d6e4b66a932406f87c95a6080112433da5ed668559/kiwisolver-1.4.8-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:b21dbe165081142b1232a240fc6383fd32cdd877ca6cc89eab93e5f5883e1c25", size = 2355394 },
{ url = "https://files.pythonhosted.org/packages/01/aa/efad1fbca6570a161d29224f14b082960c7e08268a133fe5dc0f6906820e/kiwisolver-1.4.8-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:768cade2c2df13db52475bd28d3a3fac8c9eff04b0e9e2fda0f3760f20b3f7fc", size = 2490901 },
{ url = "https://files.pythonhosted.org/packages/c9/4f/15988966ba46bcd5ab9d0c8296914436720dd67fca689ae1a75b4ec1c72f/kiwisolver-1.4.8-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:d47cfb2650f0e103d4bf68b0b5804c68da97272c84bb12850d877a95c056bd67", size = 2312306 },
{ url = "https://files.pythonhosted.org/packages/2d/27/bdf1c769c83f74d98cbc34483a972f221440703054894a37d174fba8aa68/kiwisolver-1.4.8-cp311-cp311-win_amd64.whl", hash = "sha256:ed33ca2002a779a2e20eeb06aea7721b6e47f2d4b8a8ece979d8ba9e2a167e34", size = 71966 },
{ url = "https://files.pythonhosted.org/packages/4a/c9/9642ea855604aeb2968a8e145fc662edf61db7632ad2e4fb92424be6b6c0/kiwisolver-1.4.8-cp311-cp311-win_arm64.whl", hash = "sha256:16523b40aab60426ffdebe33ac374457cf62863e330a90a0383639ce14bf44b2", size = 65311 },
{ url = "https://files.pythonhosted.org/packages/fc/aa/cea685c4ab647f349c3bc92d2daf7ae34c8e8cf405a6dcd3a497f58a2ac3/kiwisolver-1.4.8-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:d6af5e8815fd02997cb6ad9bbed0ee1e60014438ee1a5c2444c96f87b8843502", size = 124152 },
{ url = "https://files.pythonhosted.org/packages/c5/0b/8db6d2e2452d60d5ebc4ce4b204feeb16176a851fd42462f66ade6808084/kiwisolver-1.4.8-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:bade438f86e21d91e0cf5dd7c0ed00cda0f77c8c1616bd83f9fc157fa6760d31", size = 66555 },
{ url = "https://files.pythonhosted.org/packages/60/26/d6a0db6785dd35d3ba5bf2b2df0aedc5af089962c6eb2cbf67a15b81369e/kiwisolver-1.4.8-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:b83dc6769ddbc57613280118fb4ce3cd08899cc3369f7d0e0fab518a7cf37fdb", size = 65067 },
{ url = "https://files.pythonhosted.org/packages/c9/ed/1d97f7e3561e09757a196231edccc1bcf59d55ddccefa2afc9c615abd8e0/kiwisolver-1.4.8-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:111793b232842991be367ed828076b03d96202c19221b5ebab421ce8bcad016f", size = 1378443 },
{ url = "https://files.pythonhosted.org/packages/29/61/39d30b99954e6b46f760e6289c12fede2ab96a254c443639052d1b573fbc/kiwisolver-1.4.8-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:257af1622860e51b1a9d0ce387bf5c2c4f36a90594cb9514f55b074bcc787cfc", size = 1472728 },
{ url = "https://files.pythonhosted.org/packages/0c/3e/804163b932f7603ef256e4a715e5843a9600802bb23a68b4e08c8c0ff61d/kiwisolver-1.4.8-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:69b5637c3f316cab1ec1c9a12b8c5f4750a4c4b71af9157645bf32830e39c03a", size = 1478388 },
{ url = "https://files.pythonhosted.org/packages/8a/9e/60eaa75169a154700be74f875a4d9961b11ba048bef315fbe89cb6999056/kiwisolver-1.4.8-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:782bb86f245ec18009890e7cb8d13a5ef54dcf2ebe18ed65f795e635a96a1c6a", size = 1413849 },
{ url = "https://files.pythonhosted.org/packages/bc/b3/9458adb9472e61a998c8c4d95cfdfec91c73c53a375b30b1428310f923e4/kiwisolver-1.4.8-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cc978a80a0db3a66d25767b03688f1147a69e6237175c0f4ffffaaedf744055a", size = 1475533 },
{ url = "https://files.pythonhosted.org/packages/e4/7a/0a42d9571e35798de80aef4bb43a9b672aa7f8e58643d7bd1950398ffb0a/kiwisolver-1.4.8-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:36dbbfd34838500a31f52c9786990d00150860e46cd5041386f217101350f0d3", size = 2268898 },
{ url = "https://files.pythonhosted.org/packages/d9/07/1255dc8d80271400126ed8db35a1795b1a2c098ac3a72645075d06fe5c5d/kiwisolver-1.4.8-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:eaa973f1e05131de5ff3569bbba7f5fd07ea0595d3870ed4a526d486fe57fa1b", size = 2425605 },
{ url = "https://files.pythonhosted.org/packages/84/df/5a3b4cf13780ef6f6942df67b138b03b7e79e9f1f08f57c49957d5867f6e/kiwisolver-1.4.8-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:a66f60f8d0c87ab7f59b6fb80e642ebb29fec354a4dfad687ca4092ae69d04f4", size = 2375801 },
{ url = "https://files.pythonhosted.org/packages/8f/10/2348d068e8b0f635c8c86892788dac7a6b5c0cb12356620ab575775aad89/kiwisolver-1.4.8-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:858416b7fb777a53f0c59ca08190ce24e9abbd3cffa18886a5781b8e3e26f65d", size = 2520077 },
{ url = "https://files.pythonhosted.org/packages/32/d8/014b89fee5d4dce157d814303b0fce4d31385a2af4c41fed194b173b81ac/kiwisolver-1.4.8-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:085940635c62697391baafaaeabdf3dd7a6c3643577dde337f4d66eba021b2b8", size = 2338410 },
{ url = "https://files.pythonhosted.org/packages/bd/72/dfff0cc97f2a0776e1c9eb5bef1ddfd45f46246c6533b0191887a427bca5/kiwisolver-1.4.8-cp312-cp312-win_amd64.whl", hash = "sha256:01c3d31902c7db5fb6182832713d3b4122ad9317c2c5877d0539227d96bb2e50", size = 71853 },
{ url = "https://files.pythonhosted.org/packages/dc/85/220d13d914485c0948a00f0b9eb419efaf6da81b7d72e88ce2391f7aed8d/kiwisolver-1.4.8-cp312-cp312-win_arm64.whl", hash = "sha256:a3c44cb68861de93f0c4a8175fbaa691f0aa22550c331fefef02b618a9dcb476", size = 65424 },
{ url = "https://files.pythonhosted.org/packages/79/b3/e62464a652f4f8cd9006e13d07abad844a47df1e6537f73ddfbf1bc997ec/kiwisolver-1.4.8-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:1c8ceb754339793c24aee1c9fb2485b5b1f5bb1c2c214ff13368431e51fc9a09", size = 124156 },
{ url = "https://files.pythonhosted.org/packages/8d/2d/f13d06998b546a2ad4f48607a146e045bbe48030774de29f90bdc573df15/kiwisolver-1.4.8-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:54a62808ac74b5e55a04a408cda6156f986cefbcf0ada13572696b507cc92fa1", size = 66555 },
{ url = "https://files.pythonhosted.org/packages/59/e3/b8bd14b0a54998a9fd1e8da591c60998dc003618cb19a3f94cb233ec1511/kiwisolver-1.4.8-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:68269e60ee4929893aad82666821aaacbd455284124817af45c11e50a4b42e3c", size = 65071 },
{ url = "https://files.pythonhosted.org/packages/f0/1c/6c86f6d85ffe4d0ce04228d976f00674f1df5dc893bf2dd4f1928748f187/kiwisolver-1.4.8-cp313-cp313-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:34d142fba9c464bc3bbfeff15c96eab0e7310343d6aefb62a79d51421fcc5f1b", size = 1378053 },
{ url = "https://files.pythonhosted.org/packages/4e/b9/1c6e9f6dcb103ac5cf87cb695845f5fa71379021500153566d8a8a9fc291/kiwisolver-1.4.8-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3ddc373e0eef45b59197de815b1b28ef89ae3955e7722cc9710fb91cd77b7f47", size = 1472278 },
{ url = "https://files.pythonhosted.org/packages/ee/81/aca1eb176de671f8bda479b11acdc42c132b61a2ac861c883907dde6debb/kiwisolver-1.4.8-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:77e6f57a20b9bd4e1e2cedda4d0b986ebd0216236f0106e55c28aea3d3d69b16", size = 1478139 },
{ url = "https://files.pythonhosted.org/packages/49/f4/e081522473671c97b2687d380e9e4c26f748a86363ce5af48b4a28e48d06/kiwisolver-1.4.8-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:08e77738ed7538f036cd1170cbed942ef749137b1311fa2bbe2a7fda2f6bf3cc", size = 1413517 },
{ url = "https://files.pythonhosted.org/packages/8f/e9/6a7d025d8da8c4931522922cd706105aa32b3291d1add8c5427cdcd66e63/kiwisolver-1.4.8-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a5ce1e481a74b44dd5e92ff03ea0cb371ae7a0268318e202be06c8f04f4f1246", size = 1474952 },
{ url = "https://files.pythonhosted.org/packages/82/13/13fa685ae167bee5d94b415991c4fc7bb0a1b6ebea6e753a87044b209678/kiwisolver-1.4.8-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:fc2ace710ba7c1dfd1a3b42530b62b9ceed115f19a1656adefce7b1782a37794", size = 2269132 },
{ url = "https://files.pythonhosted.org/packages/ef/92/bb7c9395489b99a6cb41d502d3686bac692586db2045adc19e45ee64ed23/kiwisolver-1.4.8-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:3452046c37c7692bd52b0e752b87954ef86ee2224e624ef7ce6cb21e8c41cc1b", size = 2425997 },
{ url = "https://files.pythonhosted.org/packages/ed/12/87f0e9271e2b63d35d0d8524954145837dd1a6c15b62a2d8c1ebe0f182b4/kiwisolver-1.4.8-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:7e9a60b50fe8b2ec6f448fe8d81b07e40141bfced7f896309df271a0b92f80f3", size = 2376060 },
{ url = "https://files.pythonhosted.org/packages/02/6e/c8af39288edbce8bf0fa35dee427b082758a4b71e9c91ef18fa667782138/kiwisolver-1.4.8-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:918139571133f366e8362fa4a297aeba86c7816b7ecf0bc79168080e2bd79957", size = 2520471 },
{ url = "https://files.pythonhosted.org/packages/13/78/df381bc7b26e535c91469f77f16adcd073beb3e2dd25042efd064af82323/kiwisolver-1.4.8-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:e063ef9f89885a1d68dd8b2e18f5ead48653176d10a0e324e3b0030e3a69adeb", size = 2338793 },
{ url = "https://files.pythonhosted.org/packages/d0/dc/c1abe38c37c071d0fc71c9a474fd0b9ede05d42f5a458d584619cfd2371a/kiwisolver-1.4.8-cp313-cp313-win_amd64.whl", hash = "sha256:a17b7c4f5b2c51bb68ed379defd608a03954a1845dfed7cc0117f1cc8a9b7fd2", size = 71855 },
{ url = "https://files.pythonhosted.org/packages/a0/b6/21529d595b126ac298fdd90b705d87d4c5693de60023e0efcb4f387ed99e/kiwisolver-1.4.8-cp313-cp313-win_arm64.whl", hash = "sha256:3cd3bc628b25f74aedc6d374d5babf0166a92ff1317f46267f12d2ed54bc1d30", size = 65430 },
{ url = "https://files.pythonhosted.org/packages/34/bd/b89380b7298e3af9b39f49334e3e2a4af0e04819789f04b43d560516c0c8/kiwisolver-1.4.8-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:370fd2df41660ed4e26b8c9d6bbcad668fbe2560462cba151a721d49e5b6628c", size = 126294 },
{ url = "https://files.pythonhosted.org/packages/83/41/5857dc72e5e4148eaac5aa76e0703e594e4465f8ab7ec0fc60e3a9bb8fea/kiwisolver-1.4.8-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:84a2f830d42707de1d191b9490ac186bf7997a9495d4e9072210a1296345f7dc", size = 67736 },
{ url = "https://files.pythonhosted.org/packages/e1/d1/be059b8db56ac270489fb0b3297fd1e53d195ba76e9bbb30e5401fa6b759/kiwisolver-1.4.8-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:7a3ad337add5148cf51ce0b55642dc551c0b9d6248458a757f98796ca7348712", size = 66194 },
{ url = "https://files.pythonhosted.org/packages/e1/83/4b73975f149819eb7dcf9299ed467eba068ecb16439a98990dcb12e63fdd/kiwisolver-1.4.8-cp313-cp313t-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7506488470f41169b86d8c9aeff587293f530a23a23a49d6bc64dab66bedc71e", size = 1465942 },
{ url = "https://files.pythonhosted.org/packages/c7/2c/30a5cdde5102958e602c07466bce058b9d7cb48734aa7a4327261ac8e002/kiwisolver-1.4.8-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2f0121b07b356a22fb0414cec4666bbe36fd6d0d759db3d37228f496ed67c880", size = 1595341 },
{ url = "https://files.pythonhosted.org/packages/ff/9b/1e71db1c000385aa069704f5990574b8244cce854ecd83119c19e83c9586/kiwisolver-1.4.8-cp313-cp313t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d6d6bd87df62c27d4185de7c511c6248040afae67028a8a22012b010bc7ad062", size = 1598455 },
{ url = "https://files.pythonhosted.org/packages/85/92/c8fec52ddf06231b31cbb779af77e99b8253cd96bd135250b9498144c78b/kiwisolver-1.4.8-cp313-cp313t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:291331973c64bb9cce50bbe871fb2e675c4331dab4f31abe89f175ad7679a4d7", size = 1522138 },
{ url = "https://files.pythonhosted.org/packages/0b/51/9eb7e2cd07a15d8bdd976f6190c0164f92ce1904e5c0c79198c4972926b7/kiwisolver-1.4.8-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:893f5525bb92d3d735878ec00f781b2de998333659507d29ea4466208df37bed", size = 1582857 },
{ url = "https://files.pythonhosted.org/packages/0f/95/c5a00387a5405e68ba32cc64af65ce881a39b98d73cc394b24143bebc5b8/kiwisolver-1.4.8-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:b47a465040146981dc9db8647981b8cb96366fbc8d452b031e4f8fdffec3f26d", size = 2293129 },
{ url = "https://files.pythonhosted.org/packages/44/83/eeb7af7d706b8347548313fa3a3a15931f404533cc54fe01f39e830dd231/kiwisolver-1.4.8-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:99cea8b9dd34ff80c521aef46a1dddb0dcc0283cf18bde6d756f1e6f31772165", size = 2421538 },
{ url = "https://files.pythonhosted.org/packages/05/f9/27e94c1b3eb29e6933b6986ffc5fa1177d2cd1f0c8efc5f02c91c9ac61de/kiwisolver-1.4.8-cp313-cp313t-musllinux_1_2_ppc64le.whl", hash = "sha256:151dffc4865e5fe6dafce5480fab84f950d14566c480c08a53c663a0020504b6", size = 2390661 },
{ url = "https://files.pythonhosted.org/packages/d9/d4/3c9735faa36ac591a4afcc2980d2691000506050b7a7e80bcfe44048daa7/kiwisolver-1.4.8-cp313-cp313t-musllinux_1_2_s390x.whl", hash = "sha256:577facaa411c10421314598b50413aa1ebcf5126f704f1e5d72d7e4e9f020d90", size = 2546710 },
{ url = "https://files.pythonhosted.org/packages/4c/fa/be89a49c640930180657482a74970cdcf6f7072c8d2471e1babe17a222dc/kiwisolver-1.4.8-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:be4816dc51c8a471749d664161b434912eee82f2ea66bd7628bd14583a833e85", size = 2349213 },
{ url = "https://files.pythonhosted.org/packages/1f/f9/ae81c47a43e33b93b0a9819cac6723257f5da2a5a60daf46aa5c7226ea85/kiwisolver-1.4.8-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:e7a019419b7b510f0f7c9dceff8c5eae2392037eae483a7f9162625233802b0a", size = 60403 },
{ url = "https://files.pythonhosted.org/packages/58/ca/f92b5cb6f4ce0c1ebfcfe3e2e42b96917e16f7090e45b21102941924f18f/kiwisolver-1.4.8-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:286b18e86682fd2217a48fc6be6b0f20c1d0ed10958d8dc53453ad58d7be0bf8", size = 58657 },
{ url = "https://files.pythonhosted.org/packages/80/28/ae0240f732f0484d3a4dc885d055653c47144bdf59b670aae0ec3c65a7c8/kiwisolver-1.4.8-pp310-pypy310_pp73-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4191ee8dfd0be1c3666ccbac178c5a05d5f8d689bbe3fc92f3c4abec817f8fe0", size = 84948 },
{ url = "https://files.pythonhosted.org/packages/5d/eb/78d50346c51db22c7203c1611f9b513075f35c4e0e4877c5dde378d66043/kiwisolver-1.4.8-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7cd2785b9391f2873ad46088ed7599a6a71e762e1ea33e87514b1a441ed1da1c", size = 81186 },
{ url = "https://files.pythonhosted.org/packages/43/f8/7259f18c77adca88d5f64f9a522792e178b2691f3748817a8750c2d216ef/kiwisolver-1.4.8-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c07b29089b7ba090b6f1a669f1411f27221c3662b3a1b7010e67b59bb5a6f10b", size = 80279 },
{ url = "https://files.pythonhosted.org/packages/3a/1d/50ad811d1c5dae091e4cf046beba925bcae0a610e79ae4c538f996f63ed5/kiwisolver-1.4.8-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:65ea09a5a3faadd59c2ce96dc7bf0f364986a315949dc6374f04396b0d60e09b", size = 71762 },
]
[[package]]
name = "kokoro-fastapi"
version = "0.1.0"
@@ -799,9 +1000,9 @@ dependencies = [
{ name = "fastapi" },
{ name = "html2text" },
{ name = "loguru" },
{ name = "matplotlib" },
{ name = "munch" },
{ name = "numpy" },
{ name = "onnxruntime" },
{ name = "openai" },
{ name = "phonemizer" },
{ name = "pydantic" },
@@ -821,10 +1022,11 @@ dependencies = [
[package.optional-dependencies]
cpu = [
{ name = "torch", version = "2.5.1", source = { registry = "https://download.pytorch.org/whl/cpu" }, marker = "(platform_machine == 'aarch64' and sys_platform == 'linux') or sys_platform == 'darwin'" },
{ name = "torch", version = "2.5.1+cpu", source = { registry = "https://download.pytorch.org/whl/cpu" }, marker = "(platform_machine != 'aarch64' and sys_platform == 'linux') or (sys_platform != 'darwin' and sys_platform != 'linux')" },
{ name = "onnxruntime" },
{ name = "torch", version = "2.5.1", source = { registry = "https://download.pytorch.org/whl/cpu" } },
]
gpu = [
{ name = "onnxruntime-gpu" },
{ name = "torch", version = "2.5.1+cu121", source = { registry = "https://download.pytorch.org/whl/cu121" } },
]
test = [
@@ -846,9 +1048,11 @@ requires-dist = [
{ name = "html2text", specifier = ">=2024.2.26" },
{ name = "httpx", marker = "extra == 'test'", specifier = "==0.26.0" },
{ name = "loguru", specifier = "==0.7.3" },
{ name = "matplotlib", specifier = ">=3.10.0" },
{ name = "munch", specifier = "==4.0.0" },
{ name = "numpy", specifier = ">=1.26.0" },
{ name = "onnxruntime", specifier = "==1.20.1" },
{ name = "onnxruntime", marker = "extra == 'cpu'", specifier = "==1.20.1" },
{ name = "onnxruntime-gpu", marker = "extra == 'gpu'", specifier = "==1.20.1" },
{ name = "openai", specifier = ">=1.59.6" },
{ name = "openai", marker = "extra == 'test'", specifier = ">=1.59.6" },
{ name = "phonemizer", specifier = "==3.3.0" },
@@ -1035,6 +1239,58 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/3f/14/c3554d512d5f9100a95e737502f4a2323a1959f6d0d01e0d0997b35f7b10/MarkupSafe-2.1.5-cp312-cp312-win_amd64.whl", hash = "sha256:823b65d8706e32ad2df51ed89496147a42a2a6e01c13cfb6ffb8b1e92bc910bb", size = 17127 },
]
[[package]]
name = "matplotlib"
version = "3.10.0"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "contourpy" },
{ name = "cycler" },
{ name = "fonttools" },
{ name = "kiwisolver" },
{ name = "numpy" },
{ name = "packaging" },
{ name = "pillow" },
{ name = "pyparsing" },
{ name = "python-dateutil" },
]
sdist = { url = "https://files.pythonhosted.org/packages/68/dd/fa2e1a45fce2d09f4aea3cee169760e672c8262325aa5796c49d543dc7e6/matplotlib-3.10.0.tar.gz", hash = "sha256:b886d02a581b96704c9d1ffe55709e49b4d2d52709ccebc4be42db856e511278", size = 36686418 }
wheels = [
{ url = "https://files.pythonhosted.org/packages/09/ec/3cdff7b5239adaaacefcc4f77c316dfbbdf853c4ed2beec467e0fec31b9f/matplotlib-3.10.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:2c5829a5a1dd5a71f0e31e6e8bb449bc0ee9dbfb05ad28fc0c6b55101b3a4be6", size = 8160551 },
{ url = "https://files.pythonhosted.org/packages/41/f2/b518f2c7f29895c9b167bf79f8529c63383ae94eaf49a247a4528e9a148d/matplotlib-3.10.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:a2a43cbefe22d653ab34bb55d42384ed30f611bcbdea1f8d7f431011a2e1c62e", size = 8034853 },
{ url = "https://files.pythonhosted.org/packages/ed/8d/45754b4affdb8f0d1a44e4e2bcd932cdf35b256b60d5eda9f455bb293ed0/matplotlib-3.10.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:607b16c8a73943df110f99ee2e940b8a1cbf9714b65307c040d422558397dac5", size = 8446724 },
{ url = "https://files.pythonhosted.org/packages/09/5a/a113495110ae3e3395c72d82d7bc4802902e46dc797f6b041e572f195c56/matplotlib-3.10.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:01d2b19f13aeec2e759414d3bfe19ddfb16b13a1250add08d46d5ff6f9be83c6", size = 8583905 },
{ url = "https://files.pythonhosted.org/packages/12/b1/8b1655b4c9ed4600c817c419f7eaaf70082630efd7556a5b2e77a8a3cdaf/matplotlib-3.10.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:5e6c6461e1fc63df30bf6f80f0b93f5b6784299f721bc28530477acd51bfc3d1", size = 9395223 },
{ url = "https://files.pythonhosted.org/packages/5a/85/b9a54d64585a6b8737a78a61897450403c30f39e0bd3214270bb0b96f002/matplotlib-3.10.0-cp310-cp310-win_amd64.whl", hash = "sha256:994c07b9d9fe8d25951e3202a68c17900679274dadfc1248738dcfa1bd40d7f3", size = 8025355 },
{ url = "https://files.pythonhosted.org/packages/0c/f1/e37f6c84d252867d7ddc418fff70fc661cfd363179263b08e52e8b748e30/matplotlib-3.10.0-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:fd44fc75522f58612ec4a33958a7e5552562b7705b42ef1b4f8c0818e304a363", size = 8171677 },
{ url = "https://files.pythonhosted.org/packages/c7/8b/92e9da1f28310a1f6572b5c55097b0c0ceb5e27486d85fb73b54f5a9b939/matplotlib-3.10.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:c58a9622d5dbeb668f407f35f4e6bfac34bb9ecdcc81680c04d0258169747997", size = 8044945 },
{ url = "https://files.pythonhosted.org/packages/c5/cb/49e83f0fd066937a5bd3bc5c5d63093703f3637b2824df8d856e0558beef/matplotlib-3.10.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:845d96568ec873be63f25fa80e9e7fae4be854a66a7e2f0c8ccc99e94a8bd4ef", size = 8458269 },
{ url = "https://files.pythonhosted.org/packages/b2/7d/2d873209536b9ee17340754118a2a17988bc18981b5b56e6715ee07373ac/matplotlib-3.10.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5439f4c5a3e2e8eab18e2f8c3ef929772fd5641876db71f08127eed95ab64683", size = 8599369 },
{ url = "https://files.pythonhosted.org/packages/b8/03/57d6cbbe85c61fe4cbb7c94b54dce443d68c21961830833a1f34d056e5ea/matplotlib-3.10.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:4673ff67a36152c48ddeaf1135e74ce0d4bce1bbf836ae40ed39c29edf7e2765", size = 9405992 },
{ url = "https://files.pythonhosted.org/packages/14/cf/e382598f98be11bf51dd0bc60eca44a517f6793e3dc8b9d53634a144620c/matplotlib-3.10.0-cp311-cp311-win_amd64.whl", hash = "sha256:7e8632baebb058555ac0cde75db885c61f1212e47723d63921879806b40bec6a", size = 8034580 },
{ url = "https://files.pythonhosted.org/packages/44/c7/6b2d8cb7cc251d53c976799cacd3200add56351c175ba89ab9cbd7c1e68a/matplotlib-3.10.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:4659665bc7c9b58f8c00317c3c2a299f7f258eeae5a5d56b4c64226fca2f7c59", size = 8172465 },
{ url = "https://files.pythonhosted.org/packages/42/2a/6d66d0fba41e13e9ca6512a0a51170f43e7e7ed3a8dfa036324100775612/matplotlib-3.10.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:d44cb942af1693cced2604c33a9abcef6205601c445f6d0dc531d813af8a2f5a", size = 8043300 },
{ url = "https://files.pythonhosted.org/packages/90/60/2a60342b27b90a16bada939a85e29589902b41073f59668b904b15ea666c/matplotlib-3.10.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a994f29e968ca002b50982b27168addfd65f0105610b6be7fa515ca4b5307c95", size = 8448936 },
{ url = "https://files.pythonhosted.org/packages/a7/b2/d872fc3d753516870d520595ddd8ce4dd44fa797a240999f125f58521ad7/matplotlib-3.10.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9b0558bae37f154fffda54d779a592bc97ca8b4701f1c710055b609a3bac44c8", size = 8594151 },
{ url = "https://files.pythonhosted.org/packages/f4/bd/b2f60cf7f57d014ab33e4f74602a2b5bdc657976db8196bbc022185f6f9c/matplotlib-3.10.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:503feb23bd8c8acc75541548a1d709c059b7184cde26314896e10a9f14df5f12", size = 9400347 },
{ url = "https://files.pythonhosted.org/packages/9f/6e/264673e64001b99d747aff5a288eca82826c024437a3694e19aed1decf46/matplotlib-3.10.0-cp312-cp312-win_amd64.whl", hash = "sha256:c40ba2eb08b3f5de88152c2333c58cee7edcead0a2a0d60fcafa116b17117adc", size = 8039144 },
{ url = "https://files.pythonhosted.org/packages/72/11/1b2a094d95dcb6e6edd4a0b238177c439006c6b7a9fe8d31801237bf512f/matplotlib-3.10.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:96f2886f5c1e466f21cc41b70c5a0cd47bfa0015eb2d5793c88ebce658600e25", size = 8173073 },
{ url = "https://files.pythonhosted.org/packages/0d/c4/87b6ad2723070511a411ea719f9c70fde64605423b184face4e94986de9d/matplotlib-3.10.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:12eaf48463b472c3c0f8dbacdbf906e573013df81a0ab82f0616ea4b11281908", size = 8043892 },
{ url = "https://files.pythonhosted.org/packages/57/69/cb0812a136550b21361335e9ffb7d459bf6d13e03cb7b015555d5143d2d6/matplotlib-3.10.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2fbbabc82fde51391c4da5006f965e36d86d95f6ee83fb594b279564a4c5d0d2", size = 8450532 },
{ url = "https://files.pythonhosted.org/packages/ea/3a/bab9deb4fb199c05e9100f94d7f1c702f78d3241e6a71b784d2b88d7bebd/matplotlib-3.10.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ad2e15300530c1a94c63cfa546e3b7864bd18ea2901317bae8bbf06a5ade6dcf", size = 8593905 },
{ url = "https://files.pythonhosted.org/packages/8b/66/742fd242f989adc1847ddf5f445815f73ad7c46aa3440690cc889cfa423c/matplotlib-3.10.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:3547d153d70233a8496859097ef0312212e2689cdf8d7ed764441c77604095ae", size = 9399609 },
{ url = "https://files.pythonhosted.org/packages/fa/d6/54cee7142cef7d910a324a7aedf335c0c147b03658b54d49ec48166f10a6/matplotlib-3.10.0-cp313-cp313-win_amd64.whl", hash = "sha256:c55b20591ced744aa04e8c3e4b7543ea4d650b6c3c4b208c08a05b4010e8b442", size = 8039076 },
{ url = "https://files.pythonhosted.org/packages/43/14/815d072dc36e88753433bfd0385113405efb947e6895ff7b4d2e8614a33b/matplotlib-3.10.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:9ade1003376731a971e398cc4ef38bb83ee8caf0aee46ac6daa4b0506db1fd06", size = 8211000 },
{ url = "https://files.pythonhosted.org/packages/9a/76/34e75f364194ec352678adcb540964be6f35ec7d3d8c75ebcb17e6839359/matplotlib-3.10.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:95b710fea129c76d30be72c3b38f330269363fbc6e570a5dd43580487380b5ff", size = 8087707 },
{ url = "https://files.pythonhosted.org/packages/c3/2b/b6bc0dff6a72d333bc7df94a66e6ce662d224e43daa8ad8ae4eaa9a77f55/matplotlib-3.10.0-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5cdbaf909887373c3e094b0318d7ff230b2ad9dcb64da7ade654182872ab2593", size = 8477384 },
{ url = "https://files.pythonhosted.org/packages/c2/2d/b5949fb2b76e9b47ab05e25a5f5f887c70de20d8b0cbc704a4e2ee71c786/matplotlib-3.10.0-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d907fddb39f923d011875452ff1eca29a9e7f21722b873e90db32e5d8ddff12e", size = 8610334 },
{ url = "https://files.pythonhosted.org/packages/d6/9a/6e3c799d5134d9af44b01c787e1360bee38cf51850506ea2e743a787700b/matplotlib-3.10.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:3b427392354d10975c1d0f4ee18aa5844640b512d5311ef32efd4dd7db106ede", size = 9406777 },
{ url = "https://files.pythonhosted.org/packages/0e/dd/e6ae97151e5ed648ab2ea48885bc33d39202b640eec7a2910e2c843f7ac0/matplotlib-3.10.0-cp313-cp313t-win_amd64.whl", hash = "sha256:5fd41b0ec7ee45cd960a8e71aea7c946a28a0b8a4dcee47d2856b2af051f334c", size = 8109742 },
{ url = "https://files.pythonhosted.org/packages/32/5f/29def7ce4e815ab939b56280976ee35afffb3bbdb43f332caee74cb8c951/matplotlib-3.10.0-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:81713dd0d103b379de4516b861d964b1d789a144103277769238c732229d7f03", size = 8155500 },
{ url = "https://files.pythonhosted.org/packages/de/6d/d570383c9f7ca799d0a54161446f9ce7b17d6c50f2994b653514bcaa108f/matplotlib-3.10.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:359f87baedb1f836ce307f0e850d12bb5f1936f70d035561f90d41d305fdacea", size = 8032398 },
{ url = "https://files.pythonhosted.org/packages/c9/b4/680aa700d99b48e8c4393fa08e9ab8c49c0555ee6f4c9c0a5e8ea8dfde5d/matplotlib-3.10.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ae80dc3a4add4665cf2faa90138384a7ffe2a4e37c58d83e115b54287c4f06ef", size = 8587361 },
]
[[package]]
name = "mdurl"
version = "0.1.2"
@@ -1107,44 +1363,165 @@ wheels = [
name = "nvidia-cublas-cu12"
version = "12.1.3.1"
source = { registry = "https://pypi.org/simple" }
resolution-markers = [
"python_full_version == '3.11.*'",
"(python_full_version == '3.11.*' and platform_machine != 'aarch64' and sys_platform == 'linux') or (python_full_version == '3.11.*' and sys_platform != 'darwin' and sys_platform != 'linux')",
"python_full_version < '3.11'",
"(python_full_version < '3.11' and platform_machine != 'aarch64' and sys_platform == 'linux') or (python_full_version < '3.11' and sys_platform != 'darwin' and sys_platform != 'linux')",
"python_full_version >= '3.13'",
"(python_full_version >= '3.13' and platform_machine != 'aarch64' and sys_platform == 'linux') or (python_full_version >= '3.13' and sys_platform != 'darwin' and sys_platform != 'linux')",
"python_full_version == '3.12.*'",
"(python_full_version == '3.12.*' and platform_machine != 'aarch64' and sys_platform == 'linux') or (python_full_version == '3.12.*' and sys_platform != 'darwin' and sys_platform != 'linux')",
]
wheels = [
{ url = "https://files.pythonhosted.org/packages/37/6d/121efd7382d5b0284239f4ab1fc1590d86d34ed4a4a2fdb13b30ca8e5740/nvidia_cublas_cu12-12.1.3.1-py3-none-manylinux1_x86_64.whl", hash = "sha256:ee53ccca76a6fc08fb9701aa95b6ceb242cdaab118c3bb152af4e579af792728", size = 410594774 },
{ url = "https://files.pythonhosted.org/packages/c5/ef/32a375b74bea706c93deea5613552f7c9104f961b21df423f5887eca713b/nvidia_cublas_cu12-12.1.3.1-py3-none-win_amd64.whl", hash = "sha256:2b964d60e8cf11b5e1073d179d85fa340c120e99b3067558f3cf98dd69d02906", size = 439918445 },
]
[[package]]
name = "nvidia-cublas-cu12"
version = "12.4.5.8"
source = { registry = "https://pypi.org/simple" }
resolution-markers = [
"python_full_version == '3.11.*'",
"(python_full_version == '3.11.*' and platform_machine != 'aarch64' and sys_platform == 'linux') or (python_full_version == '3.11.*' and sys_platform != 'darwin' and sys_platform != 'linux')",
"python_full_version < '3.11'",
"(python_full_version < '3.11' and platform_machine != 'aarch64' and sys_platform == 'linux') or (python_full_version < '3.11' and sys_platform != 'darwin' and sys_platform != 'linux')",
"python_full_version >= '3.13'",
"(python_full_version >= '3.13' and platform_machine != 'aarch64' and sys_platform == 'linux') or (python_full_version >= '3.13' and sys_platform != 'darwin' and sys_platform != 'linux')",
"python_full_version == '3.12.*'",
"(python_full_version == '3.12.*' and platform_machine != 'aarch64' and sys_platform == 'linux') or (python_full_version == '3.12.*' and sys_platform != 'darwin' and sys_platform != 'linux')",
]
wheels = [
{ url = "https://files.pythonhosted.org/packages/7f/7f/7fbae15a3982dc9595e49ce0f19332423b260045d0a6afe93cdbe2f1f624/nvidia_cublas_cu12-12.4.5.8-py3-none-manylinux2014_aarch64.whl", hash = "sha256:0f8aa1706812e00b9f19dfe0cdb3999b092ccb8ca168c0db5b8ea712456fd9b3", size = 363333771 },
{ url = "https://files.pythonhosted.org/packages/ae/71/1c91302526c45ab494c23f61c7a84aa568b8c1f9d196efa5993957faf906/nvidia_cublas_cu12-12.4.5.8-py3-none-manylinux2014_x86_64.whl", hash = "sha256:2fc8da60df463fdefa81e323eef2e36489e1c94335b5358bcb38360adf75ac9b", size = 363438805 },
{ url = "https://files.pythonhosted.org/packages/e2/2a/4f27ca96232e8b5269074a72e03b4e0d43aa68c9b965058b1684d07c6ff8/nvidia_cublas_cu12-12.4.5.8-py3-none-win_amd64.whl", hash = "sha256:5a796786da89203a0657eda402bcdcec6180254a8ac22d72213abc42069522dc", size = 396895858 },
]
[[package]]
name = "nvidia-cuda-cupti-cu12"
version = "12.1.105"
source = { registry = "https://pypi.org/simple" }
resolution-markers = [
"python_full_version == '3.11.*'",
"(python_full_version == '3.11.*' and platform_machine != 'aarch64' and sys_platform == 'linux') or (python_full_version == '3.11.*' and sys_platform != 'darwin' and sys_platform != 'linux')",
"python_full_version < '3.11'",
"(python_full_version < '3.11' and platform_machine != 'aarch64' and sys_platform == 'linux') or (python_full_version < '3.11' and sys_platform != 'darwin' and sys_platform != 'linux')",
"python_full_version >= '3.13'",
"(python_full_version >= '3.13' and platform_machine != 'aarch64' and sys_platform == 'linux') or (python_full_version >= '3.13' and sys_platform != 'darwin' and sys_platform != 'linux')",
"python_full_version == '3.12.*'",
"(python_full_version == '3.12.*' and platform_machine != 'aarch64' and sys_platform == 'linux') or (python_full_version == '3.12.*' and sys_platform != 'darwin' and sys_platform != 'linux')",
]
wheels = [
{ url = "https://files.pythonhosted.org/packages/7e/00/6b218edd739ecfc60524e585ba8e6b00554dd908de2c9c66c1af3e44e18d/nvidia_cuda_cupti_cu12-12.1.105-py3-none-manylinux1_x86_64.whl", hash = "sha256:e54fde3983165c624cb79254ae9818a456eb6e87a7fd4d56a2352c24ee542d7e", size = 14109015 },
{ url = "https://files.pythonhosted.org/packages/d0/56/0021e32ea2848c24242f6b56790bd0ccc8bf99f973ca790569c6ca028107/nvidia_cuda_cupti_cu12-12.1.105-py3-none-win_amd64.whl", hash = "sha256:bea8236d13a0ac7190bd2919c3e8e6ce1e402104276e6f9694479e48bb0eb2a4", size = 10154340 },
]
[[package]]
name = "nvidia-cuda-cupti-cu12"
version = "12.4.127"
source = { registry = "https://pypi.org/simple" }
resolution-markers = [
"python_full_version == '3.11.*'",
"(python_full_version == '3.11.*' and platform_machine != 'aarch64' and sys_platform == 'linux') or (python_full_version == '3.11.*' and sys_platform != 'darwin' and sys_platform != 'linux')",
"python_full_version < '3.11'",
"(python_full_version < '3.11' and platform_machine != 'aarch64' and sys_platform == 'linux') or (python_full_version < '3.11' and sys_platform != 'darwin' and sys_platform != 'linux')",
"python_full_version >= '3.13'",
"(python_full_version >= '3.13' and platform_machine != 'aarch64' and sys_platform == 'linux') or (python_full_version >= '3.13' and sys_platform != 'darwin' and sys_platform != 'linux')",
"python_full_version == '3.12.*'",
"(python_full_version == '3.12.*' and platform_machine != 'aarch64' and sys_platform == 'linux') or (python_full_version == '3.12.*' and sys_platform != 'darwin' and sys_platform != 'linux')",
]
wheels = [
{ url = "https://files.pythonhosted.org/packages/93/b5/9fb3d00386d3361b03874246190dfec7b206fd74e6e287b26a8fcb359d95/nvidia_cuda_cupti_cu12-12.4.127-py3-none-manylinux2014_aarch64.whl", hash = "sha256:79279b35cf6f91da114182a5ce1864997fd52294a87a16179ce275773799458a", size = 12354556 },
{ url = "https://files.pythonhosted.org/packages/67/42/f4f60238e8194a3106d06a058d494b18e006c10bb2b915655bd9f6ea4cb1/nvidia_cuda_cupti_cu12-12.4.127-py3-none-manylinux2014_x86_64.whl", hash = "sha256:9dec60f5ac126f7bb551c055072b69d85392b13311fcc1bcda2202d172df30fb", size = 13813957 },
{ url = "https://files.pythonhosted.org/packages/f3/79/8cf313ec17c58ccebc965568e5bcb265cdab0a1df99c4e674bb7a3b99bfe/nvidia_cuda_cupti_cu12-12.4.127-py3-none-win_amd64.whl", hash = "sha256:5688d203301ab051449a2b1cb6690fbe90d2b372f411521c86018b950f3d7922", size = 9938035 },
]
[[package]]
name = "nvidia-cuda-nvrtc-cu12"
version = "12.1.105"
source = { registry = "https://pypi.org/simple" }
resolution-markers = [
"python_full_version == '3.11.*'",
"(python_full_version == '3.11.*' and platform_machine != 'aarch64' and sys_platform == 'linux') or (python_full_version == '3.11.*' and sys_platform != 'darwin' and sys_platform != 'linux')",
"python_full_version < '3.11'",
"(python_full_version < '3.11' and platform_machine != 'aarch64' and sys_platform == 'linux') or (python_full_version < '3.11' and sys_platform != 'darwin' and sys_platform != 'linux')",
"python_full_version >= '3.13'",
"(python_full_version >= '3.13' and platform_machine != 'aarch64' and sys_platform == 'linux') or (python_full_version >= '3.13' and sys_platform != 'darwin' and sys_platform != 'linux')",
"python_full_version == '3.12.*'",
"(python_full_version == '3.12.*' and platform_machine != 'aarch64' and sys_platform == 'linux') or (python_full_version == '3.12.*' and sys_platform != 'darwin' and sys_platform != 'linux')",
]
wheels = [
{ url = "https://files.pythonhosted.org/packages/b6/9f/c64c03f49d6fbc56196664d05dba14e3a561038a81a638eeb47f4d4cfd48/nvidia_cuda_nvrtc_cu12-12.1.105-py3-none-manylinux1_x86_64.whl", hash = "sha256:339b385f50c309763ca65456ec75e17bbefcbbf2893f462cb8b90584cd27a1c2", size = 23671734 },
{ url = "https://files.pythonhosted.org/packages/ad/1d/f76987c4f454eb86e0b9a0e4f57c3bf1ac1d13ad13cd1a4da4eb0e0c0ce9/nvidia_cuda_nvrtc_cu12-12.1.105-py3-none-win_amd64.whl", hash = "sha256:0a98a522d9ff138b96c010a65e145dc1b4850e9ecb75a0172371793752fd46ed", size = 19331863 },
]
[[package]]
name = "nvidia-cuda-nvrtc-cu12"
version = "12.4.127"
source = { registry = "https://pypi.org/simple" }
resolution-markers = [
"python_full_version == '3.11.*'",
"(python_full_version == '3.11.*' and platform_machine != 'aarch64' and sys_platform == 'linux') or (python_full_version == '3.11.*' and sys_platform != 'darwin' and sys_platform != 'linux')",
"python_full_version < '3.11'",
"(python_full_version < '3.11' and platform_machine != 'aarch64' and sys_platform == 'linux') or (python_full_version < '3.11' and sys_platform != 'darwin' and sys_platform != 'linux')",
"python_full_version >= '3.13'",
"(python_full_version >= '3.13' and platform_machine != 'aarch64' and sys_platform == 'linux') or (python_full_version >= '3.13' and sys_platform != 'darwin' and sys_platform != 'linux')",
"python_full_version == '3.12.*'",
"(python_full_version == '3.12.*' and platform_machine != 'aarch64' and sys_platform == 'linux') or (python_full_version == '3.12.*' and sys_platform != 'darwin' and sys_platform != 'linux')",
]
wheels = [
{ url = "https://files.pythonhosted.org/packages/77/aa/083b01c427e963ad0b314040565ea396f914349914c298556484f799e61b/nvidia_cuda_nvrtc_cu12-12.4.127-py3-none-manylinux2014_aarch64.whl", hash = "sha256:0eedf14185e04b76aa05b1fea04133e59f465b6f960c0cbf4e37c3cb6b0ea198", size = 24133372 },
{ url = "https://files.pythonhosted.org/packages/2c/14/91ae57cd4db3f9ef7aa99f4019cfa8d54cb4caa7e00975df6467e9725a9f/nvidia_cuda_nvrtc_cu12-12.4.127-py3-none-manylinux2014_x86_64.whl", hash = "sha256:a178759ebb095827bd30ef56598ec182b85547f1508941a3d560eb7ea1fbf338", size = 24640306 },
{ url = "https://files.pythonhosted.org/packages/7c/30/8c844bfb770f045bcd8b2c83455c5afb45983e1a8abf0c4e5297b481b6a5/nvidia_cuda_nvrtc_cu12-12.4.127-py3-none-win_amd64.whl", hash = "sha256:a961b2f1d5f17b14867c619ceb99ef6fcec12e46612711bcec78eb05068a60ec", size = 19751955 },
]
[[package]]
name = "nvidia-cuda-runtime-cu12"
version = "12.1.105"
source = { registry = "https://pypi.org/simple" }
resolution-markers = [
"python_full_version == '3.11.*'",
"(python_full_version == '3.11.*' and platform_machine != 'aarch64' and sys_platform == 'linux') or (python_full_version == '3.11.*' and sys_platform != 'darwin' and sys_platform != 'linux')",
"python_full_version < '3.11'",
"(python_full_version < '3.11' and platform_machine != 'aarch64' and sys_platform == 'linux') or (python_full_version < '3.11' and sys_platform != 'darwin' and sys_platform != 'linux')",
"python_full_version >= '3.13'",
"(python_full_version >= '3.13' and platform_machine != 'aarch64' and sys_platform == 'linux') or (python_full_version >= '3.13' and sys_platform != 'darwin' and sys_platform != 'linux')",
"python_full_version == '3.12.*'",
"(python_full_version == '3.12.*' and platform_machine != 'aarch64' and sys_platform == 'linux') or (python_full_version == '3.12.*' and sys_platform != 'darwin' and sys_platform != 'linux')",
]
wheels = [
{ url = "https://files.pythonhosted.org/packages/eb/d5/c68b1d2cdfcc59e72e8a5949a37ddb22ae6cade80cd4a57a84d4c8b55472/nvidia_cuda_runtime_cu12-12.1.105-py3-none-manylinux1_x86_64.whl", hash = "sha256:6e258468ddf5796e25f1dc591a31029fa317d97a0a94ed93468fc86301d61e40", size = 823596 },
{ url = "https://files.pythonhosted.org/packages/9f/e2/7a2b4b5064af56ea8ea2d8b2776c0f2960d95c88716138806121ae52a9c9/nvidia_cuda_runtime_cu12-12.1.105-py3-none-win_amd64.whl", hash = "sha256:dfb46ef84d73fababab44cf03e3b83f80700d27ca300e537f85f636fac474344", size = 821226 },
]
[[package]]
name = "nvidia-cuda-runtime-cu12"
version = "12.4.127"
source = { registry = "https://pypi.org/simple" }
resolution-markers = [
"python_full_version == '3.11.*'",
"(python_full_version == '3.11.*' and platform_machine != 'aarch64' and sys_platform == 'linux') or (python_full_version == '3.11.*' and sys_platform != 'darwin' and sys_platform != 'linux')",
"python_full_version < '3.11'",
"(python_full_version < '3.11' and platform_machine != 'aarch64' and sys_platform == 'linux') or (python_full_version < '3.11' and sys_platform != 'darwin' and sys_platform != 'linux')",
"python_full_version >= '3.13'",
"(python_full_version >= '3.13' and platform_machine != 'aarch64' and sys_platform == 'linux') or (python_full_version >= '3.13' and sys_platform != 'darwin' and sys_platform != 'linux')",
"python_full_version == '3.12.*'",
"(python_full_version == '3.12.*' and platform_machine != 'aarch64' and sys_platform == 'linux') or (python_full_version == '3.12.*' and sys_platform != 'darwin' and sys_platform != 'linux')",
]
wheels = [
{ url = "https://files.pythonhosted.org/packages/a1/aa/b656d755f474e2084971e9a297def515938d56b466ab39624012070cb773/nvidia_cuda_runtime_cu12-12.4.127-py3-none-manylinux2014_aarch64.whl", hash = "sha256:961fe0e2e716a2a1d967aab7caee97512f71767f852f67432d572e36cb3a11f3", size = 894177 },
{ url = "https://files.pythonhosted.org/packages/ea/27/1795d86fe88ef397885f2e580ac37628ed058a92ed2c39dc8eac3adf0619/nvidia_cuda_runtime_cu12-12.4.127-py3-none-manylinux2014_x86_64.whl", hash = "sha256:64403288fa2136ee8e467cdc9c9427e0434110899d07c779f25b5c068934faa5", size = 883737 },
{ url = "https://files.pythonhosted.org/packages/a8/8b/450e93fab75d85a69b50ea2d5fdd4ff44541e0138db16f9cd90123ef4de4/nvidia_cuda_runtime_cu12-12.4.127-py3-none-win_amd64.whl", hash = "sha256:09c2e35f48359752dfa822c09918211844a3d93c100a715d79b59591130c5e1e", size = 878808 },
]
[[package]]
name = "nvidia-cudnn-cu12"
version = "9.1.0.70"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "nvidia-cublas-cu12" },
{ name = "nvidia-cublas-cu12", version = "12.1.3.1", source = { registry = "https://pypi.org/simple" }, marker = "extra == 'extra-14-kokoro-fastapi-gpu'" },
{ name = "nvidia-cublas-cu12", version = "12.4.5.8", source = { registry = "https://pypi.org/simple" }, marker = "extra == 'extra-14-kokoro-fastapi-cpu'" },
]
wheels = [
{ url = "https://files.pythonhosted.org/packages/9f/fd/713452cd72343f682b1c7b9321e23829f00b842ceaedcda96e742ea0b0b3/nvidia_cudnn_cu12-9.1.0.70-py3-none-manylinux2014_x86_64.whl", hash = "sha256:165764f44ef8c61fcdfdfdbe769d687e06374059fbb388b6c89ecb0e28793a6f", size = 664752741 },
@@ -1155,27 +1532,100 @@ wheels = [
name = "nvidia-cufft-cu12"
version = "11.0.2.54"
source = { registry = "https://pypi.org/simple" }
resolution-markers = [
"python_full_version == '3.11.*'",
"(python_full_version == '3.11.*' and platform_machine != 'aarch64' and sys_platform == 'linux') or (python_full_version == '3.11.*' and sys_platform != 'darwin' and sys_platform != 'linux')",
"python_full_version < '3.11'",
"(python_full_version < '3.11' and platform_machine != 'aarch64' and sys_platform == 'linux') or (python_full_version < '3.11' and sys_platform != 'darwin' and sys_platform != 'linux')",
"python_full_version >= '3.13'",
"(python_full_version >= '3.13' and platform_machine != 'aarch64' and sys_platform == 'linux') or (python_full_version >= '3.13' and sys_platform != 'darwin' and sys_platform != 'linux')",
"python_full_version == '3.12.*'",
"(python_full_version == '3.12.*' and platform_machine != 'aarch64' and sys_platform == 'linux') or (python_full_version == '3.12.*' and sys_platform != 'darwin' and sys_platform != 'linux')",
]
wheels = [
{ url = "https://files.pythonhosted.org/packages/86/94/eb540db023ce1d162e7bea9f8f5aa781d57c65aed513c33ee9a5123ead4d/nvidia_cufft_cu12-11.0.2.54-py3-none-manylinux1_x86_64.whl", hash = "sha256:794e3948a1aa71fd817c3775866943936774d1c14e7628c74f6f7417224cdf56", size = 121635161 },
{ url = "https://files.pythonhosted.org/packages/f7/57/7927a3aa0e19927dfed30256d1c854caf991655d847a4e7c01fe87e3d4ac/nvidia_cufft_cu12-11.0.2.54-py3-none-win_amd64.whl", hash = "sha256:d9ac353f78ff89951da4af698f80870b1534ed69993f10a4cf1d96f21357e253", size = 121344196 },
]
[[package]]
name = "nvidia-cufft-cu12"
version = "11.2.1.3"
source = { registry = "https://pypi.org/simple" }
resolution-markers = [
"python_full_version == '3.11.*'",
"(python_full_version == '3.11.*' and platform_machine != 'aarch64' and sys_platform == 'linux') or (python_full_version == '3.11.*' and sys_platform != 'darwin' and sys_platform != 'linux')",
"python_full_version < '3.11'",
"(python_full_version < '3.11' and platform_machine != 'aarch64' and sys_platform == 'linux') or (python_full_version < '3.11' and sys_platform != 'darwin' and sys_platform != 'linux')",
"python_full_version >= '3.13'",
"(python_full_version >= '3.13' and platform_machine != 'aarch64' and sys_platform == 'linux') or (python_full_version >= '3.13' and sys_platform != 'darwin' and sys_platform != 'linux')",
"python_full_version == '3.12.*'",
"(python_full_version == '3.12.*' and platform_machine != 'aarch64' and sys_platform == 'linux') or (python_full_version == '3.12.*' and sys_platform != 'darwin' and sys_platform != 'linux')",
]
dependencies = [
{ name = "nvidia-nvjitlink-cu12" },
]
wheels = [
{ url = "https://files.pythonhosted.org/packages/7a/8a/0e728f749baca3fbeffad762738276e5df60851958be7783af121a7221e7/nvidia_cufft_cu12-11.2.1.3-py3-none-manylinux2014_aarch64.whl", hash = "sha256:5dad8008fc7f92f5ddfa2101430917ce2ffacd86824914c82e28990ad7f00399", size = 211422548 },
{ url = "https://files.pythonhosted.org/packages/27/94/3266821f65b92b3138631e9c8e7fe1fb513804ac934485a8d05776e1dd43/nvidia_cufft_cu12-11.2.1.3-py3-none-manylinux2014_x86_64.whl", hash = "sha256:f083fc24912aa410be21fa16d157fed2055dab1cc4b6934a0e03cba69eb242b9", size = 211459117 },
{ url = "https://files.pythonhosted.org/packages/f6/ee/3f3f8e9874f0be5bbba8fb4b62b3de050156d159f8b6edc42d6f1074113b/nvidia_cufft_cu12-11.2.1.3-py3-none-win_amd64.whl", hash = "sha256:d802f4954291101186078ccbe22fc285a902136f974d369540fd4a5333d1440b", size = 210576476 },
]
[[package]]
name = "nvidia-curand-cu12"
version = "10.3.2.106"
source = { registry = "https://pypi.org/simple" }
resolution-markers = [
"python_full_version == '3.11.*'",
"(python_full_version == '3.11.*' and platform_machine != 'aarch64' and sys_platform == 'linux') or (python_full_version == '3.11.*' and sys_platform != 'darwin' and sys_platform != 'linux')",
"python_full_version < '3.11'",
"(python_full_version < '3.11' and platform_machine != 'aarch64' and sys_platform == 'linux') or (python_full_version < '3.11' and sys_platform != 'darwin' and sys_platform != 'linux')",
"python_full_version >= '3.13'",
"(python_full_version >= '3.13' and platform_machine != 'aarch64' and sys_platform == 'linux') or (python_full_version >= '3.13' and sys_platform != 'darwin' and sys_platform != 'linux')",
"python_full_version == '3.12.*'",
"(python_full_version == '3.12.*' and platform_machine != 'aarch64' and sys_platform == 'linux') or (python_full_version == '3.12.*' and sys_platform != 'darwin' and sys_platform != 'linux')",
]
wheels = [
{ url = "https://files.pythonhosted.org/packages/44/31/4890b1c9abc496303412947fc7dcea3d14861720642b49e8ceed89636705/nvidia_curand_cu12-10.3.2.106-py3-none-manylinux1_x86_64.whl", hash = "sha256:9d264c5036dde4e64f1de8c50ae753237c12e0b1348738169cd0f8a536c0e1e0", size = 56467784 },
{ url = "https://files.pythonhosted.org/packages/5c/97/4c9c7c79efcdf5b70374241d48cf03b94ef6707fd18ea0c0f53684931d0b/nvidia_curand_cu12-10.3.2.106-py3-none-win_amd64.whl", hash = "sha256:75b6b0c574c0037839121317e17fd01f8a69fd2ef8e25853d826fec30bdba74a", size = 55995813 },
]
[[package]]
name = "nvidia-curand-cu12"
version = "10.3.5.147"
source = { registry = "https://pypi.org/simple" }
resolution-markers = [
"python_full_version == '3.11.*'",
"(python_full_version == '3.11.*' and platform_machine != 'aarch64' and sys_platform == 'linux') or (python_full_version == '3.11.*' and sys_platform != 'darwin' and sys_platform != 'linux')",
"python_full_version < '3.11'",
"(python_full_version < '3.11' and platform_machine != 'aarch64' and sys_platform == 'linux') or (python_full_version < '3.11' and sys_platform != 'darwin' and sys_platform != 'linux')",
"python_full_version >= '3.13'",
"(python_full_version >= '3.13' and platform_machine != 'aarch64' and sys_platform == 'linux') or (python_full_version >= '3.13' and sys_platform != 'darwin' and sys_platform != 'linux')",
"python_full_version == '3.12.*'",
"(python_full_version == '3.12.*' and platform_machine != 'aarch64' and sys_platform == 'linux') or (python_full_version == '3.12.*' and sys_platform != 'darwin' and sys_platform != 'linux')",
]
wheels = [
{ url = "https://files.pythonhosted.org/packages/80/9c/a79180e4d70995fdf030c6946991d0171555c6edf95c265c6b2bf7011112/nvidia_curand_cu12-10.3.5.147-py3-none-manylinux2014_aarch64.whl", hash = "sha256:1f173f09e3e3c76ab084aba0de819c49e56614feae5c12f69883f4ae9bb5fad9", size = 56314811 },
{ url = "https://files.pythonhosted.org/packages/8a/6d/44ad094874c6f1b9c654f8ed939590bdc408349f137f9b98a3a23ccec411/nvidia_curand_cu12-10.3.5.147-py3-none-manylinux2014_x86_64.whl", hash = "sha256:a88f583d4e0bb643c49743469964103aa59f7f708d862c3ddb0fc07f851e3b8b", size = 56305206 },
{ url = "https://files.pythonhosted.org/packages/1c/22/2573503d0d4e45673c263a313f79410e110eb562636b0617856fdb2ff5f6/nvidia_curand_cu12-10.3.5.147-py3-none-win_amd64.whl", hash = "sha256:f307cc191f96efe9e8f05a87096abc20d08845a841889ef78cb06924437f6771", size = 55799918 },
]
[[package]]
name = "nvidia-cusolver-cu12"
version = "11.4.5.107"
source = { registry = "https://pypi.org/simple" }
resolution-markers = [
"python_full_version == '3.11.*'",
"(python_full_version == '3.11.*' and platform_machine != 'aarch64' and sys_platform == 'linux') or (python_full_version == '3.11.*' and sys_platform != 'darwin' and sys_platform != 'linux')",
"python_full_version < '3.11'",
"(python_full_version < '3.11' and platform_machine != 'aarch64' and sys_platform == 'linux') or (python_full_version < '3.11' and sys_platform != 'darwin' and sys_platform != 'linux')",
"python_full_version >= '3.13'",
"(python_full_version >= '3.13' and platform_machine != 'aarch64' and sys_platform == 'linux') or (python_full_version >= '3.13' and sys_platform != 'darwin' and sys_platform != 'linux')",
"python_full_version == '3.12.*'",
"(python_full_version == '3.12.*' and platform_machine != 'aarch64' and sys_platform == 'linux') or (python_full_version == '3.12.*' and sys_platform != 'darwin' and sys_platform != 'linux')",
]
dependencies = [
{ name = "nvidia-cublas-cu12" },
{ name = "nvidia-cusparse-cu12" },
{ name = "nvidia-cublas-cu12", version = "12.1.3.1", source = { registry = "https://pypi.org/simple" } },
{ name = "nvidia-cusparse-cu12", version = "12.1.0.106", source = { registry = "https://pypi.org/simple" } },
{ name = "nvidia-nvjitlink-cu12" },
]
wheels = [
@@ -1183,10 +1633,45 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/b8/80/8fca0bf819122a631c3976b6fc517c1b10741b643b94046bd8dd451522c5/nvidia_cusolver_cu12-11.4.5.107-py3-none-win_amd64.whl", hash = "sha256:74e0c3a24c78612192a74fcd90dd117f1cf21dea4822e66d89e8ea80e3cd2da5", size = 121643081 },
]
[[package]]
name = "nvidia-cusolver-cu12"
version = "11.6.1.9"
source = { registry = "https://pypi.org/simple" }
resolution-markers = [
"python_full_version == '3.11.*'",
"(python_full_version == '3.11.*' and platform_machine != 'aarch64' and sys_platform == 'linux') or (python_full_version == '3.11.*' and sys_platform != 'darwin' and sys_platform != 'linux')",
"python_full_version < '3.11'",
"(python_full_version < '3.11' and platform_machine != 'aarch64' and sys_platform == 'linux') or (python_full_version < '3.11' and sys_platform != 'darwin' and sys_platform != 'linux')",
"python_full_version >= '3.13'",
"(python_full_version >= '3.13' and platform_machine != 'aarch64' and sys_platform == 'linux') or (python_full_version >= '3.13' and sys_platform != 'darwin' and sys_platform != 'linux')",
"python_full_version == '3.12.*'",
"(python_full_version == '3.12.*' and platform_machine != 'aarch64' and sys_platform == 'linux') or (python_full_version == '3.12.*' and sys_platform != 'darwin' and sys_platform != 'linux')",
]
dependencies = [
{ name = "nvidia-cublas-cu12", version = "12.4.5.8", source = { registry = "https://pypi.org/simple" } },
{ name = "nvidia-cusparse-cu12", version = "12.3.1.170", source = { registry = "https://pypi.org/simple" } },
{ name = "nvidia-nvjitlink-cu12" },
]
wheels = [
{ url = "https://files.pythonhosted.org/packages/46/6b/a5c33cf16af09166845345275c34ad2190944bcc6026797a39f8e0a282e0/nvidia_cusolver_cu12-11.6.1.9-py3-none-manylinux2014_aarch64.whl", hash = "sha256:d338f155f174f90724bbde3758b7ac375a70ce8e706d70b018dd3375545fc84e", size = 127634111 },
{ url = "https://files.pythonhosted.org/packages/3a/e1/5b9089a4b2a4790dfdea8b3a006052cfecff58139d5a4e34cb1a51df8d6f/nvidia_cusolver_cu12-11.6.1.9-py3-none-manylinux2014_x86_64.whl", hash = "sha256:19e33fa442bcfd085b3086c4ebf7e8debc07cfe01e11513cc6d332fd918ac260", size = 127936057 },
{ url = "https://files.pythonhosted.org/packages/f2/be/d435b7b020e854d5d5a682eb5de4328fd62f6182507406f2818280e206e2/nvidia_cusolver_cu12-11.6.1.9-py3-none-win_amd64.whl", hash = "sha256:e77314c9d7b694fcebc84f58989f3aa4fb4cb442f12ca1a9bde50f5e8f6d1b9c", size = 125224015 },
]
[[package]]
name = "nvidia-cusparse-cu12"
version = "12.1.0.106"
source = { registry = "https://pypi.org/simple" }
resolution-markers = [
"python_full_version == '3.11.*'",
"(python_full_version == '3.11.*' and platform_machine != 'aarch64' and sys_platform == 'linux') or (python_full_version == '3.11.*' and sys_platform != 'darwin' and sys_platform != 'linux')",
"python_full_version < '3.11'",
"(python_full_version < '3.11' and platform_machine != 'aarch64' and sys_platform == 'linux') or (python_full_version < '3.11' and sys_platform != 'darwin' and sys_platform != 'linux')",
"python_full_version >= '3.13'",
"(python_full_version >= '3.13' and platform_machine != 'aarch64' and sys_platform == 'linux') or (python_full_version >= '3.13' and sys_platform != 'darwin' and sys_platform != 'linux')",
"python_full_version == '3.12.*'",
"(python_full_version == '3.12.*' and platform_machine != 'aarch64' and sys_platform == 'linux') or (python_full_version == '3.12.*' and sys_platform != 'darwin' and sys_platform != 'linux')",
]
dependencies = [
{ name = "nvidia-nvjitlink-cu12" },
]
@@ -1195,6 +1680,29 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/0f/95/48fdbba24c93614d1ecd35bc6bdc6087bd17cbacc3abc4b05a9c2a1ca232/nvidia_cusparse_cu12-12.1.0.106-py3-none-win_amd64.whl", hash = "sha256:b798237e81b9719373e8fae8d4f091b70a0cf09d9d85c95a557e11df2d8e9a5a", size = 195414588 },
]
[[package]]
name = "nvidia-cusparse-cu12"
version = "12.3.1.170"
source = { registry = "https://pypi.org/simple" }
resolution-markers = [
"python_full_version == '3.11.*'",
"(python_full_version == '3.11.*' and platform_machine != 'aarch64' and sys_platform == 'linux') or (python_full_version == '3.11.*' and sys_platform != 'darwin' and sys_platform != 'linux')",
"python_full_version < '3.11'",
"(python_full_version < '3.11' and platform_machine != 'aarch64' and sys_platform == 'linux') or (python_full_version < '3.11' and sys_platform != 'darwin' and sys_platform != 'linux')",
"python_full_version >= '3.13'",
"(python_full_version >= '3.13' and platform_machine != 'aarch64' and sys_platform == 'linux') or (python_full_version >= '3.13' and sys_platform != 'darwin' and sys_platform != 'linux')",
"python_full_version == '3.12.*'",
"(python_full_version == '3.12.*' and platform_machine != 'aarch64' and sys_platform == 'linux') or (python_full_version == '3.12.*' and sys_platform != 'darwin' and sys_platform != 'linux')",
]
dependencies = [
{ name = "nvidia-nvjitlink-cu12" },
]
wheels = [
{ url = "https://files.pythonhosted.org/packages/96/a9/c0d2f83a53d40a4a41be14cea6a0bf9e668ffcf8b004bd65633f433050c0/nvidia_cusparse_cu12-12.3.1.170-py3-none-manylinux2014_aarch64.whl", hash = "sha256:9d32f62896231ebe0480efd8a7f702e143c98cfaa0e8a76df3386c1ba2b54df3", size = 207381987 },
{ url = "https://files.pythonhosted.org/packages/db/f7/97a9ea26ed4bbbfc2d470994b8b4f338ef663be97b8f677519ac195e113d/nvidia_cusparse_cu12-12.3.1.170-py3-none-manylinux2014_x86_64.whl", hash = "sha256:ea4f11a2904e2a8dc4b1833cc1b5181cde564edd0d5cd33e3c168eff2d1863f1", size = 207454763 },
{ url = "https://files.pythonhosted.org/packages/a2/e0/3155ca539760a8118ec94cc279b34293309bcd14011fc724f87f31988843/nvidia_cusparse_cu12-12.3.1.170-py3-none-win_amd64.whl", hash = "sha256:9bc90fb087bc7b4c15641521f31c0371e9a612fc2ba12c338d3ae032e6b6797f", size = 204684315 },
]
[[package]]
name = "nvidia-nccl-cu12"
version = "2.21.5"
@@ -1205,23 +1713,53 @@ wheels = [
[[package]]
name = "nvidia-nvjitlink-cu12"
version = "12.6.85"
version = "12.4.127"
source = { registry = "https://pypi.org/simple" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/9d/d7/c5383e47c7e9bf1c99d5bd2a8c935af2b6d705ad831a7ec5c97db4d82f4f/nvidia_nvjitlink_cu12-12.6.85-py3-none-manylinux2010_x86_64.manylinux_2_12_x86_64.whl", hash = "sha256:eedc36df9e88b682efe4309aa16b5b4e78c2407eac59e8c10a6a47535164369a", size = 19744971 },
{ url = "https://files.pythonhosted.org/packages/31/db/dc71113d441f208cdfe7ae10d4983884e13f464a6252450693365e166dcf/nvidia_nvjitlink_cu12-12.6.85-py3-none-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:cf4eaa7d4b6b543ffd69d6abfb11efdeb2db48270d94dfd3a452c24150829e41", size = 19270338 },
{ url = "https://files.pythonhosted.org/packages/89/76/93c1467b1387387440a4d25102d86b7794535449b689f8e2dc22c1c8ff7f/nvidia_nvjitlink_cu12-12.6.85-py3-none-win_amd64.whl", hash = "sha256:e61120e52ed675747825cdd16febc6a0730537451d867ee58bee3853b1b13d1c", size = 161908572 },
{ url = "https://files.pythonhosted.org/packages/02/45/239d52c05074898a80a900f49b1615d81c07fceadd5ad6c4f86a987c0bc4/nvidia_nvjitlink_cu12-12.4.127-py3-none-manylinux2014_aarch64.whl", hash = "sha256:4abe7fef64914ccfa909bc2ba39739670ecc9e820c83ccc7a6ed414122599b83", size = 20552510 },
{ url = "https://files.pythonhosted.org/packages/ff/ff/847841bacfbefc97a00036e0fce5a0f086b640756dc38caea5e1bb002655/nvidia_nvjitlink_cu12-12.4.127-py3-none-manylinux2014_x86_64.whl", hash = "sha256:06b3b9b25bf3f8af351d664978ca26a16d2c5127dbd53c0497e28d1fb9611d57", size = 21066810 },
{ url = "https://files.pythonhosted.org/packages/81/19/0babc919031bee42620257b9a911c528f05fb2688520dcd9ca59159ffea8/nvidia_nvjitlink_cu12-12.4.127-py3-none-win_amd64.whl", hash = "sha256:fd9020c501d27d135f983c6d3e244b197a7ccad769e34df53a42e276b0e25fa1", size = 95336325 },
]
[[package]]
name = "nvidia-nvtx-cu12"
version = "12.1.105"
source = { registry = "https://pypi.org/simple" }
resolution-markers = [
"python_full_version == '3.11.*'",
"(python_full_version == '3.11.*' and platform_machine != 'aarch64' and sys_platform == 'linux') or (python_full_version == '3.11.*' and sys_platform != 'darwin' and sys_platform != 'linux')",
"python_full_version < '3.11'",
"(python_full_version < '3.11' and platform_machine != 'aarch64' and sys_platform == 'linux') or (python_full_version < '3.11' and sys_platform != 'darwin' and sys_platform != 'linux')",
"python_full_version >= '3.13'",
"(python_full_version >= '3.13' and platform_machine != 'aarch64' and sys_platform == 'linux') or (python_full_version >= '3.13' and sys_platform != 'darwin' and sys_platform != 'linux')",
"python_full_version == '3.12.*'",
"(python_full_version == '3.12.*' and platform_machine != 'aarch64' and sys_platform == 'linux') or (python_full_version == '3.12.*' and sys_platform != 'darwin' and sys_platform != 'linux')",
]
wheels = [
{ url = "https://files.pythonhosted.org/packages/da/d3/8057f0587683ed2fcd4dbfbdfdfa807b9160b809976099d36b8f60d08f03/nvidia_nvtx_cu12-12.1.105-py3-none-manylinux1_x86_64.whl", hash = "sha256:dc21cf308ca5691e7c04d962e213f8a4aa9bbfa23d95412f452254c2caeb09e5", size = 99138 },
{ url = "https://files.pythonhosted.org/packages/b8/d7/bd7cb2d95ac6ac6e8d05bfa96cdce69619f1ef2808e072919044c2d47a8c/nvidia_nvtx_cu12-12.1.105-py3-none-win_amd64.whl", hash = "sha256:65f4d98982b31b60026e0e6de73fbdfc09d08a96f4656dd3665ca616a11e1e82", size = 66307 },
]
[[package]]
name = "nvidia-nvtx-cu12"
version = "12.4.127"
source = { registry = "https://pypi.org/simple" }
resolution-markers = [
"python_full_version == '3.11.*'",
"(python_full_version == '3.11.*' and platform_machine != 'aarch64' and sys_platform == 'linux') or (python_full_version == '3.11.*' and sys_platform != 'darwin' and sys_platform != 'linux')",
"python_full_version < '3.11'",
"(python_full_version < '3.11' and platform_machine != 'aarch64' and sys_platform == 'linux') or (python_full_version < '3.11' and sys_platform != 'darwin' and sys_platform != 'linux')",
"python_full_version >= '3.13'",
"(python_full_version >= '3.13' and platform_machine != 'aarch64' and sys_platform == 'linux') or (python_full_version >= '3.13' and sys_platform != 'darwin' and sys_platform != 'linux')",
"python_full_version == '3.12.*'",
"(python_full_version == '3.12.*' and platform_machine != 'aarch64' and sys_platform == 'linux') or (python_full_version == '3.12.*' and sys_platform != 'darwin' and sys_platform != 'linux')",
]
wheels = [
{ url = "https://files.pythonhosted.org/packages/06/39/471f581edbb7804b39e8063d92fc8305bdc7a80ae5c07dbe6ea5c50d14a5/nvidia_nvtx_cu12-12.4.127-py3-none-manylinux2014_aarch64.whl", hash = "sha256:7959ad635db13edf4fc65c06a6e9f9e55fc2f92596db928d169c0bb031e88ef3", size = 100417 },
{ url = "https://files.pythonhosted.org/packages/87/20/199b8713428322a2f22b722c62b8cc278cc53dffa9705d744484b5035ee9/nvidia_nvtx_cu12-12.4.127-py3-none-manylinux2014_x86_64.whl", hash = "sha256:781e950d9b9f60d8241ccea575b32f5105a5baf4c2351cab5256a24869f12a1a", size = 99144 },
{ url = "https://files.pythonhosted.org/packages/54/1b/f77674fbb73af98843be25803bbd3b9a4f0a96c75b8d33a2854a5c7d2d77/nvidia_nvtx_cu12-12.4.127-py3-none-win_amd64.whl", hash = "sha256:641dccaaa1139f3ffb0d3164b4b84f9d253397e38246a4f2f36728b48566d485", size = 66307 },
]
[[package]]
name = "onnxruntime"
version = "1.20.1"
@@ -1258,6 +1796,30 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/14/56/fd990ca222cef4f9f4a9400567b9a15b220dee2eafffb16b2adbc55c8281/onnxruntime-1.20.1-cp313-cp313t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0df6f2df83d61f46e842dbcde610ede27218947c33e994545a22333491e72a3b", size = 13337040 },
]
[[package]]
name = "onnxruntime-gpu"
version = "1.20.1"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "coloredlogs" },
{ name = "flatbuffers" },
{ name = "numpy" },
{ name = "packaging" },
{ name = "protobuf" },
{ name = "sympy" },
]
wheels = [
{ url = "https://files.pythonhosted.org/packages/45/35/4f2df54e3161c61304d9463b5f2ee52c6408b5bca5960029bb787777d913/onnxruntime_gpu-1.20.1-cp310-cp310-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a5b4e1641db48752118dda353b8614c6d6570344062b58faea70b5350c41cf68", size = 291523128 },
{ url = "https://files.pythonhosted.org/packages/d6/54/e2fb1eadc21b6f8347860e9d53ad0ed34fec462cf51ab1b4303027503706/onnxruntime_gpu-1.20.1-cp310-cp310-win_amd64.whl", hash = "sha256:5e2a1d659924f19425e6b2fde1f36c0998b398a8a9b76a2b266d4fa518cfe8ed", size = 279696832 },
{ url = "https://files.pythonhosted.org/packages/e0/a5/5c2287d61f359c7342e9d59d1e3dd728a982dea85f846c7af305a801c3ca/onnxruntime_gpu-1.20.1-cp311-cp311-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:1795e8bc6f9a1488a4d51d242edc4232a5ae60ec44ab4d4b0a7c65b3d17fcbff", size = 291519550 },
{ url = "https://files.pythonhosted.org/packages/be/33/6f21ea03dc4eeaa3f049127f5f17360c38799134eefd1e524f0296b23cb4/onnxruntime_gpu-1.20.1-cp311-cp311-win_amd64.whl", hash = "sha256:1348e6a0af9e8f5e8e1cfc379b70356ea40497932f5bc7f858501fe7940794ff", size = 279698022 },
{ url = "https://files.pythonhosted.org/packages/91/a8/6984a2fb070be372a866108e3e85c9eb6e8f0378a8567a66967d80befb75/onnxruntime_gpu-1.20.1-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:1951f96cd534c6151721e552606d0d792ea6a4c3e57e2f10eed17cca8105e953", size = 291510989 },
{ url = "https://files.pythonhosted.org/packages/e8/15/0a9887bd2931b7b7e5f36995f266d51e66c0e02f84d18cab65133b111ac1/onnxruntime_gpu-1.20.1-cp312-cp312-win_amd64.whl", hash = "sha256:696de465884480fad1deffd936bee05c6f10fdfe4f5fcef1927a71f5d28ed9ef", size = 279699210 },
{ url = "https://files.pythonhosted.org/packages/51/86/de1d7e513e178265e5fac8fd7fd529decd06416cbe316ce77a5b0ff6915b/onnxruntime_gpu-1.20.1-cp313-cp313-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:8337d7badb59b3f4af530ed6437a7471d48ab2e45f7b253a355c8db9cabee67c", size = 291512781 },
{ url = "https://files.pythonhosted.org/packages/c7/87/1361640e9277622591926f84d10fcc289c20be03e1ff5480d66c3cd2402f/onnxruntime_gpu-1.20.1-cp313-cp313-win_amd64.whl", hash = "sha256:9f0cc7ccd8e75da510ddab65d891b1384442d1b161863c521d335d1a56e5aa40", size = 279698268 },
{ url = "https://files.pythonhosted.org/packages/7a/26/67059fa067d9584121981cfa9983e1526e5f3ca393d1ace4a99ce9af500d/onnxruntime_gpu-1.20.1-cp313-cp313t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:965d6ee002c114d310c8d38680f42f1c94603b3547bcc62956ff565d528caa88", size = 291503278 },
]
[[package]]
name = "openai"
version = "1.59.6"
@@ -2348,19 +2910,39 @@ name = "torch"
version = "2.5.1"
source = { registry = "https://download.pytorch.org/whl/cpu" }
resolution-markers = [
"python_full_version == '3.11.*'",
"(python_full_version == '3.11.*' and platform_machine != 'aarch64' and sys_platform == 'linux') or (python_full_version == '3.11.*' and sys_platform != 'darwin' and sys_platform != 'linux')",
"(python_full_version == '3.11.*' and platform_machine == 'aarch64' and sys_platform == 'linux') or (python_full_version == '3.11.*' and sys_platform == 'darwin')",
"python_full_version < '3.11'",
"(python_full_version < '3.11' and platform_machine != 'aarch64' and sys_platform == 'linux') or (python_full_version < '3.11' and sys_platform != 'darwin' and sys_platform != 'linux')",
"(python_full_version < '3.11' and platform_machine == 'aarch64' and sys_platform == 'linux') or (python_full_version < '3.11' and sys_platform == 'darwin')",
"python_full_version >= '3.13'",
"(python_full_version >= '3.13' and platform_machine != 'aarch64' and sys_platform == 'linux') or (python_full_version >= '3.13' and sys_platform != 'darwin' and sys_platform != 'linux')",
"(python_full_version >= '3.13' and platform_machine == 'aarch64' and sys_platform == 'linux') or (python_full_version >= '3.13' and sys_platform == 'darwin')",
"python_full_version == '3.12.*'",
"(python_full_version == '3.12.*' and platform_machine != 'aarch64' and sys_platform == 'linux') or (python_full_version == '3.12.*' and sys_platform != 'darwin' and sys_platform != 'linux')",
"(python_full_version == '3.12.*' and platform_machine == 'aarch64' and sys_platform == 'linux') or (python_full_version == '3.12.*' and sys_platform == 'darwin')",
]
dependencies = [
{ name = "filelock", marker = "(platform_machine == 'aarch64' and sys_platform == 'linux') or sys_platform == 'darwin'" },
{ name = "fsspec", marker = "(platform_machine == 'aarch64' and sys_platform == 'linux') or sys_platform == 'darwin'" },
{ name = "jinja2", marker = "(platform_machine == 'aarch64' and sys_platform == 'linux') or sys_platform == 'darwin'" },
{ name = "networkx", marker = "(platform_machine == 'aarch64' and sys_platform == 'linux') or sys_platform == 'darwin'" },
{ name = "setuptools", marker = "(python_full_version >= '3.12' and platform_machine == 'aarch64' and sys_platform == 'linux') or (python_full_version >= '3.12' and sys_platform == 'darwin')" },
{ name = "sympy", marker = "(platform_machine == 'aarch64' and sys_platform == 'linux') or sys_platform == 'darwin'" },
{ name = "typing-extensions", marker = "(platform_machine == 'aarch64' and sys_platform == 'linux') or sys_platform == 'darwin'" },
{ name = "filelock" },
{ name = "fsspec" },
{ name = "jinja2" },
{ name = "networkx" },
{ name = "nvidia-cublas-cu12", version = "12.4.5.8", source = { registry = "https://pypi.org/simple" }, marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" },
{ name = "nvidia-cuda-cupti-cu12", version = "12.4.127", source = { registry = "https://pypi.org/simple" }, marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" },
{ name = "nvidia-cuda-nvrtc-cu12", version = "12.4.127", source = { registry = "https://pypi.org/simple" }, marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" },
{ name = "nvidia-cuda-runtime-cu12", version = "12.4.127", source = { registry = "https://pypi.org/simple" }, marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" },
{ name = "nvidia-cudnn-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" },
{ name = "nvidia-cufft-cu12", version = "11.2.1.3", source = { registry = "https://pypi.org/simple" }, marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" },
{ name = "nvidia-curand-cu12", version = "10.3.5.147", source = { registry = "https://pypi.org/simple" }, marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" },
{ name = "nvidia-cusolver-cu12", version = "11.6.1.9", source = { registry = "https://pypi.org/simple" }, marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" },
{ name = "nvidia-cusparse-cu12", version = "12.3.1.170", source = { registry = "https://pypi.org/simple" }, marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" },
{ name = "nvidia-nccl-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" },
{ name = "nvidia-nvjitlink-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" },
{ name = "nvidia-nvtx-cu12", version = "12.4.127", source = { registry = "https://pypi.org/simple" }, marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" },
{ name = "setuptools", marker = "python_full_version >= '3.12'" },
{ name = "sympy" },
{ name = "typing-extensions" },
]
wheels = [
{ url = "https://download.pytorch.org/whl/cpu/torch-2.5.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:269b10c34430aa8e9643dbe035dc525c4a9b1d671cd3dbc8ecbcaed280ae322d" },
@@ -2371,61 +2953,40 @@ wheels = [
{ url = "https://download.pytorch.org/whl/cpu/torch-2.5.1-cp312-none-macosx_11_0_arm64.whl", hash = "sha256:8c712df61101964eb11910a846514011f0b6f5920c55dbf567bff8a34163d5b1" },
]
[[package]]
name = "torch"
version = "2.5.1+cpu"
source = { registry = "https://download.pytorch.org/whl/cpu" }
resolution-markers = [
"(python_full_version == '3.11.*' and platform_machine != 'aarch64' and sys_platform == 'linux') or (python_full_version == '3.11.*' and sys_platform != 'darwin' and sys_platform != 'linux')",
"(python_full_version < '3.11' and platform_machine != 'aarch64' and sys_platform == 'linux') or (python_full_version < '3.11' and sys_platform != 'darwin' and sys_platform != 'linux')",
"(python_full_version >= '3.13' and platform_machine != 'aarch64' and sys_platform == 'linux') or (python_full_version >= '3.13' and sys_platform != 'darwin' and sys_platform != 'linux')",
"(python_full_version == '3.12.*' and platform_machine != 'aarch64' and sys_platform == 'linux') or (python_full_version == '3.12.*' and sys_platform != 'darwin' and sys_platform != 'linux')",
]
dependencies = [
{ name = "filelock", marker = "(platform_machine != 'aarch64' and sys_platform == 'linux') or (sys_platform != 'darwin' and sys_platform != 'linux')" },
{ name = "fsspec", marker = "(platform_machine != 'aarch64' and sys_platform == 'linux') or (sys_platform != 'darwin' and sys_platform != 'linux')" },
{ name = "jinja2", marker = "(platform_machine != 'aarch64' and sys_platform == 'linux') or (sys_platform != 'darwin' and sys_platform != 'linux')" },
{ name = "networkx", marker = "(platform_machine != 'aarch64' and sys_platform == 'linux') or (sys_platform != 'darwin' and sys_platform != 'linux')" },
{ name = "setuptools", marker = "(python_full_version >= '3.12' and platform_machine != 'aarch64' and sys_platform == 'linux') or (python_full_version >= '3.12' and sys_platform != 'darwin' and sys_platform != 'linux')" },
{ name = "sympy", marker = "(platform_machine != 'aarch64' and sys_platform == 'linux') or (sys_platform != 'darwin' and sys_platform != 'linux')" },
{ name = "typing-extensions", marker = "(platform_machine != 'aarch64' and sys_platform == 'linux') or (sys_platform != 'darwin' and sys_platform != 'linux')" },
]
wheels = [
{ url = "https://download.pytorch.org/whl/cpu/torch-2.5.1%2Bcpu-cp310-cp310-linux_x86_64.whl", hash = "sha256:7f91a2200e352745d70e22396bd501448e28350fbdbd8d8b1c83037e25451150" },
{ url = "https://download.pytorch.org/whl/cpu/torch-2.5.1%2Bcpu-cp310-cp310-win_amd64.whl", hash = "sha256:df93157482b672892d29134d3fae9d38ba3219702faedd79f407eb36774c56ce" },
{ url = "https://download.pytorch.org/whl/cpu/torch-2.5.1%2Bcpu-cp311-cp311-linux_x86_64.whl", hash = "sha256:07d7c9e069123d5af08b0cf0013d74f680b2d8be7d9e2cf561a52c90c55d9409" },
{ url = "https://download.pytorch.org/whl/cpu/torch-2.5.1%2Bcpu-cp311-cp311-win_amd64.whl", hash = "sha256:81531d4d5ca74163dc9574b87396531e546a60cceb6253303c7db6a21e867fdf" },
{ url = "https://download.pytorch.org/whl/cpu/torch-2.5.1%2Bcpu-cp312-cp312-linux_x86_64.whl", hash = "sha256:4856f9d6925121d13c2df07aa7580b767f449dfe71ae5acde9c27535d5da4840" },
{ url = "https://download.pytorch.org/whl/cpu/torch-2.5.1%2Bcpu-cp312-cp312-win_amd64.whl", hash = "sha256:a6b720410350765d3d77c01a5ce098a6c45af446284e45e87a98b8a16e7d564d" },
{ url = "https://download.pytorch.org/whl/cpu/torch-2.5.1%2Bcpu-cp313-cp313-linux_x86_64.whl", hash = "sha256:5dbbdf83caa90d0bcaa50e4933ca424889133b35226db79000877d4ec5d9ea37" },
]
[[package]]
name = "torch"
version = "2.5.1+cu121"
source = { registry = "https://download.pytorch.org/whl/cu121" }
resolution-markers = [
"python_full_version == '3.11.*'",
"(python_full_version == '3.11.*' and platform_machine != 'aarch64' and sys_platform == 'linux') or (python_full_version == '3.11.*' and sys_platform != 'darwin' and sys_platform != 'linux')",
"(python_full_version == '3.11.*' and platform_machine == 'aarch64' and sys_platform == 'linux') or (python_full_version == '3.11.*' and sys_platform == 'darwin')",
"python_full_version < '3.11'",
"(python_full_version < '3.11' and platform_machine != 'aarch64' and sys_platform == 'linux') or (python_full_version < '3.11' and sys_platform != 'darwin' and sys_platform != 'linux')",
"(python_full_version < '3.11' and platform_machine == 'aarch64' and sys_platform == 'linux') or (python_full_version < '3.11' and sys_platform == 'darwin')",
"python_full_version >= '3.13'",
"(python_full_version >= '3.13' and platform_machine != 'aarch64' and sys_platform == 'linux') or (python_full_version >= '3.13' and sys_platform != 'darwin' and sys_platform != 'linux')",
"(python_full_version >= '3.13' and platform_machine == 'aarch64' and sys_platform == 'linux') or (python_full_version >= '3.13' and sys_platform == 'darwin')",
"python_full_version == '3.12.*'",
"(python_full_version == '3.12.*' and platform_machine != 'aarch64' and sys_platform == 'linux') or (python_full_version == '3.12.*' and sys_platform != 'darwin' and sys_platform != 'linux')",
"(python_full_version == '3.12.*' and platform_machine == 'aarch64' and sys_platform == 'linux') or (python_full_version == '3.12.*' and sys_platform == 'darwin')",
]
dependencies = [
{ name = "filelock" },
{ name = "fsspec" },
{ name = "jinja2" },
{ name = "networkx" },
{ name = "nvidia-cublas-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" },
{ name = "nvidia-cuda-cupti-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" },
{ name = "nvidia-cuda-nvrtc-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" },
{ name = "nvidia-cuda-runtime-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" },
{ name = "nvidia-cublas-cu12", version = "12.1.3.1", source = { registry = "https://pypi.org/simple" }, marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" },
{ name = "nvidia-cuda-cupti-cu12", version = "12.1.105", source = { registry = "https://pypi.org/simple" }, marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" },
{ name = "nvidia-cuda-nvrtc-cu12", version = "12.1.105", source = { registry = "https://pypi.org/simple" }, marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" },
{ name = "nvidia-cuda-runtime-cu12", version = "12.1.105", source = { registry = "https://pypi.org/simple" }, marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" },
{ name = "nvidia-cudnn-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" },
{ name = "nvidia-cufft-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" },
{ name = "nvidia-curand-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" },
{ name = "nvidia-cusolver-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" },
{ name = "nvidia-cusparse-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" },
{ name = "nvidia-cufft-cu12", version = "11.0.2.54", source = { registry = "https://pypi.org/simple" }, marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" },
{ name = "nvidia-curand-cu12", version = "10.3.2.106", source = { registry = "https://pypi.org/simple" }, marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" },
{ name = "nvidia-cusolver-cu12", version = "11.4.5.107", source = { registry = "https://pypi.org/simple" }, marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" },
{ name = "nvidia-cusparse-cu12", version = "12.1.0.106", source = { registry = "https://pypi.org/simple" }, marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" },
{ name = "nvidia-nccl-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" },
{ name = "nvidia-nvtx-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" },
{ name = "nvidia-nvtx-cu12", version = "12.1.105", source = { registry = "https://pypi.org/simple" }, marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" },
{ name = "setuptools", marker = "python_full_version >= '3.12'" },
{ name = "sympy" },
{ name = "triton", marker = "python_full_version < '3.13' and platform_machine == 'x86_64' and sys_platform == 'linux'" },

web/app.js Normal file

@@ -0,0 +1,203 @@
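// Browser client for the FastKoko TTS API: loads the voice list, then
// requests speech from /v1/audio/speech in streaming or one-shot mode.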
class KokoroPlayer {
constructor() {
this.elements = {
textInput: document.getElementById('text-input'),
voiceSelect: document.getElementById('voice-select'),
streamToggle: document.getElementById('stream-toggle'),
autoplayToggle: document.getElementById('autoplay-toggle'),
generateBtn: document.getElementById('generate-btn'),
audioPlayer: document.getElementById('audio-player'),
status: document.getElementById('status')
};
this.isGenerating = false;
this.init();
}
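// Populate the voice list, then wire up UI event handlers.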
async init() {
await this.loadVoices();
this.setupEventListeners();
}
async loadVoices() {
try {
const response = await fetch('/v1/audio/voices');
if (!response.ok) {
const error = await response.json();
throw new Error(error.detail?.message || 'Failed to load voices');
}
const data = await response.json();
if (!data.voices?.length) {
throw new Error('No voices available');
}
this.elements.voiceSelect.innerHTML = data.voices
.map(voice => `<option value="${voice}">${voice}</option>`)
.join('');
// Select the first voice by default; the list is known non-empty here
this.elements.voiceSelect.value = data.voices[0];
this.showStatus('Voices loaded successfully', 'success');
} catch (error) {
this.showStatus('Failed to load voices: ' + error.message, 'error');
// Disable generate button if no voices
this.elements.generateBtn.disabled = true;
}
}
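// The 'ended' handler re-enables the Generate button after playback.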
setupEventListeners() {
this.elements.generateBtn.addEventListener('click', () => this.generateSpeech());
this.elements.audioPlayer.addEventListener('ended', () => {
this.elements.generateBtn.disabled = false;
});
}
showStatus(message, type = 'info') {
this.elements.status.textContent = message;
this.elements.status.className = 'status ' + type;
// Reset the hide timer so rapid updates don't clear a fresh message early
clearTimeout(this.statusTimer);
this.statusTimer = setTimeout(() => {
this.elements.status.className = 'status';
}, 5000);
}
setLoading(loading) {
this.isGenerating = loading;
this.elements.generateBtn.disabled = loading;
this.elements.generateBtn.className = loading ? 'primary loading' : 'primary';
}
validateInput() {
const text = this.elements.textInput.value.trim();
if (!text) {
this.showStatus('Please enter some text', 'error');
return false;
}
const voice = this.elements.voiceSelect.value;
if (!voice) {
this.showStatus('Please select a voice', 'error');
return false;
}
return true;
}
async generateSpeech() {
if (this.isGenerating || !this.validateInput()) return;
const text = this.elements.textInput.value.trim();
const voice = this.elements.voiceSelect.value;
const stream = this.elements.streamToggle.checked;
this.setLoading(true);
try {
if (stream) {
await this.handleStreamingAudio(text, voice);
} else {
await this.handleNonStreamingAudio(text, voice);
}
} catch (error) {
this.showStatus('Error generating speech: ' + error.message, 'error');
} finally {
this.setLoading(false);
}
}
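// Streams MP3 chunks through Media Source Extensions: each fetch() chunk is
// appended to a SourceBuffer so playback can start before generation finishes.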
async handleStreamingAudio(text, voice) {
// Fall back to the one-shot path on browsers without MSE support for MP3
if (!window.MediaSource || !MediaSource.isTypeSupported('audio/mpeg')) {
return this.handleNonStreamingAudio(text, voice);
}
this.showStatus('Initializing audio stream...', 'info');
const response = await fetch('/v1/audio/speech', {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify({
input: text,
voice: voice,
response_format: 'mp3',
stream: true
})
});
if (!response.ok) {
const error = await response.json();
throw new Error(error.detail?.message || 'Failed to generate speech');
}
const mediaSource = new MediaSource();
this.elements.audioPlayer.src = URL.createObjectURL(mediaSource);
return new Promise((resolve, reject) => {
mediaSource.addEventListener('sourceopen', async () => {
try {
const sourceBuffer = mediaSource.addSourceBuffer('audio/mpeg');
const reader = response.body.getReader();
let totalChunks = 0;
while (true) {
const {done, value} = await reader.read();
if (done) break;
// Wait for the buffer to be ready
if (sourceBuffer.updating) {
await new Promise(resolve => {
sourceBuffer.addEventListener('updateend', resolve, {once: true});
});
}
sourceBuffer.appendBuffer(value);
totalChunks++;
this.showStatus(`Received chunk ${totalChunks}...`, 'info');
}
// Let the final append settle; endOfStream() throws while the buffer is updating
if (sourceBuffer.updating) {
await new Promise(resolve => {
sourceBuffer.addEventListener('updateend', resolve, {once: true});
});
}
mediaSource.endOfStream();
if (this.elements.autoplayToggle.checked) {
await this.elements.audioPlayer.play();
}
this.showStatus('Audio stream ready', 'success');
resolve();
} catch (error) {
// endOfStream() is only legal while the MediaSource is still open
if (mediaSource.readyState === 'open') {
mediaSource.endOfStream();
}
this.showStatus('Error during streaming: ' + error.message, 'error');
reject(error);
}
});
});
}
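// Fetches the complete MP3 response as a Blob and plays it via an object URL.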
async handleNonStreamingAudio(text, voice) {
this.showStatus('Generating audio...', 'info');
const response = await fetch('/v1/audio/speech', {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify({
input: text,
voice: voice,
response_format: 'mp3',
stream: false
})
});
if (!response.ok) {
const error = await response.json();
throw new Error(error.detail?.message || 'Failed to generate speech');
}
const blob = await response.blob();
const url = URL.createObjectURL(blob);
// Revoke the previous object URL, if any, to avoid leaking blobs
if (this.elements.audioPlayer.src.startsWith('blob:')) {
URL.revokeObjectURL(this.elements.audioPlayer.src);
}
this.elements.audioPlayer.src = url;
if (this.elements.autoplayToggle.checked) {
await this.elements.audioPlayer.play();
}
this.showStatus('Audio ready', 'success');
}
}
// Initialize the player when the page loads
document.addEventListener('DOMContentLoaded', () => {
new KokoroPlayer();
});

web/index.html Normal file

@@ -0,0 +1,51 @@
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>FastKoko: Kokoro-based TTS Player</title>
<link rel="stylesheet" href="styles.css">
</head>
<body>
<div class="container">
<header>
<h1>Kokoro TTS</h1>
</header>
<main>
<div class="input-section">
<textarea
id="text-input"
placeholder="Enter text to convert to speech..."
rows="4"
></textarea>
<div class="controls">
<select id="voice-select">
<option value="">Loading voices...</option>
</select>
<div class="options">
<label>
<input type="checkbox" id="stream-toggle" checked>
Stream audio
</label>
<label>
<input type="checkbox" id="autoplay-toggle" checked>
Auto-play
</label>
</div>
<button id="generate-btn" class="primary">
<span class="btn-text">Generate Speech</span>
<span class="loader"></span>
</button>
</div>
</div>
<div class="player-section">
<div id="status" class="status"></div>
<audio id="audio-player" controls></audio>
</div>
</main>
</div>
<script src="app.js"></script>
</body>
</html>

web/styles.css Normal file

@@ -0,0 +1,176 @@
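/* Design tokens; the rest of the sheet references these custom properties. */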
:root {
--primary-color: #007bff;
--primary-hover: #0056b3;
--background: #ffffff;
--surface: #f8f9fa;
--text: #212529;
--text-light: #6c757d;
--border: #dee2e6;
--error: #dc3545;
--success: #28a745;
--font-family: system-ui, -apple-system, "Segoe UI", Roboto, sans-serif;
}
* {
margin: 0;
padding: 0;
box-sizing: border-box;
}
body {
font-family: var(--font-family);
line-height: 1.6;
color: var(--text);
background: var(--background);
}
.container {
max-width: 800px;
margin: 0 auto;
padding: 2rem 1rem;
}
header {
margin-bottom: 2rem;
text-align: center;
}
h1 {
color: var(--primary-color);
font-size: 2rem;
font-weight: 600;
}
.input-section {
background: var(--surface);
padding: 1.5rem;
border-radius: 8px;
margin-bottom: 1.5rem;
}
textarea {
width: 100%;
padding: 0.75rem;
border: 1px solid var(--border);
border-radius: 4px;
font-family: inherit;
font-size: 1rem;
resize: vertical;
margin-bottom: 1rem;
}
textarea:focus {
outline: none;
border-color: var(--primary-color);
}
.controls {
display: flex;
gap: 1rem;
align-items: center;
flex-wrap: wrap;
}
select {
padding: 0.5rem;
border: 1px solid var(--border);
border-radius: 4px;
font-size: 1rem;
min-width: 200px;
}
.options {
display: flex;
align-items: center;
gap: 1rem;
}
button {
padding: 0.5rem 1rem;
border: none;
border-radius: 4px;
font-size: 1rem;
cursor: pointer;
display: flex;
align-items: center;
gap: 0.5rem;
transition: background-color 0.2s;
}
button.primary {
background: var(--primary-color);
color: white;
}
button.primary:hover {
background: var(--primary-hover);
}
button:disabled {
opacity: 0.7;
cursor: not-allowed;
}
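/* Inline spinner shown inside the Generate button while a request is running. */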
.loader {
display: none;
width: 16px;
height: 16px;
border: 2px solid #ffffff;
border-radius: 50%;
border-top-color: transparent;
animation: spin 1s linear infinite;
}
@keyframes spin {
to {
transform: rotate(360deg);
}
}
.loading .loader {
display: inline-block;
}
.loading .btn-text {
display: none;
}
.player-section {
background: var(--surface);
padding: 1.5rem;
border-radius: 8px;
}
.status {
margin-bottom: 1rem;
padding: 0.5rem;
border-radius: 4px;
display: none;
}
.status.error {
display: block;
background: rgba(220, 53, 69, 0.1);
color: var(--error);
}
.status.success {
display: block;
background: rgba(40, 167, 69, 0.1);
color: var(--success);
}
audio {
width: 100%;
}
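/* Stack the controls vertically on narrow screens. */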
@media (max-width: 600px) {
.controls {
flex-direction: column;
align-items: stretch;
}
select, button {
width: 100%;
}
}