Mirror of https://github.com/remsky/Kokoro-FastAPI.git (synced 2025-04-13 09:39:17 +00:00)
WIP: 1.0 integration
- Introduced v1.0 model build system integration.
- Updated imports to reflect the new directory structure for versioned models.
- Modified environment variables.
- Added version selection in the frontend for voice management.
- Enhanced Docker build scripts for multi-platform support.
- Updated configuration settings for default voice and model paths.
Parent: eb2191e23d
Commit: 9a588a3483
87 changed files with 3326 additions and 93 deletions
@@ -338,7 +338,7 @@ def recursive_munch(d):
     return d


 async def build_model(path, device):
-    from ..core.paths import load_json, load_model_weights
+    from api.src.core.paths import load_json, load_model_weights

     config = Path(__file__).parent / 'config.json'
     assert config.exists(), f'Config path incorrect: config.json not found at {config}'
api/src/builds/v1_0/config.json (new file, 150 lines)
@@ -0,0 +1,150 @@
{
    "istftnet": {
        "upsample_kernel_sizes": [20, 12],
        "upsample_rates": [10, 6],
        "gen_istft_hop_size": 5,
        "gen_istft_n_fft": 20,
        "resblock_dilation_sizes": [
            [1, 3, 5],
            [1, 3, 5],
            [1, 3, 5]
        ],
        "resblock_kernel_sizes": [3, 7, 11],
        "upsample_initial_channel": 512
    },
    "dim_in": 64,
    "dropout": 0.2,
    "hidden_dim": 512,
    "max_conv_dim": 512,
    "max_dur": 50,
    "multispeaker": true,
    "n_layer": 3,
    "n_mels": 80,
    "n_token": 178,
    "style_dim": 128,
    "text_encoder_kernel_size": 5,
    "plbert": {
        "hidden_size": 768,
        "num_attention_heads": 12,
        "intermediate_size": 2048,
        "max_position_embeddings": 512,
        "num_hidden_layers": 12,
        "dropout": 0.1
    },
    "vocab": {
        ";": 1,
        ":": 2,
        ",": 3,
        ".": 4,
        "!": 5,
        "?": 6,
        "—": 9,
        "…": 10,
        "\"": 11,
        "(": 12,
        ")": 13,
        "“": 14,
        "”": 15,
        " ": 16,
        "\u0303": 17,
        "ʣ": 18,
        "ʥ": 19,
        "ʦ": 20,
        "ʨ": 21,
        "ᵝ": 22,
        "\uAB67": 23,
        "A": 24,
        "I": 25,
        "O": 31,
        "Q": 33,
        "S": 35,
        "T": 36,
        "W": 39,
        "Y": 41,
        "ᵊ": 42,
        "a": 43,
        "b": 44,
        "c": 45,
        "d": 46,
        "e": 47,
        "f": 48,
        "h": 50,
        "i": 51,
        "j": 52,
        "k": 53,
        "l": 54,
        "m": 55,
        "n": 56,
        "o": 57,
        "p": 58,
        "q": 59,
        "r": 60,
        "s": 61,
        "t": 62,
        "u": 63,
        "v": 64,
        "w": 65,
        "x": 66,
        "y": 67,
        "z": 68,
        "ɑ": 69,
        "ɐ": 70,
        "ɒ": 71,
        "æ": 72,
        "β": 75,
        "ɔ": 76,
        "ɕ": 77,
        "ç": 78,
        "ɖ": 80,
        "ð": 81,
        "ʤ": 82,
        "ə": 83,
        "ɚ": 85,
        "ɛ": 86,
        "ɜ": 87,
        "ɟ": 90,
        "ɡ": 92,
        "ɥ": 99,
        "ɨ": 101,
        "ɪ": 102,
        "ʝ": 103,
        "ɯ": 110,
        "ɰ": 111,
        "ŋ": 112,
        "ɳ": 113,
        "ɲ": 114,
        "ɴ": 115,
        "ø": 116,
        "ɸ": 118,
        "θ": 119,
        "œ": 120,
        "ɹ": 123,
        "ɾ": 125,
        "ɻ": 126,
        "ʁ": 128,
        "ɽ": 129,
        "ʂ": 130,
        "ʃ": 131,
        "ʈ": 132,
        "ʧ": 133,
        "ʊ": 135,
        "ʋ": 136,
        "ʌ": 138,
        "ɣ": 139,
        "ɤ": 140,
        "χ": 142,
        "ʎ": 143,
        "ʒ": 147,
        "ʔ": 148,
        "ˈ": 156,
        "ˌ": 157,
        "ː": 158,
        "ʰ": 162,
        "ʲ": 164,
        "↓": 169,
        "→": 171,
        "↗": 172,
        "↘": 173,
        "ᵻ": 177
    }
}
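For reference, the "vocab" block above is the phoneme-to-token-id table that the v1.0 wrapper walks in reverse when decoding tokens back into phonemes. A minimal illustrative sketch of using it in the forward direction (the file path and example phoneme string are assumptions, not taken from this commit):

import json

# Load the v1.0 config and encode a phoneme string into token ids.
with open("api/src/builds/v1_0/config.json") as f:
    config = json.load(f)

vocab = config["vocab"]  # phoneme -> token id
phonemes = "hˈɛlɒ"       # illustrative phoneme string
tokens = [vocab[p] for p in phonemes if p in vocab]

# Reverse map, as used by the wrapper when decoding tokens.
id_to_phoneme = {idx: p for p, idx in vocab.items()}
print(tokens, "".join(id_to_phoneme[t] for t in tokens))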
api/src/builds/v1_0/models.py (new file, 33 lines)
@@ -0,0 +1,33 @@
"""
Kokoro v1.0 model build system integration.
"""
from pathlib import Path
from typing import Optional

from .wrapper import build_model as build_v1_model


async def build_model(path: Optional[str] = None, device: str = "cuda"):
    """Build a Kokoro v1.0 model instance.

    This function maintains compatibility with the v0.19 build_model interface
    while using the new KModel/KPipeline architecture internally.

    Args:
        path: Optional path to model weights. If None, uses default location.
        device: Device to load model on ("cuda" or "cpu")

    Returns:
        Initialized model instance
    """
    if path is None:
        # Use default path in models/v1_0
        path = str(Path(__file__).parent.parent.parent / "models/v1_0/kokoro-v1_0.pth")

    # Config is always in builds/v1_0
    config_path = str(Path(__file__).parent / "config.json")

    return await build_v1_model(
        config_path=config_path,
        model_path=path
    )
api/src/builds/v1_0/wrapper.py (new file, 184 lines)
@@ -0,0 +1,184 @@
from pathlib import Path
from typing import AsyncGenerator, List

import numpy as np
import torch
from kokoro import KModel
from loguru import logger


class KokoroV1Wrapper:
    """Wrapper for Kokoro v1.0 KModel integration.

    This wrapper provides a token-based interface compatible with the TTS service,
    while internally using the Kokoro KModel for direct audio generation. It handles:

    - Token-to-phoneme conversion using the model's vocab
    - Voice tensor management
    - Audio generation with speed control
    """

    def __init__(self, config_path: str, model_path: str):
        """Initialize KModel with config and weights.

        Args:
            config_path: Path to config.json in builds/v1_0/
            model_path: Path to model weights in models/v1_0/
        """
        self.model = KModel(config=config_path, model=model_path)
        self.vocab = self.model.vocab  # Get vocab from model for token decoding

    async def forward(self, tokens: List[int], voice_tensor: torch.Tensor, speed: float = 1.0) -> AsyncGenerator[torch.FloatTensor, None]:
        """Generate audio using KModel's forward pass.

        Args:
            tokens: Input token sequence to convert to phonemes
            voice_tensor: Voice embedding tensor (ref_s) containing style information
            speed: Speed multiplier for audio generation

        Yields:
            Single audio tensor as torch.FloatTensor

        Raises:
            RuntimeError: If token-to-phoneme conversion or audio generation fails
        """
        try:
            # Convert tokens back to phonemes using vocab
            phonemes = []
            for token in tokens:
                for p, idx in self.vocab.items():
                    if idx == token:
                        phonemes.append(p)
                        break
            text = ''.join(phonemes)
            logger.debug(f"Decoded tokens to text: '{text[:100]}...'")

            # Validate and reshape voice tensor
            logger.debug(f"Initial voice tensor shape: {voice_tensor.shape}")

            # Ensure tensor has correct number of dimensions
            if voice_tensor.dim() == 1:
                voice_tensor = voice_tensor.unsqueeze(0)  # [N] -> [1, N]
            if voice_tensor.dim() == 2:
                voice_tensor = voice_tensor.unsqueeze(1)  # [B, N] -> [B, 1, N]
            logger.debug(f"After reshape voice tensor shape: {voice_tensor.shape}")

            # Validate feature dimension
            if voice_tensor.size(-1) != 256:  # Expected size for style + content
                raise RuntimeError(f"Voice tensor has wrong feature size: expected 256, got {voice_tensor.size(-1)}")

            # Generate audio directly using KModel
            audio = self.model.forward(
                phonemes=text,  # text is already phonemes from token conversion
                ref_s=voice_tensor,
                speed=speed
            )
            logger.debug(f"Generated audio tensor shape: {audio.shape}")
            yield audio

        except Exception as e:
            logger.error(f"Error in KokoroV1Wrapper.forward: {str(e)}")
            raise RuntimeError(f"Failed to generate audio: {str(e)}")

    async def generate(self, tokens: List[int], voice_tensor: torch.Tensor, speed: float = 1.0) -> np.ndarray:
        """Generate audio using KModel's forward pass.

        This method provides compatibility with the TTS service interface,
        internally using forward() for generation.

        Args:
            tokens: Input token sequence to convert to phonemes
            voice_tensor: Voice embedding tensor (ref_s) containing style information
            speed: Speed multiplier for audio generation

        Returns:
            Generated audio as numpy array

        Raises:
            RuntimeError: If token-to-phoneme conversion or audio generation fails
        """
        try:
            # Convert tokens back to phonemes using vocab
            phonemes = []
            for token in tokens:
                for p, idx in self.vocab.items():
                    if idx == token:
                        phonemes.append(p)
                        break
            text = ''.join(phonemes)
            logger.debug(f"Decoded tokens to text: '{text[:100]}...'")

            # Validate and reshape voice tensor
            logger.debug(f"Initial voice tensor shape: {voice_tensor.shape}")

            # Ensure tensor has correct number of dimensions
            if voice_tensor.dim() == 1:
                voice_tensor = voice_tensor.unsqueeze(0)  # [N] -> [1, N]
            if voice_tensor.dim() == 2:
                voice_tensor = voice_tensor.unsqueeze(1)  # [B, N] -> [B, 1, N]
            logger.debug(f"After reshape voice tensor shape: {voice_tensor.shape}")

            # Validate feature dimension
            if voice_tensor.size(-1) != 256:  # Expected size for style + content
                raise RuntimeError(f"Voice tensor has wrong feature size: expected 256, got {voice_tensor.size(-1)}")

            try:
                # Generate audio directly using KModel
                audio = self.model.forward(
                    phonemes=text,
                    ref_s=voice_tensor,
                    speed=speed
                )
                logger.debug(f"Generated audio tensor shape: {audio.shape}")
            except Exception as e:
                raise RuntimeError(f"Model forward pass failed: {e}. Input shapes: voice={voice_tensor.shape}, text_len={len(text)}")

            # Convert to numpy if needed
            if isinstance(audio, torch.Tensor):
                audio = audio.cpu().numpy()

            return audio

        except Exception as e:
            logger.error(f"Error in KokoroV1Wrapper.generate: {str(e)}")
            raise RuntimeError(f"Failed to generate audio: {str(e)}")


async def build_model(config_path: str = None, model_path: str = None) -> KokoroV1Wrapper:
    """Build a v1.0 model instance.

    Args:
        config_path: Optional path to config.json. If None, uses default in builds/v1_0/
        model_path: Optional path to model weights. If None, uses default in models/v1_0/

    Returns:
        Initialized KokoroV1Wrapper instance

    Raises:
        RuntimeError: If model initialization fails
    """
    try:
        if config_path is None:
            config_path = str(Path(__file__).parent / "config.json")
        if model_path is None:
            model_path = str(Path(__file__).parent.parent.parent / "models/v1_0/kokoro-v1_0.pth")

        logger.info(f"Initializing KokoroV1Wrapper with:")
        logger.info(f"  Config: {config_path}")
        logger.info(f"  Model: {model_path}")

        if not Path(config_path).exists():
            raise RuntimeError(f"Config file not found: {config_path}")
        if not Path(model_path).exists():
            raise RuntimeError(f"Model file not found: {model_path}")

        wrapper = KokoroV1Wrapper(
            config_path=config_path,
            model_path=model_path
        )
        logger.info("Successfully initialized KokoroV1Wrapper")
        return wrapper

    except Exception as e:
        logger.error(f"Failed to initialize KokoroV1Wrapper: {str(e)}")
        raise RuntimeError(f"Failed to initialize v1.0 model: {str(e)}")
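A hedged usage sketch for the wrapper above. The import path, voice file, and phoneme string are assumptions; the voice tensor is expected to carry 256 features per frame, as generate() validates:

import asyncio

import torch

from api.src.builds.v1_0.wrapper import build_model  # assumed import path


async def main():
    # Falls back to builds/v1_0/config.json and models/v1_0/kokoro-v1_0.pth
    # when no explicit paths are given.
    wrapper = await build_model()

    # Encode an example phoneme string with the model vocab (the reverse of
    # the token decoding done inside generate()).
    phonemes = "hˈɛlɒ"
    tokens = [wrapper.vocab[p] for p in phonemes if p in wrapper.vocab]

    # Assumed: a v1.0 voice pack shipped with this commit.
    voice_tensor = torch.load("api/src/voices/v1_0/af_bella.pt", map_location="cpu")

    audio = await wrapper.generate(tokens, voice_tensor, speed=1.0)
    print(audio.shape)


asyncio.run(main())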
@@ -12,7 +12,7 @@ class Settings(BaseSettings):
     # Application Settings
     output_dir: str = "output"
     output_dir_size_limit_mb: float = 500.0  # Maximum size of output directory in MB
-    default_voice: str = "af"
+    default_voice: str = "af_aoede"  # Default to v1.0 voice
     use_gpu: bool = True  # Whether to use GPU acceleration if available
     use_onnx: bool = False  # Whether to use ONNX runtime
     allow_local_voice_saving: bool = False  # Whether to allow saving combined voices locally
@@ -97,8 +97,12 @@ async def get_model_path(model_name: str) -> str:
     # Ensure model directory exists
     os.makedirs(model_dir, exist_ok=True)

-    # Search in model directory
-    search_paths = [model_dir]
+    # Search in model directory and version subdirectories
+    search_paths = [
+        model_dir,
+        os.path.join(model_dir, "v1_0"),
+        os.path.join(model_dir, "v0_19")
+    ]
     logger.debug(f"Searching for model in path: {model_dir}")

     return await _find_file(model_name, search_paths)
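The _find_file helper referenced above is not part of this hunk; a minimal sketch of the lookup behaviour the new search_paths list implies (helper name and error handling here are assumptions) could look like:

import os
from typing import List


async def _find_file(filename: str, search_paths: List[str]) -> str:
    """Illustrative only: return the first candidate path that exists."""
    for directory in search_paths:
        candidate = os.path.join(directory, filename)
        if os.path.exists(candidate):
            return candidate
    raise RuntimeError(f"File not found: {filename} (searched: {search_paths})")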
@@ -178,8 +182,7 @@ async def load_voice_tensor(voice_path: str, device: str = "cpu") -> torch.Tensor:
             data = await f.read()
             return torch.load(
                 io.BytesIO(data),
-                map_location=device,
-                weights_only=True
+                map_location=device
             )
     except Exception as e:
         raise RuntimeError(f"Failed to load voice tensor from {voice_path}: {e}")
@@ -242,8 +245,7 @@ async def load_model_weights(path: str, device: str = "cpu") -> dict:
             data = await f.read()
             return torch.load(
                 io.BytesIO(data),
-                map_location=device,
-                weights_only=True
+                map_location=device
             )
     except Exception as e:
         raise RuntimeError(f"Failed to load model weights from {path}: {e}")
@@ -9,7 +9,7 @@ import numpy as np
 import torch
 from loguru import logger

-from ..builds.models import build_model
+from ..builds.v0_19.models import build_model
 from ..core import paths
 from ..core.model_config import model_config
 from ..core.config import settings
api/src/inference/version_manager.py (new file, 143 lines)
@@ -0,0 +1,143 @@
"""Version-aware model management."""

import asyncio
from pathlib import Path
from typing import Dict, Optional, Union

import torch
from loguru import logger

from ..builds.v0_19.models import build_model as build_v0_19
from ..builds.v1_0.models import build_model as build_v1
from ..core.config import settings


# Global singleton instance and lock for thread-safe initialization
_manager_instance = None
_manager_lock = asyncio.Lock()


class VersionManager:
    """Manages different versions of Kokoro models."""

    def __init__(self):
        """Initialize version manager."""
        self.models: Dict[str, Union[dict, object]] = {}
        self._version_locks: Dict[str, asyncio.Lock] = {
            "v0.19": asyncio.Lock(),
            "v1.0": asyncio.Lock()
        }
        self._current_version = "v1.0"  # Default to v1.0 with af_bella voice

    async def get_model(self, version: Optional[str] = None) -> object:
        """Get model for specified version.

        Args:
            version: Model version ("v0.19" or "v1.0"). Uses default if None.

        Returns:
            Model instance

        Raises:
            ValueError: If version is invalid
            RuntimeError: If model loading fails
        """
        if version is None:
            version = self._current_version

        if version not in self._version_locks:
            raise ValueError(
                f"Invalid version: {version}. "
                f"Available versions: {', '.join(self._version_locks.keys())}"
            )

        # Fast path - return existing model
        if version in self.models and self.models[version] is not None:
            return self.models[version]

        # Slow path - load model with lock
        async with self._version_locks[version]:
            # Double-check pattern
            if version not in self.models or self.models[version] is None:
                try:
                    if version == "v0.19":
                        # Use existing model path logic for v0.19
                        from ..core.model_config import model_config
                        model_file = (model_config.onnx_model_file
                                      if settings.use_onnx
                                      else model_config.pytorch_model_file)
                        from ..core.paths import get_model_path
                        model_path = await get_model_path(model_file)
                        self.models[version] = await build_v0_19(
                            path=model_path,
                            device="cuda" if settings.use_gpu else "cpu"
                        )
                    else:  # v1.0
                        # Use paths module for v1.0 model loading
                        from ..core.paths import get_model_path
                        model_path = await get_model_path("kokoro-v1_0.pth")
                        self.models[version] = await build_v1(
                            path=model_path,
                            device="cuda" if settings.use_gpu else "cpu"
                        )
                    logger.info(f"Loaded {version} model")
                except Exception as e:
                    logger.error(f"Failed to load {version} model: {e}")
                    raise RuntimeError(f"Failed to load {version} model: {e}")

        return self.models[version]

    def set_default_version(self, version: str) -> None:
        """Set default model version.

        Args:
            version: Version to set as default ("v0.19" or "v1.0")

        Raises:
            ValueError: If version is invalid
        """
        if version not in self._version_locks:
            raise ValueError(
                f"Invalid version: {version}. "
                f"Available versions: {', '.join(self._version_locks.keys())}"
            )
        self._current_version = version
        logger.info(f"Set default version to {version}")

    @property
    def current_version(self) -> str:
        """Get current default version."""
        return self._current_version

    @property
    def available_versions(self) -> list[str]:
        """Get list of available versions."""
        return list(self._version_locks.keys())

    def unload_all(self) -> None:
        """Unload all model versions."""
        self.models.clear()
        logger.info("Unloaded all model versions")


async def get_version_manager() -> VersionManager:
    """Get global version manager instance.

    Returns:
        VersionManager instance

    Thread Safety:
        This function is thread-safe
    """
    global _manager_instance

    # Fast path - return existing instance
    if _manager_instance is not None:
        return _manager_instance

    # Slow path - create new instance with lock
    async with _manager_lock:
        # Double-check pattern
        if _manager_instance is None:
            _manager_instance = VersionManager()
        return _manager_instance
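A short sketch of how callers are expected to use the singleton above (the surrounding script is an assumption; model weights for the requested version must already be present on disk):

import asyncio

from api.src.inference.version_manager import get_version_manager


async def main():
    manager = await get_version_manager()
    print(manager.available_versions)  # ["v0.19", "v1.0"]
    print(manager.current_version)     # "v1.0" by default

    # Lazily loads and caches the v1.0 model behind its per-version lock.
    model = await manager.get_model("v1.0")

    # Switch the default used when no version is passed.
    manager.set_default_version("v0.19")


asyncio.run(main())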
@@ -23,20 +23,33 @@ class VoiceManager:
         self._config = config or VoiceConfig()
         self._voice_cache: Dict[str, torch.Tensor] = {}

-    def get_voice_path(self, voice_name: str) -> Optional[str]:
+    def get_voice_path(self, voice_name: str, version: Optional[str] = None) -> Optional[str]:
         """Get path to voice file.

         Args:
             voice_name: Name of voice
+            version: Optional version to filter by ("v0.19" or "v1.0")

         Returns:
             Path to voice file if exists, None otherwise
         """
         api_dir = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
-        voice_path = os.path.join(api_dir, settings.voices_dir, f"{voice_name}.pt")
-        return voice_path if os.path.exists(voice_path) else None

-    async def load_voice(self, voice_name: str, device: str = "cpu") -> torch.Tensor:
+        if version == "v1.0" or version is None:
+            # Check v1.0 voices
+            voice_path = os.path.join(api_dir, settings.voices_dir, "v1_0", f"{voice_name}.pt")
+            if os.path.exists(voice_path):
+                return voice_path
+
+        if version == "v0.19" or version is None:
+            # Check v0.19 voices
+            voice_path = os.path.join(api_dir, settings.voices_dir, "v0_19", f"{voice_name}.pt")
+            if os.path.exists(voice_path):
+                return voice_path
+
+        return None
+
+    async def load_voice(self, voice_name: str, device: str = "cpu", version: Optional[str] = None) -> torch.Tensor:
         """Load voice tensor.

         Args:

@@ -64,12 +77,35 @@
             except Exception as e:
                 raise RuntimeError(f"Failed to load base voice {voice}: {e}")

-            return torch.mean(torch.stack(voice_tensors), dim=0)
+            # Find maximum size
+            max_size = max(tensor.size(0) for tensor in voice_tensors)
+
+            # Pad tensors to match size
+            padded_tensors = []
+            for tensor in voice_tensors:
+                if tensor.size(0) < max_size:
+                    padding = torch.zeros(max_size - tensor.size(0), *tensor.size()[1:], device=tensor.device)
+                    padded_tensor = torch.cat([tensor, padding], dim=0)
+                    padded_tensors.append(padded_tensor)
+                else:
+                    padded_tensors.append(tensor)
+
+            # Stack and average
+            combined = torch.mean(torch.stack(padded_tensors), dim=0)
+
+            # For v1.0, attach combined voice ID
+            if version == "v1.0" or (version is None and any("v1_0" in str(t.voice_id) if hasattr(t, 'voice_id') else False for t in voice_tensors)):
+                combined.voice_id = voice_name
+
+            return combined

-        # Handle single voice
-        voice_path = self.get_voice_path(voice_name)
+        # Handle single voice with version
+        voice_path = self.get_voice_path(voice_name, version)
         if not voice_path:
-            raise RuntimeError(f"Voice not found: {voice_name}")
+            available_voices = await self.list_voices(version)
+            raise RuntimeError(
+                f"Voice not found: {voice_name}. Available voices for version {version or 'any'}: {', '.join(sorted(available_voices))}"
+            )

         # Check cache
         cache_key = f"{voice_path}_{device}"

@@ -79,6 +115,9 @@
         # Load voice tensor
         try:
             voice = await paths.load_voice_tensor(voice_path, device=device)
+            # Attach voice name as attribute for v1.0 models
+            if version == "v1.0" or (version is None and "v1_0" in voice_path):
+                voice.voice_id = voice_name
         except Exception as e:
             raise RuntimeError(f"Failed to load voice {voice_name}: {e}")

@@ -99,7 +138,7 @@
                 torch.cuda.empty_cache()  # Clean up GPU memory if needed
                 logger.debug(f"Removed LRU voice from cache: {oldest}")

-    async def combine_voices(self, voices: List[str], device: str = "cpu") -> str:
+    async def combine_voices(self, voices: List[str], device: str = "cpu", version: Optional[str] = None) -> str:
         """Combine multiple voices into a new voice.

         Args:

@@ -125,11 +164,20 @@
         # Load and combine voices
         combined_tensor = await self.load_voice(combined_name, device)

-        # Save to disk
+        # Save to disk in version-specific directory
         api_dir = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
-        voices_dir = os.path.join(api_dir, settings.voices_dir)
-        os.makedirs(voices_dir, exist_ok=True)
+        base_voices_dir = os.path.join(api_dir, settings.voices_dir)
+
+        # Determine version directory
+        if version == "v1.0":
+            voices_dir = os.path.join(base_voices_dir, "v1_0")
+        elif version == "v0.19":
+            voices_dir = os.path.join(base_voices_dir, "v0_19")
+        else:
+            # Default to v1.0 if no version specified
+            voices_dir = os.path.join(base_voices_dir, "v1_0")
+
+        os.makedirs(voices_dir, exist_ok=True)
         combined_path = os.path.join(voices_dir, f"{combined_name}.pt")
         try:
             torch.save(combined_tensor, combined_path)

@@ -144,22 +192,35 @@

         return combined_name

-    async def list_voices(self) -> List[str]:
+    async def list_voices(self, version: Optional[str] = None) -> List[str]:
         """List available voices.

+        Args:
+            version: Optional version to filter by ("v0.19" or "v1.0")
+
         Returns:
             List of voice names
         """
         voices = set()  # Use set to avoid duplicates
         try:
             # Get voices from disk
             api_dir = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
-            voices_dir = os.path.join(api_dir, settings.voices_dir)
-            os.makedirs(voices_dir, exist_ok=True)
+            base_voices_dir = os.path.join(api_dir, settings.voices_dir)

-            for entry in os.listdir(voices_dir):
-                if entry.endswith(".pt"):
-                    voices.add(entry[:-3])
+            if version == "v1.0" or version is None:
+                # Check v1.0 voices
+                v1_voices_dir = os.path.join(base_voices_dir, "v1_0")
+                os.makedirs(v1_voices_dir, exist_ok=True)
+                for entry in os.listdir(v1_voices_dir):
+                    if entry.endswith(".pt"):
+                        voices.add(entry[:-3])
+
+            if version == "v0.19" or version is None:
+                # Check v0.19 voices
+                v0_voices_dir = os.path.join(base_voices_dir, "v0_19")
+                os.makedirs(v0_voices_dir, exist_ok=True)
+                for entry in os.listdir(v0_voices_dir):
+                    if entry.endswith(".pt"):
+                        voices.add(entry[:-3])
+
         except Exception as e:
             logger.error(f"Error listing voices: {e}")
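The pad-and-average step in load_voice above is the core of voice combination; the same operation in isolation, with illustrative tensor shapes:

import torch

# Two voice packs of different lengths, combined the way load_voice does it.
voice_a = torch.randn(510, 1, 256)
voice_b = torch.randn(512, 1, 256)

tensors = [voice_a, voice_b]
max_size = max(t.size(0) for t in tensors)

padded = []
for t in tensors:
    if t.size(0) < max_size:
        pad = torch.zeros(max_size - t.size(0), *t.size()[1:], device=t.device)
        t = torch.cat([t, pad], dim=0)
    padded.append(t)

combined = torch.mean(torch.stack(padded), dim=0)
print(combined.shape)  # torch.Size([512, 1, 256])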
@@ -17,6 +17,7 @@ from .core.config import settings
 from .routers.web_player import router as web_router
 from .routers.development import router as dev_router
 from .routers.openai_compatible import router as openai_router
+from .routers.openai_compatible_v2 import router as openai_router_v2
 from .routers.debug import router as debug_router


@@ -46,6 +47,7 @@ setup_logger()
 async def lifespan(app: FastAPI):
     """Lifespan context manager for model initialization"""
     from .inference.model_manager import get_manager
+    from .inference.version_manager import get_version_manager
     from .inference.voice_manager import get_manager as get_voice_manager
     from .services.temp_manager import cleanup_temp_files


@@ -56,11 +58,17 @@ async def lifespan(app: FastAPI):

     try:
         # Initialize managers globally
-        model_manager = await get_manager()
+        model_manager = await get_manager()  # For v0.19 compatibility
+        version_manager = await get_version_manager()  # For v1.0 support
         voice_manager = await get_voice_manager()

-        # Initialize model with warmup and get status
+        # Initialize v0.19 model with warmup and get status
         device, model, voicepack_count = await model_manager.initialize_with_warmup(voice_manager)
+
+        # Pre-load v1.0 model
+        v1_model = await version_manager.get_model("v1.0")
+        logger.info("Initialized v1.0 model")
+
     except FileNotFoundError:
         logger.error("""
 Model files not found! You need to either:

@@ -94,6 +102,7 @@ Model files not found! You need to either:
 """
     startup_msg += f"\nModel warmed up on {device}: {model}"
     startup_msg += f"\n{voicepack_count} voice packs loaded"
+    startup_msg += f"\nKokoro v1.0 model loaded"

     # Add web player info if enabled
     if settings.enable_web_player:

@@ -127,7 +136,8 @@ if settings.cors_enabled:
     )

 # Include routers
-app.include_router(openai_router, prefix="/v1")
+app.include_router(openai_router, prefix="/v1")  # Legacy v0.19 endpoints
+app.include_router(openai_router_v2)  # New v2 endpoints with version support
 app.include_router(dev_router)  # Development endpoints
 app.include_router(debug_router)  # Debug endpoints
 if settings.enable_web_player:
api/src/routers/openai_compatible_v2.py (new file, 395 lines)
@@ -0,0 +1,395 @@
"""OpenAI-compatible router with version support"""

import json
import os
from typing import AsyncGenerator, Dict, List, Optional, Union

from fastapi import APIRouter, Depends, Header, HTTPException, Request, Response
from fastapi.responses import StreamingResponse, FileResponse
from loguru import logger
from pydantic import BaseModel

from ..services.audio import AudioService
from ..services.tts_service_v2 import TTSService
from ..structures.schemas import OpenAISpeechRequest
from ..core.config import settings


class OpenAISpeechRequestV2(OpenAISpeechRequest):
    """Extended OpenAI speech request with version support."""
    version: Optional[str] = None  # "v0.19" or "v1.0"


def load_openai_mappings() -> Dict:
    """Load OpenAI voice and model mappings from JSON"""
    api_dir = os.path.dirname(os.path.dirname(__file__))
    mapping_path = os.path.join(api_dir, "core", "openai_mappings.json")
    try:
        with open(mapping_path, 'r') as f:
            return json.load(f)
    except Exception as e:
        logger.error(f"Failed to load OpenAI mappings: {e}")
        return {"models": {}, "voices": {}}


# Global mappings
_openai_mappings = load_openai_mappings()


router = APIRouter(
    tags=["OpenAI Compatible TTS V2"],
    responses={404: {"description": "Not found"}},
)

# Global TTSService instance with lock
_tts_service = None
_init_lock = None


async def get_tts_service() -> TTSService:
    """Get global TTSService instance"""
    global _tts_service, _init_lock

    if _init_lock is None:
        import asyncio
        _init_lock = asyncio.Lock()

    if _tts_service is None:
        async with _init_lock:
            if _tts_service is None:
                _tts_service = await TTSService.create()
                logger.info("Created global TTSService V2 instance")

    return _tts_service


async def process_voices(
    voice_input: Union[str, List[str]],
    tts_service: TTSService,
    version: Optional[str] = None
) -> str:
    """Process voice input into a combined voice, handling both string and list formats"""
    if isinstance(voice_input, str):
        mapped_voice = _openai_mappings["voices"].get(voice_input)
        if mapped_voice:
            voice_input = mapped_voice
        voices = [v.strip() for v in voice_input.split("+") if v.strip()]
    else:
        voices = [_openai_mappings["voices"].get(v, v) for v in voice_input]
        voices = [v.strip() for v in voices if v.strip()]

    if not voices:
        raise ValueError("No voices provided")

    if len(voices) == 1:
        available_voices = await tts_service.list_voices(version)
        if voices[0] not in available_voices:
            raise ValueError(
                f"Voice '{voices[0]}' not found for version {version or 'any'}. "
                f"Available voices: {', '.join(sorted(available_voices))}"
            )
        return voices[0]

    # For voice combinations, validate all voices exist for the specified version
    available_voices = await tts_service.list_voices(version)
    for voice in voices:
        if voice not in available_voices:
            raise ValueError(
                f"Base voice '{voice}' not found for version {version or 'any'}. "
                f"Available voices: {', '.join(sorted(available_voices))}"
            )

    return await tts_service.combine_voices(voices=voices, version=version)


async def stream_audio_chunks(
    tts_service: TTSService,
    request: OpenAISpeechRequestV2,
    client_request: Request
) -> AsyncGenerator[bytes, None]:
    """Stream audio chunks as they're generated with client disconnect handling"""
    voice_to_use = await process_voices(request.voice, tts_service, request.version)

    try:
        async for chunk in tts_service.generate_audio_stream(
            text=request.input,
            voice=voice_to_use,
            speed=request.speed,
            version=request.version,
            output_format=request.response_format,
        ):
            is_disconnected = client_request.is_disconnected
            if callable(is_disconnected):
                is_disconnected = await is_disconnected()
            if is_disconnected:
                logger.info("Client disconnected, stopping audio generation")
                break
            yield chunk
    except Exception as e:
        logger.error(f"Error in audio streaming: {str(e)}")
        raise


@router.post("/v2/audio/speech")
async def create_speech(
    request: OpenAISpeechRequestV2,
    client_request: Request,
    x_raw_response: str = Header(None, alias="x-raw-response"),
):
    """OpenAI-compatible endpoint for text-to-speech with version support"""
    if request.model not in _openai_mappings["models"]:
        raise HTTPException(
            status_code=400,
            detail={
                "error": "invalid_model",
                "message": f"Unsupported model: {request.model}",
                "type": "invalid_request_error"
            }
        )

    try:
        tts_service = await get_tts_service()
        voice_to_use = await process_voices(request.voice, tts_service, request.version)

        content_type = {
            "mp3": "audio/mpeg",
            "opus": "audio/opus",
            "aac": "audio/aac",
            "flac": "audio/flac",
            "wav": "audio/wav",
            "pcm": "audio/pcm",
        }.get(request.response_format, f"audio/{request.response_format}")

        if request.stream:
            generator = stream_audio_chunks(tts_service, request, client_request)

            if request.return_download_link:
                from ..services.temp_manager import TempFileWriter

                temp_writer = TempFileWriter(request.response_format)
                await temp_writer.__aenter__()

                download_path = temp_writer.download_path

                headers = {
                    "Content-Disposition": f"attachment; filename=speech.{request.response_format}",
                    "X-Accel-Buffering": "no",
                    "Cache-Control": "no-cache",
                    "Transfer-Encoding": "chunked",
                    "X-Download-Path": download_path
                }

                async def dual_output():
                    try:
                        async for chunk in generator:
                            if chunk:
                                await temp_writer.write(chunk)
                                yield chunk

                        await temp_writer.finalize()
                    except Exception as e:
                        logger.error(f"Error in dual output streaming: {e}")
                        await temp_writer.__aexit__(type(e), e, e.__traceback__)
                        raise
                    finally:
                        if not temp_writer._finalized:
                            await temp_writer.__aexit__(None, None, None)

                return StreamingResponse(
                    dual_output(),
                    media_type=content_type,
                    headers=headers
                )

            return StreamingResponse(
                generator,
                media_type=content_type,
                headers={
                    "Content-Disposition": f"attachment; filename=speech.{request.response_format}",
                    "X-Accel-Buffering": "no",
                    "Cache-Control": "no-cache",
                    "Transfer-Encoding": "chunked"
                }
            )
        else:
            audio, _ = await tts_service.generate_audio(
                text=request.input,
                voice=voice_to_use,
                speed=request.speed,
                version=request.version
            )

            content = await AudioService.convert_audio(
                audio, 24000, request.response_format,
                is_first_chunk=True,
                is_last_chunk=True
            )

            return Response(
                content=content,
                media_type=content_type,
                headers={
                    "Content-Disposition": f"attachment; filename=speech.{request.response_format}",
                    "Cache-Control": "no-cache",
                },
            )

    except ValueError as e:
        logger.warning(f"Invalid request: {str(e)}")
        raise HTTPException(
            status_code=400,
            detail={
                "error": "validation_error",
                "message": str(e),
                "type": "invalid_request_error"
            }
        )
    except RuntimeError as e:
        logger.error(f"Processing error: {str(e)}")
        raise HTTPException(
            status_code=500,
            detail={
                "error": "processing_error",
                "message": str(e),
                "type": "server_error"
            }
        )
    except Exception as e:
        logger.error(f"Unexpected error in speech generation: {str(e)}")
        raise HTTPException(
            status_code=500,
            detail={
                "error": "processing_error",
                "message": str(e),
                "type": "server_error"
            }
        )


@router.get("/v2/audio/versions")
async def list_versions():
    """List available model versions"""
    try:
        tts_service = await get_tts_service()
        return {
            "versions": tts_service.available_versions,
            "current": tts_service.current_version
        }
    except Exception as e:
        logger.error(f"Error listing versions: {str(e)}")
        raise HTTPException(
            status_code=500,
            detail={
                "error": "server_error",
                "message": "Failed to retrieve version list",
                "type": "server_error"
            }
        )


@router.post("/v2/audio/version")
async def set_version(version: str):
    """Set default model version"""
    try:
        tts_service = await get_tts_service()
        tts_service.set_version(version)
        return {
            "message": f"Set default version to {version}",
            "versions": tts_service.available_versions,
            "current": tts_service.current_version
        }
    except ValueError as e:
        raise HTTPException(
            status_code=400,
            detail={
                "error": "validation_error",
                "message": str(e),
                "type": "invalid_request_error"
            }
        )
    except Exception as e:
        logger.error(f"Error setting version: {str(e)}")
        raise HTTPException(
            status_code=500,
            detail={
                "error": "server_error",
                "message": "Failed to set version",
                "type": "server_error"
            }
        )


@router.get("/v2/audio/voices")
async def list_voices(version: Optional[str] = None):
    """List all available voices for text-to-speech.

    Args:
        version: Optional version to filter voices by ("v0.19" or "v1.0")
    """
    try:
        tts_service = await get_tts_service()
        voices = await tts_service.list_voices(version)
        return {
            "voices": voices,
            "version": version or tts_service.current_version
        }
    except Exception as e:
        logger.error(f"Error listing voices: {str(e)}")
        raise HTTPException(
            status_code=500,
            detail={
                "error": "server_error",
                "message": "Failed to retrieve voice list",
                "type": "server_error"
            }
        )


@router.post("/v2/audio/voices/combine")
async def combine_voices(request: Union[str, List[str]], version: Optional[str] = None):
    """Combine multiple voices into a new voice.

    Args:
        request: Either a string with voices separated by + or a list of voice names
        version: Optional version to filter voices by ("v0.19" or "v1.0")
    """
    try:
        tts_service = await get_tts_service()
        combined_voice = await process_voices(request, tts_service, version)
        voices = await tts_service.list_voices(version)
        return {
            "voices": voices,
            "voice": combined_voice,
            "version": version or tts_service.current_version
        }

    except ValueError as e:
        logger.warning(f"Invalid voice combination request: {str(e)}")
        raise HTTPException(
            status_code=400,
            detail={
                "error": "validation_error",
                "message": str(e),
                "type": "invalid_request_error"
            }
        )
    except RuntimeError as e:
        logger.error(f"Voice combination processing error: {str(e)}")
        raise HTTPException(
            status_code=500,
            detail={
                "error": "processing_error",
                "message": "Failed to process voice combination request",
                "type": "server_error"
            }
        )
    except Exception as e:
        logger.error(f"Unexpected error in voice combination: {str(e)}")
        raise HTTPException(
            status_code=500,
            detail={
                "error": "server_error",
                "message": "An unexpected error occurred",
                "type": "server_error"
            }
        )
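A hedged client-side example against the new v2 endpoints (host and port are assumptions; the payload mirrors OpenAISpeechRequestV2 with its added version field, and the last call mirrors the repository's own test usage):

import requests

BASE = "http://localhost:8880"  # assumed host/port

payload = {
    "model": "tts-1",
    "input": "Hello from Kokoro v1.0",
    "voice": "af_bella",
    "response_format": "wav",
    "speed": 1.0,
    "stream": False,
    "version": "v1.0",  # pick the model version per request
}

resp = requests.post(f"{BASE}/v2/audio/speech", json=payload)
resp.raise_for_status()
with open("speech.wav", "wb") as f:
    f.write(resp.content)

# List voices for one version, then switch the server-wide default version.
print(requests.get(f"{BASE}/v2/audio/voices", params={"version": "v1.0"}).json())
requests.post(f"{BASE}/v2/audio/version", json="v0.19")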
api/src/services/tts_service_v2.py (new file, 294 lines)
@@ -0,0 +1,294 @@
"""TTS service with version support."""

import time
from typing import List, Tuple, Optional, AsyncGenerator, Union

import numpy as np
import torch
from loguru import logger

from ..core.config import settings
from ..inference.version_manager import get_version_manager
from ..inference.voice_manager import get_manager as get_voice_manager
from .audio import AudioNormalizer, AudioService
from .text_processing.text_processor import process_text_chunk, smart_split
from .text_processing import tokenize


class TTSService:
    """Text-to-speech service with version support."""

    def __init__(self, output_dir: str = None):
        """Initialize service."""
        self.output_dir = output_dir
        self.version_manager = None
        self._voice_manager = None

    @classmethod
    async def create(cls, output_dir: str = None) -> 'TTSService':
        """Create and initialize TTSService instance."""
        service = cls(output_dir)
        service.version_manager = await get_version_manager()
        service._voice_manager = await get_voice_manager()
        return service

    async def _process_chunk(
        self,
        tokens: List[int],
        voice_tensor: torch.Tensor,
        speed: float,
        version: Optional[str] = None,
        output_format: Optional[str] = None,
        is_first: bool = False,
        is_last: bool = False,
        normalizer: Optional[AudioNormalizer] = None,
    ) -> AsyncGenerator[Union[np.ndarray, bytes], None]:
        """Process tokens into audio."""
        try:
            # Handle stream finalization
            if is_last:
                if not output_format:
                    yield np.array([], dtype=np.float32)
                    return

                final_chunk = await AudioService.convert_audio(
                    np.array([0], dtype=np.float32),
                    24000,
                    output_format,
                    is_first_chunk=False,
                    normalizer=normalizer,
                    is_last_chunk=True
                )
                if final_chunk is not None:
                    yield final_chunk
                return

            # Skip empty chunks
            if not tokens:
                return

            # Get model for specified version
            model = await self.version_manager.get_model(version)

            if version == "v1.0":
                # For v1.0, we need to handle the generator
                try:
                    # Split long sequences to avoid index out of bounds
                    max_length = 500  # v1.0 model context limit
                    if len(tokens) > max_length:
                        logger.warning(f"Truncating sequence from {len(tokens)} to {max_length} tokens")
                        tokens = tokens[:max_length]

                    # Process all chunks from the generator
                    async for audio in model.forward(tokens, voice_tensor, speed=speed):
                        if audio is None:
                            continue

                        # Convert tensor to numpy if needed
                        if isinstance(audio, torch.Tensor):
                            audio = audio.cpu().numpy()

                        # Convert audio if needed
                        if output_format:
                            converted = await AudioService.convert_audio(
                                audio,
                                24000,
                                output_format,
                                is_first_chunk=is_first,
                                normalizer=normalizer,
                                is_last_chunk=is_last
                            )
                            if converted is not None:
                                yield converted
                        else:
                            yield audio
                except Exception as e:
                    logger.error(f"Generation failed: {str(e)}")
                    return
            else:
                # For v0.19, use existing generate method
                audio = await model.generate(tokens, voice_tensor, speed=speed)

                if audio is None:
                    logger.error("Model generated None for audio chunk")
                    return

                if len(audio) == 0:
                    logger.error("Model generated empty audio chunk")
                    return

                # Convert audio if needed
                if output_format:
                    try:
                        converted = await AudioService.convert_audio(
                            audio,
                            24000,
                            output_format,
                            is_first_chunk=is_first,
                            normalizer=normalizer,
                            is_last_chunk=is_last
                        )
                        if converted is not None:
                            yield converted
                    except Exception as e:
                        logger.error(f"Failed to convert audio: {str(e)}")
                        return
                else:
                    yield audio

        except Exception as e:
            logger.error(f"Failed to process tokens: {str(e)}")
            return

    async def generate_audio_stream(
        self,
        text: str,
        voice: str,
        speed: float = 1.0,
        version: Optional[str] = None,
        output_format: str = "wav",
    ) -> AsyncGenerator[bytes, None]:
        """Generate and stream audio chunks."""
        stream_normalizer = AudioNormalizer()
        voice_tensor = None
        chunk_index = 0

        try:
            # Get model and load voice
            model = await self.version_manager.get_model(version)
            device = "cuda" if settings.use_gpu else "cpu"
            voice_tensor = await self._voice_manager.load_voice(voice, device=device, version=version)

            # Process text in chunks with smart splitting
            async for chunk_text, tokens in smart_split(text):
                try:
                    # Process audio for chunk
                    async for result in self._process_chunk(
                        tokens,
                        voice_tensor,
                        speed,
                        version=version,
                        output_format=output_format,
                        is_first=(chunk_index == 0),
                        is_last=False,
                        normalizer=stream_normalizer
                    ):
                        if result is not None:
                            yield result
                            chunk_index += 1

                except Exception as e:
                    logger.error(f"Failed to process audio for chunk: '{chunk_text[:100]}...'. Error: {str(e)}")
                    continue

            # Only finalize if we successfully processed at least one chunk
            if chunk_index > 0:
                try:
                    async for final_result in self._process_chunk(
                        [],
                        voice_tensor,
                        speed,
                        version=version,
                        output_format=output_format,
                        is_first=False,
                        is_last=True,
                        normalizer=stream_normalizer
                    ):
                        if final_result is not None:
                            logger.debug("Yielding final chunk to finalize audio")
                            yield final_result
                except Exception as e:
                    logger.error(f"Failed to process final chunk: {str(e)}")
            else:
                logger.warning("No audio chunks were successfully processed")

        except Exception as e:
            logger.error(f"Error in audio generation stream: {str(e)}")
            raise
        finally:
            if voice_tensor is not None:
                del voice_tensor
                torch.cuda.empty_cache()

    async def generate_audio(
        self,
        text: str,
        voice: str,
        speed: float = 1.0,
        version: Optional[str] = None,
    ) -> Tuple[np.ndarray, float]:
        """Generate complete audio for text using streaming internally."""
        start_time = time.time()
        chunks = []
        voice_tensor = None

        try:
            # Get model and load voice
            model = await self.version_manager.get_model(version)
            device = "cuda" if settings.use_gpu else "cpu"
            voice_tensor = await self._voice_manager.load_voice(voice, device=device, version=version)

            if version == "v1.0":
                # For v1.0, use streaming internally
                async for chunk in self.generate_audio_stream(
                    text, voice, speed, version, output_format=None
                ):
                    if chunk is not None and isinstance(chunk, np.ndarray):
                        chunks.append(chunk)
            else:
                # For v0.19, use direct generation
                async for chunk_text, tokens in smart_split(text):
                    try:
                        audio = await model.generate(tokens, voice_tensor, speed=speed)
                        if audio is not None:
                            chunks.append(audio)
                    except Exception as e:
                        logger.error(f"Failed to generate audio for chunk: '{chunk_text[:100]}...'. Error: {str(e)}")
                        continue

            if not chunks:
                raise ValueError("No audio chunks were generated successfully")

            # Concatenate chunks
            audio = np.concatenate(chunks) if len(chunks) > 1 else chunks[0]
            processing_time = time.time() - start_time
            return audio, processing_time

        except Exception as e:
            logger.error(f"Error in audio generation: {str(e)}")
            raise
        finally:
            if voice_tensor is not None:
                del voice_tensor
                torch.cuda.empty_cache()

    async def combine_voices(self, voices: List[str], version: Optional[str] = None) -> str:
        """Combine multiple voices.

        Args:
            voices: List of voice names to combine
            version: Optional version to filter voices by
        """
        return await self._voice_manager.combine_voices(voices, version=version)

    async def list_voices(self, version: Optional[str] = None) -> List[str]:
        """List available voices.

        Args:
            version: Optional version to filter voices by
        """
        return await self._voice_manager.list_voices(version)

    def set_version(self, version: str) -> None:
        """Set default model version."""
        self.version_manager.set_default_version(version)

    @property
    def current_version(self) -> str:
        """Get current model version."""
        return self.version_manager.current_version

    @property
    def available_versions(self) -> List[str]:
        """Get list of available model versions."""
        return self.version_manager.available_versions
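For completeness, a sketch of driving the version-aware service directly instead of going through the router (the voice name is an assumption, and writing the output assumes soundfile is available):

import asyncio

import soundfile as sf

from api.src.services.tts_service_v2 import TTSService


async def main():
    service = await TTSService.create()

    # Non-streaming path: returns raw 24 kHz float audio plus processing time.
    audio, seconds = await service.generate_audio(
        text="Hello world",
        voice="af_bella",
        speed=1.0,
        version="v1.0",
    )
    sf.write("out.wav", audio, 24000)
    print(f"Generated {len(audio) / 24000:.2f}s of audio in {seconds:.2f}s")


asyncio.run(main())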
New binary voice files (content not shown):
api/src/voices/v0_19/af_jadzia.pt
api/src/voices/v1_0/af_bella.pt
api/src/voices/v1_0/af_heart.pt
api/src/voices/v1_0/af_nicole.pt
api/src/voices/v1_0/af_sarah.pt
api/src/voices/v1_0/af_sky.pt
api/src/voices/v1_0/am_adam.pt
api/src/voices/v1_0/am_michael.pt
api/src/voices/v1_0/am_santa.pt
api/src/voices/v1_0/bf_emma.pt
api/src/voices/v1_0/bf_isabella.pt
api/src/voices/v1_0/bm_george.pt
api/src/voices/v1_0/bm_lewis.pt
api/src/voices/v1_0/ef_dora.pt
api/src/voices/v1_0/em_alex.pt
api/src/voices/v1_0/em_santa.pt
api/src/voices/v1_0/ff_siwis.pt
api/src/voices/v1_0/hf_alpha.pt
api/src/voices/v1_0/hf_beta.pt
api/src/voices/v1_0/hm_omega.pt
api/src/voices/v1_0/hm_psi.pt
api/src/voices/v1_0/if_sara.pt
api/src/voices/v1_0/im_nicola.pt
api/src/voices/v1_0/jf_alpha.pt
api/src/voices/v1_0/jf_gongitsune.pt
api/src/voices/v1_0/jf_nezumi.pt
api/src/voices/v1_0/jf_tebukuro.pt
api/src/voices/v1_0/jm_kumo.pt
api/src/voices/v1_0/pf_dora.pt
api/src/voices/v1_0/pm_alex.pt
api/src/voices/v1_0/pm_santa.pt
api/src/voices/v1_0/zf_xiaobei.pt
api/src/voices/v1_0/zf_xiaoni.pt
api/src/voices/v1_0/zf_xiaoxiao.pt
api/src/voices/v1_0/zf_xiaoyi.pt
api/src/voices/v1_0/zm_yunjian.pt
api/src/voices/v1_0/zm_yunxi.pt
api/src/voices/v1_0/zm_yunxia.pt
api/src/voices/v1_0/zm_yunyang.pt
229
api/tests/test_openai_endpoints_v2.py
Normal file
@ -0,0 +1,229 @@
"""Tests for OpenAI-compatible v2 endpoints."""

import pytest
from fastapi.testclient import TestClient
from loguru import logger

from ..main import app


@pytest.fixture
def client():
    """Test client fixture."""
    return TestClient(app)


def test_health_check(client):
    """Test health check endpoint."""
    response = client.get("/health")
    assert response.status_code == 200
    assert response.json() == {"status": "healthy"}


def test_list_versions(client):
    """Test version listing endpoint."""
    response = client.get("/v2/audio/versions")
    assert response.status_code == 200
    data = response.json()
    assert "versions" in data
    assert "current" in data
    assert "v0.19" in data["versions"]
    assert "v1.0" in data["versions"]


def test_set_version(client):
    """Test version setting endpoint."""
    # Set to v1.0
    response = client.post("/v2/audio/version", json="v1.0")
    assert response.status_code == 200
    data = response.json()
    assert data["current"] == "v1.0"

    # Set back to v0.19
    response = client.post("/v2/audio/version", json="v0.19")
    assert response.status_code == 200
    data = response.json()
    assert data["current"] == "v0.19"

    # Test invalid version
    response = client.post("/v2/audio/version", json="invalid_version")
    assert response.status_code == 400


def test_list_voices(client):
    """Test voice listing endpoint."""
    response = client.get("/v2/audio/voices")
    assert response.status_code == 200
    data = response.json()
    assert "voices" in data
    assert len(data["voices"]) > 0


def test_combine_voices(client):
    """Test voice combination endpoint."""
    # Test with string input
    response = client.post("/v2/audio/voices/combine", json="af_bella+af_nicole")
    assert response.status_code == 200
    data = response.json()
    assert "voice" in data
    assert "voices" in data

    # Test with list input
    response = client.post("/v2/audio/voices/combine", json=["af_bella", "af_nicole"])
    assert response.status_code == 200
    data = response.json()
    assert "voice" in data
    assert "voices" in data


def test_speech_generation_v0_19(client):
    """Test speech generation with v0.19."""
    request_data = {
        "model": "tts-1",
        "input": "Hello, world!",
        "voice": "af_bella",
        "response_format": "wav",
        "speed": 1.0,
        "stream": False,
        "version": "v0.19"
    }

    response = client.post("/v2/audio/speech", json=request_data)
    assert response.status_code == 200
    assert response.headers["content-type"] == "audio/wav"
    assert len(response.content) > 0


def test_speech_generation_v1_0(client):
    """Test speech generation with v1.0."""
    request_data = {
        "model": "tts-1",
        "input": "Hello, world!",
        "voice": "af_bella",
        "response_format": "wav",
        "speed": 1.0,
        "stream": False,
        "version": "v1.0"
    }

    response = client.post("/v2/audio/speech", json=request_data)
    assert response.status_code == 200
    assert response.headers["content-type"] == "audio/wav"
    assert len(response.content) > 0


def test_streaming_speech_v0_19(client):
    """Test streaming speech generation with v0.19."""
    request_data = {
        "model": "tts-1",
        "input": "Hello, world!",
        "voice": "af_bella",
        "response_format": "wav",
        "speed": 1.0,
        "stream": True,
        "version": "v0.19"
    }

    with client.stream("POST", "/v2/audio/speech", json=request_data) as response:
        assert response.status_code == 200
        content = b""
        for chunk in response.iter_bytes():
            assert len(chunk) > 0
            content += chunk
        assert len(content) > 0


def test_streaming_speech_v1_0(client):
    """Test streaming speech generation with v1.0."""
    request_data = {
        "model": "tts-1",
        "input": "Hello, world!",
        "voice": "af_bella",
        "response_format": "wav",
        "speed": 1.0,
        "stream": True,
        "version": "v1.0"
    }

    with client.stream("POST", "/v2/audio/speech", json=request_data) as response:
        assert response.status_code == 200
        content = b""
        for chunk in response.iter_bytes():
            assert len(chunk) > 0
            content += chunk
        assert len(content) > 0


def test_invalid_model(client):
    """Test invalid model handling."""
    request_data = {
        "model": "invalid-model",
        "input": "Hello, world!",
        "voice": "af_bella",
        "response_format": "wav",
        "version": "v1.0"
    }

    response = client.post("/v2/audio/speech", json=request_data)
    assert response.status_code == 400
    data = response.json()
    assert "error" in data
    assert data["error"] == "invalid_model"


def test_invalid_voice(client):
    """Test invalid voice handling."""
    request_data = {
        "model": "tts-1",
        "input": "Hello, world!",
        "voice": "invalid_voice",
        "response_format": "wav",
        "version": "v1.0"
    }

    response = client.post("/v2/audio/speech", json=request_data)
    assert response.status_code == 400
    data = response.json()
    assert "error" in data
    assert data["error"] == "validation_error"


def test_invalid_version(client):
    """Test invalid version handling."""
    request_data = {
        "model": "tts-1",
        "input": "Hello, world!",
        "voice": "af_bella",
        "response_format": "wav",
        "version": "invalid_version"
    }

    response = client.post("/v2/audio/speech", json=request_data)
    assert response.status_code == 400
    data = response.json()
    assert "error" in data
    assert data["error"] == "validation_error"


def test_download_link(client):
    """Test download link functionality."""
    request_data = {
        "model": "tts-1",
        "input": "Hello, world!",
        "voice": "af_bella",
        "response_format": "wav",
        "speed": 1.0,
        "stream": True,
        "return_download_link": True,
        "version": "v1.0"
    }

    with client.stream("POST", "/v2/audio/speech", json=request_data) as response:
        assert response.status_code == 200
        assert "X-Download-Path" in response.headers
        download_path = response.headers["X-Download-Path"]

        # Try downloading the file
        download_response = client.get(f"/download/{download_path}")
        assert download_response.status_code == 200
        assert len(download_response.content) > 0
207
api/tests/test_tts_service_v2.py
Normal file
@ -0,0 +1,207 @@
"""Tests for TTSService v2 with version support."""

import numpy as np
import pytest
from loguru import logger

from ..services.tts_service_v2 import TTSService


@pytest.fixture
async def tts_service():
    """Fixture for TTSService instance."""
    service = await TTSService.create()
    yield service


@pytest.mark.asyncio
async def test_service_initialization(tts_service):
    """Test TTSService initialization."""
    assert tts_service is not None
    assert tts_service.version_manager is not None
    assert tts_service._voice_manager is not None


@pytest.mark.asyncio
async def test_version_selection(tts_service):
    """Test version selection in TTSService."""
    # Default version should be v0.19
    assert tts_service.current_version == "v0.19"

    # Change version
    tts_service.set_version("v1.0")
    assert tts_service.current_version == "v1.0"

    # List available versions
    versions = tts_service.available_versions
    assert "v0.19" in versions
    assert "v1.0" in versions


@pytest.mark.asyncio
async def test_audio_generation_v0_19(tts_service):
    """Test audio generation with v0.19."""
    text = "Hello, world!"
    voice = "af_bella"  # Use a known test voice

    # Set version explicitly
    tts_service.set_version("v0.19")

    # Generate audio
    audio, processing_time = await tts_service.generate_audio(
        text=text,
        voice=voice,
        speed=1.0,
        version="v0.19"
    )

    assert isinstance(audio, np.ndarray)
    assert len(audio) > 0
    assert processing_time > 0


@pytest.mark.asyncio
async def test_audio_generation_v1_0(tts_service):
    """Test audio generation with v1.0."""
    text = "Hello, world!"
    voice = "af_bella"  # Use a known test voice

    # Set version explicitly
    tts_service.set_version("v1.0")

    # Generate audio
    audio, processing_time = await tts_service.generate_audio(
        text=text,
        voice=voice,
        speed=1.0,
        version="v1.0"
    )

    assert isinstance(audio, np.ndarray)
    assert len(audio) > 0
    assert processing_time > 0


@pytest.mark.asyncio
async def test_streaming_v0_19(tts_service):
    """Test audio streaming with v0.19."""
    text = "Hello, world!"
    voice = "af_bella"
    chunks = []

    async for chunk in tts_service.generate_audio_stream(
        text=text,
        voice=voice,
        speed=1.0,
        version="v0.19",
        output_format="wav"
    ):
        assert chunk is not None
        assert len(chunk) > 0
        chunks.append(chunk)

    assert len(chunks) > 0


@pytest.mark.asyncio
async def test_streaming_v1_0(tts_service):
    """Test audio streaming with v1.0."""
    text = "Hello, world!"
    voice = "af_bella"
    chunks = []

    async for chunk in tts_service.generate_audio_stream(
        text=text,
        voice=voice,
        speed=1.0,
        version="v1.0",
        output_format="wav"
    ):
        assert chunk is not None
        assert len(chunk) > 0
        chunks.append(chunk)

    assert len(chunks) > 0


@pytest.mark.asyncio
async def test_voice_compatibility(tts_service):
    """Test voice compatibility across versions."""
    # List voices
    voices = await tts_service.list_voices()
    assert len(voices) > 0

    # Test a voice with both versions
    test_voice = "af_bella"
    assert test_voice in voices

    # Test with v0.19
    audio_v0, _ = await tts_service.generate_audio(
        text="Test",
        voice=test_voice,
        version="v0.19"
    )
    assert isinstance(audio_v0, np.ndarray)
    assert len(audio_v0) > 0

    # Test with v1.0
    audio_v1, _ = await tts_service.generate_audio(
        text="Test",
        voice=test_voice,
        version="v1.0"
    )
    assert isinstance(audio_v1, np.ndarray)
    assert len(audio_v1) > 0


@pytest.mark.asyncio
async def test_invalid_version(tts_service):
    """Test handling of invalid version."""
    with pytest.raises(ValueError):
        await tts_service.generate_audio(
            text="Test",
            voice="af_bella",
            version="invalid_version"
        )


@pytest.mark.asyncio
async def test_invalid_voice(tts_service):
    """Test handling of invalid voice."""
    with pytest.raises(ValueError):
        await tts_service.generate_audio(
            text="Test",
            voice="invalid_voice",
            version="v1.0"
        )


@pytest.mark.asyncio
async def test_empty_text(tts_service):
    """Test handling of empty text."""
    with pytest.raises(ValueError):
        await tts_service.generate_audio(
            text="",
            voice="af_bella",
            version="v1.0"
        )


@pytest.mark.asyncio
async def test_voice_combination(tts_service):
    """Test voice combination with different versions."""
    voices = ["af_bella", "af_nicole"]

    # Combine voices
    combined_voice = await tts_service.combine_voices(voices)
    assert combined_voice is not None

    # Test combined voice with both versions
    for version in ["v0.19", "v1.0"]:
        audio, _ = await tts_service.generate_audio(
            text="Test combined voice",
            voice=combined_voice,
            version=version
        )
        assert isinstance(audio, np.ndarray)
        assert len(audio) > 0
104
api/tests/test_version_manager.py
Normal file
@ -0,0 +1,104 @@
"""Tests for version manager functionality."""

import pytest
from loguru import logger

from ..inference.version_manager import get_version_manager


@pytest.mark.asyncio
async def test_version_manager_initialization():
    """Test version manager initialization."""
    manager = await get_version_manager()
    assert manager is not None
    assert manager.current_version == "v0.19"  # Default version
    assert set(manager.available_versions) == {"v0.19", "v1.0"}


@pytest.mark.asyncio
async def test_version_switching():
    """Test switching between model versions."""
    manager = await get_version_manager()

    # Switch to v1.0
    manager.set_default_version("v1.0")
    assert manager.current_version == "v1.0"

    # Switch back to v0.19
    manager.set_default_version("v0.19")
    assert manager.current_version == "v0.19"

    # Test invalid version
    with pytest.raises(ValueError):
        manager.set_default_version("invalid_version")


@pytest.mark.asyncio
async def test_model_loading():
    """Test loading models for different versions."""
    manager = await get_version_manager()

    # Load v0.19 model
    v0_model = await manager.get_model("v0.19")
    assert v0_model is not None

    # Load v1.0 model
    v1_model = await manager.get_model("v1.0")
    assert v1_model is not None

    # Models should be cached
    v0_model_cached = await manager.get_model("v0.19")
    assert v0_model_cached is v0_model

    v1_model_cached = await manager.get_model("v1.0")
    assert v1_model_cached is v1_model


@pytest.mark.asyncio
async def test_model_unloading():
    """Test unloading all models."""
    manager = await get_version_manager()

    # Load both models
    await manager.get_model("v0.19")
    await manager.get_model("v1.0")

    # Unload all
    manager.unload_all()
    assert not manager.models

    # Models should reload when requested
    v0_model = await manager.get_model("v0.19")
    assert v0_model is not None

    v1_model = await manager.get_model("v1.0")
    assert v1_model is not None


@pytest.mark.asyncio
async def test_invalid_model_request():
    """Test requesting invalid model version."""
    manager = await get_version_manager()

    with pytest.raises(ValueError):
        await manager.get_model("invalid_version")


@pytest.mark.asyncio
async def test_default_version_model():
    """Test getting model with default version."""
    manager = await get_version_manager()

    # Get model without specifying version
    default_model = await manager.get_model()
    assert default_model is not None

    # Should match explicitly requesting v0.19
    v0_model = await manager.get_model("v0.19")
    assert default_model is v0_model

    # Change default and verify
    manager.set_default_version("v1.0")
    new_default_model = await manager.get_model()
    v1_model = await manager.get_model("v1.0")
    assert new_default_model is v1_model
@ -12,13 +12,21 @@ REPO="kokoro-fastapi"
# Create and use a new builder that supports multi-platform builds
docker buildx create --name multiplatform-builder --use || true

# Enable BuildKit garbage collection
export BUILDKIT_STEP_LOG_MAX_SIZE=10485760
export DOCKER_BUILDKIT=1

# Build CPU image with multi-platform support
echo "Building CPU image..."
docker buildx build --platform linux/amd64,linux/arm64 \
    -t ${REGISTRY}/${OWNER}/${REPO}-cpu:${VERSION} \
    -t ${REGISTRY}/${OWNER}/${REPO}-cpu:latest \
    -f docker/cpu/Dockerfile \
    --push .
    --push \
    --build-arg BUILDKIT_INLINE_CACHE=1 .

# Clean up between builds
docker builder prune -f

# Build GPU image with multi-platform support
echo "Building GPU image..."

@ -26,7 +34,8 @@ docker buildx build --platform linux/amd64,linux/arm64 \
    -t ${REGISTRY}/${OWNER}/${REPO}-gpu:${VERSION} \
    -t ${REGISTRY}/${OWNER}/${REPO}-gpu:latest \
    -f docker/gpu/Dockerfile \
    --push .
    --push \
    --build-arg BUILDKIT_INLINE_CACHE=1 .

echo "Build complete!"
echo "Created images:"
121
docs/architecture/adr_kokoro_v1_integration.md
Normal file
@ -0,0 +1,121 @@
# Architectural Decision Record: Kokoro v1.0 Integration

## Context

We are integrating Kokoro v1.0 while maintaining backward compatibility with v0.19. The v1.0 release introduces significant architectural changes including a new KModel/KPipeline design, language-blind model architecture, and built-in vocab management.

## Decision

We will implement a hybrid architecture that:

1. Maintains existing streaming infrastructure
2. Supports both v0.19 and v1.0 models
3. Adapts the new KModel/KPipeline interface to our system

### Key Components

#### 1. Version-Specific Model Builds
```
api/src/builds/
├── v0_19/   # Current implementation
└── v1_0/    # New implementation using KModel
```

#### 2. Model Manager Interface
```python
class ModelManager:
    def __init__(self):
        self.models = {}  # version -> model

    async def get_model(self, version: str):
        if version not in self.models:
            if version == "v0.19":
                from ..builds.v0_19.models import build_model
            elif version == "v1.0":
                from ..builds.v1_0.models import build_model
            self.models[version] = await build_model()
        return self.models[version]
```

#### 3. Voice Management
```
api/src/voices/
├── v0_19/
└── v1_0/
```

### Integration Strategy

1. Model Integration
   - Wrap KModel in our build system
   - Adapt to new forward pass interface
   - Handle phoneme mapping internally

2. Pipeline Integration
   ```python
   class V1ModelWrapper:
       def __init__(self, kmodel):
           self.model = kmodel
           self.pipeline = KPipeline(model=kmodel)

       async def forward(self, text, voice):
           # Adapt v1.0 interface to our streaming system
           generator = self.pipeline(text, voice=voice)
           for gs, ps, audio in generator:
               yield audio
   ```

3. API Layer
   - Add version parameter to endpoints
   - Default to v1.0 if not specified
   - Maintain backward compatibility

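To make the API-layer item concrete, here is a hedged sketch of a version-aware speech endpoint. The route shape and request fields follow the v2 endpoint tests in this commit; the exact wiring to the model manager is an illustrative assumption, not final code.

```python
# Illustrative sketch only; the real router lives in the API layer of the service.
from typing import Optional

from fastapi import APIRouter, HTTPException
from pydantic import BaseModel

router = APIRouter(prefix="/v2/audio")

SUPPORTED_VERSIONS = {"v0.19", "v1.0"}


class SpeechRequest(BaseModel):
    model: str
    input: str
    voice: str
    response_format: str = "wav"
    speed: float = 1.0
    stream: bool = False
    version: Optional[str] = None  # falls back to the service default


@router.post("/speech")
async def create_speech(request: SpeechRequest):
    version = request.version or "v1.0"  # default per the decision above
    if version not in SUPPORTED_VERSIONS:
        raise HTTPException(status_code=400, detail="validation_error")
    # Hand off to the version-aware TTS service; generation itself is out of scope here.
    ...
```
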
## Consequences

### Positive
- Clean separation between v0.19 and v1.0 implementations
- Minimal changes to existing streaming infrastructure
- Simple version switching mechanism
- Local voice management maintained

### Negative
- Some code duplication between versions
- Additional wrapper layer for v1.0
- Need to maintain two parallel implementations

### Neutral
- Similar memory footprint (models ~few hundred MB)
- Comparable inference speed expected
- No major architectural bottlenecks

## Implementation Plan

1. Directory Structure Setup
   - Create version-specific directories
   - Move current implementation to v0_19/

2. V1.0 Integration
   - Implement KModel wrapper
   - Add version-aware model manager
   - Setup voice directory structure

3. Testing Focus
   - Basic inference for both versions
   - Voice compatibility
   - Streaming performance
   - Version switching
   - API endpoint compatibility

## Migration Path

1. Initial Release
   - Both versions available
   - v0.19 as default

2. Transition Period
   - v1.0 as default
   - v0.19 still available

3. Future
   - Consider deprecation timeline for v0.19
   - Document migration path for users
92
docs/architecture/file_migration_plan.md
Normal file
@ -0,0 +1,92 @@
# Kokoro v1.0 File Migration Plan

## Source Files (Kokoro-82M)

```
Kokoro-82M/
├── config.json        # Model configuration
├── kokoro-v1_0.pth    # Model weights
└── voices/            # Voice files
    ├── af_alloy.pt
    ├── af_aoede.pt
    ├── af_bella.pt
    └── ...
```

## Target Structure

```
api/src/builds/
├── v0_19/              # Current implementation
│   ├── config.json     # Move current config.json here
│   ├── models.py       # Move current models.py here
│   ├── istftnet.py     # Move current istftnet.py here
│   └── plbert.py       # Move current plbert.py here
└── v1_0/               # New implementation
    ├── config.json     # From Kokoro-82M/config.json
    ├── models.py       # To be created - build system integration
    └── wrapper.py      # To be created - KModel/KPipeline wrapper

api/src/models/
└── v1_0/               # Model weights directory
    └── kokoro-v1_0.pth # From Kokoro-82M/kokoro-v1_0.pth

api/src/voices/
├── v0_19/              # Current voices
│   ├── af_bella.pt
│   ├── af_nicole.pt
│   └── ...
└── v1_0/               # From Kokoro-82M/voices/
    ├── af_alloy.pt
    ├── af_aoede.pt
    └── ...
```

## Migration Steps

1. Create Directory Structure
   - Create api/src/builds/v0_19/
   - Create api/src/builds/v1_0/
   - Create api/src/models/v1_0/
   - Create api/src/voices/v0_19/
   - Create api/src/voices/v1_0/

2. Move Current Implementation to v0.19
   - Move api/src/builds/config.json -> api/src/builds/v0_19/config.json
   - Move api/src/builds/models.py -> api/src/builds/v0_19/models.py
   - Move api/src/builds/istftnet.py -> api/src/builds/v0_19/istftnet.py
   - Move api/src/builds/plbert.py -> api/src/builds/v0_19/plbert.py
   - Move api/src/voices/*.pt -> api/src/voices/v0_19/

3. Copy v1.0 Files
   - Copy Kokoro-82M/config.json -> api/src/builds/v1_0/config.json
   - Copy Kokoro-82M/kokoro-v1_0.pth -> api/src/models/v1_0/kokoro-v1_0.pth
   - Copy Kokoro-82M/voices/*.pt -> api/src/voices/v1_0/

4. Create New Implementation Files
   - Create api/src/builds/v1_0/wrapper.py for KModel integration
   - Create api/src/builds/v1_0/models.py for build system integration

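As a rough aid for steps 1-3, the moves and copies could be scripted with the standard library. This is an illustrative sketch, not a tested migration tool; the `Kokoro-82M` checkout location is an assumption.

```python
# Illustrative helper for the migration steps above; run from the repo root.
import shutil
from pathlib import Path

REPO = Path(".")
SOURCE = Path("Kokoro-82M")  # upstream release checkout (assumed location)

# 1. Create the version-specific directories
for d in [
    "api/src/builds/v0_19",
    "api/src/builds/v1_0",
    "api/src/models/v1_0",
    "api/src/voices/v0_19",
    "api/src/voices/v1_0",
]:
    (REPO / d).mkdir(parents=True, exist_ok=True)

# 2. Move the current implementation under v0_19/
for name in ["config.json", "models.py", "istftnet.py", "plbert.py"]:
    src = REPO / "api/src/builds" / name
    if src.exists():
        shutil.move(str(src), str(REPO / "api/src/builds/v0_19" / name))
for voice in (REPO / "api/src/voices").glob("*.pt"):
    shutil.move(str(voice), str(REPO / "api/src/voices/v0_19" / voice.name))

# 3. Copy the v1.0 release files into place
shutil.copy2(SOURCE / "config.json", REPO / "api/src/builds/v1_0/config.json")
shutil.copy2(SOURCE / "kokoro-v1_0.pth", REPO / "api/src/models/v1_0/kokoro-v1_0.pth")
for voice in (SOURCE / "voices").glob("*.pt"):
    shutil.copy2(voice, REPO / "api/src/voices/v1_0" / voice.name)
```
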
## Implementation Notes

1. Voice Management
   - Keep voice files local, no HF downloads
   - Maintain compatibility with both versions
   - Consider voice file format differences

2. Model Integration
   - Use kokoro package for v1.0 model loading
   - Model weights accessed from api/src/models/v1_0/
   - Adapt to our streaming infrastructure
   - Handle version-specific configurations

3. Testing Considerations
   - Verify file permissions after moves
   - Test voice loading from both directories
   - Ensure backward compatibility
   - Validate streaming performance

4. Code Updates
   - Update model loading paths in wrapper.py to point to api/src/models/v1_0/
   - Maintain separation between model weights and build configuration
   - Ensure proper error handling for missing files
49
docs/architecture/implementation_checklist.md
Normal file
@ -0,0 +1,49 @@
# Kokoro v1.0 Implementation Checklist

## 1. Directory Setup
- [ ] Create api/src/models/v1_0/
- [ ] Create api/src/builds/v0_19/
- [ ] Create api/src/builds/v1_0/
- [ ] Create api/src/voices/v0_19/
- [ ] Create api/src/voices/v1_0/

## 2. Move Current Implementation to v0.19
- [ ] Move api/src/builds/config.json -> api/src/builds/v0_19/config.json
- [ ] Move api/src/builds/models.py -> api/src/builds/v0_19/models.py
- [ ] Move api/src/builds/istftnet.py -> api/src/builds/v0_19/istftnet.py
- [ ] Move api/src/builds/plbert.py -> api/src/builds/v0_19/plbert.py
- [ ] Move current voices to api/src/voices/v0_19/

## 3. Copy v1.0 Files from Kokoro-82M
- [ ] Copy kokoro-v1_0.pth -> api/src/models/v1_0/kokoro-v1_0.pth
- [ ] Copy config.json -> api/src/builds/v1_0/config.json
- [ ] Copy voices/*.pt -> api/src/voices/v1_0/

## 4. Create v1.0 Implementation
- [ ] Create api/src/builds/v1_0/wrapper.py
- [ ] Create api/src/builds/v1_0/models.py
- [ ] Update imports in moved files
- [ ] Install kokoro package dependencies

## 5. Update Model Manager
- [ ] Update model manager to support both versions
- [ ] Add version-specific model loading
- [ ] Add version selection parameter to API endpoints

## 6. Testing
- [ ] Test v0.19 functionality still works
- [ ] Test v1.0 model loading
- [ ] Test voice loading for both versions
- [ ] Test streaming functionality
- [ ] Test version switching

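Most of the testing items above can be covered by parametrizing one smoke test over both versions. A minimal sketch, assuming the `TTSService` API exercised by the v2 service tests (`create()` and `generate_audio(..., version=...)`); the import path is an assumption.

```python
# Sketch of a version-parametrized smoke test; mirrors the v2 service test fixtures.
import numpy as np
import pytest


@pytest.mark.asyncio
@pytest.mark.parametrize("version", ["v0.19", "v1.0"])
async def test_basic_generation_per_version(version):
    # Import path assumed; adjust to the project layout.
    from api.src.services.tts_service_v2 import TTSService

    service = await TTSService.create()
    audio, _ = await service.generate_audio(
        text="Hello, world!",
        voice="af_bella",
        speed=1.0,
        version=version,
    )
    assert isinstance(audio, np.ndarray)
    assert len(audio) > 0
```
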
## 7. Documentation
- [ ] Update API documentation with version parameter
- [ ] Document voice compatibility
- [ ] Add migration guide for users

## Notes
- Model weights go in api/src/models/v1_0/
- Build configs and code go in api/src/builds/v1_0/
- Keep voice files local, no HF downloads
- Test each step before proceeding to next
68
docs/architecture/kokoro_v1_refactor.md
Normal file
@ -0,0 +1,68 @@
# Kokoro Version Support Architecture

## Overview

Simple architecture for supporting both Kokoro v0.19 and v1.0 models, allowing version selection via API.

## Directory Structure

```
api/src/builds/
├── v0_19/          # Current implementation
│   ├── config.json
│   ├── models.py
│   ├── istftnet.py
│   └── plbert.py
└── v1_0/           # New v1.0 implementation
    ├── config.json
    ├── models.py
    ├── istftnet.py
    └── albert.py
```

## Implementation Plan

1. Move Current Implementation
   - Relocate existing files to v0_19/
   - Update imports

2. Add v1.0 Implementation
   - Copy reference implementation
   - Adapt to our structure
   - Keep voice management local

3. Model Manager Updates
   ```python
   class ModelManager:
       def __init__(self):
           self.models = {}  # version -> model

       async def get_model(self, version: str):
           if version not in self.models:
               if version == "v0.19":
                   from ..builds.v0_19.models import build_model
               elif version == "v1.0":
                   from ..builds.v1_0.models import build_model
               self.models[version] = await build_model()
           return self.models[version]
   ```

4. API Integration
   - Add version parameter to endpoints
   - Default to v1.0 if not specified

## Voice Management
- Simple directory structure:
  ```
  api/src/voices/
  ├── v0_19/
  └── v1_0/
  ```
- Keep voice files local, no HF downloads

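To illustrate the local, version-keyed voice layout, a small hedged sketch of how a voice file might be resolved and listed. Directory names come from this doc; the helper functions themselves are hypothetical, not the service's actual voice manager.

```python
# Hypothetical helpers for the version-keyed voice directories described above.
from pathlib import Path

import torch

VOICES_ROOT = Path("api/src/voices")
VERSION_DIRS = {"v0.19": "v0_19", "v1.0": "v1_0"}


def load_voice(name: str, version: str = "v1.0") -> torch.Tensor:
    """Load a local voice tensor (e.g. 'af_bella') for the given model version."""
    try:
        voice_dir = VOICES_ROOT / VERSION_DIRS[version]
    except KeyError:
        raise ValueError(f"Unknown version: {version}")
    voice_path = voice_dir / f"{name}.pt"
    if not voice_path.exists():
        raise ValueError(f"Voice not found locally: {voice_path}")
    # Voice files are small (a few hundred KB), so loading on demand is fine.
    return torch.load(voice_path, map_location="cpu")


def list_voices(version: str = "v1.0") -> list[str]:
    """List voice names available for a version, based on the local .pt files."""
    voice_dir = VOICES_ROOT / VERSION_DIRS[version]
    return sorted(p.stem for p in voice_dir.glob("*.pt"))
```
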
## Testing
- Basic functionality tests for each version
- Version switching tests
- Voice compatibility tests

No need to over-optimize - models and voices are small enough to keep things simple.
116
docs/architecture/kokoro_v1_technical_notes.md
Normal file
@ -0,0 +1,116 @@
# Kokoro v1.0 Technical Integration Notes

## Core Components

1. KModel Class
   - Main model class with unified interface
   - Handles both weights and inference
   - Language-blind design (phoneme focused)
   - No external language processing

2. Key Architecture Changes
   - Uses CustomAlbert instead of PLBert
   - New ProsodyPredictor implementation
   - Different phoneme handling approach
   - Built-in vocab management

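The built-in vocab is simply the phoneme-to-id map carried in config.json (see the `vocab` block in builds/v1_0/config.json). As a rough illustration of what "built-in vocab management" means in practice, a hedged sketch of tokenizing a phoneme string with it; the helper name is ours, not the kokoro package's API, and unknown symbols are silently skipped here.

```python
# Illustrative only: shows how the config's phoneme vocab maps symbols to ids.
import json
from pathlib import Path

CONFIG_PATH = Path("api/src/builds/v1_0/config.json")


def phonemes_to_ids(phonemes: str) -> list[int]:
    """Map a phoneme string to token ids using the vocab shipped in config.json."""
    vocab = json.loads(CONFIG_PATH.read_text(encoding="utf-8"))["vocab"]
    # Unknown symbols are skipped here; the real model may handle them differently.
    return [vocab[ch] for ch in phonemes if ch in vocab]


if __name__ == "__main__":
    # Example: ids for a short phoneme sequence (only symbols present in the vocab count).
    print(phonemes_to_ids("həloʊ"))
```
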
## Integration Points

1. Model Loading
   ```python
   # v1.0 approach
   model = KModel(config_path, model_path)

   # vs our current
   model = await build_model(path, device)
   ```

2. Forward Pass Differences
   ```python
   # v1.0
   audio = model(phonemes, ref_s, speed=1.0)

   # vs our current
   audio = model.decoder(asr, F0_pred, N_pred, ref_s)
   ```

3. Key Dependencies
   - transformers (for AlbertConfig)
   - torch
   - No external phoneme processing

## Configuration Changes

1. v1.0 Config Structure
   ```json
   {
       "vocab": {...},      # Built-in phoneme mapping
       "n_token": X,
       "plbert": {...},     # Albert config
       "hidden_dim": X,
       "style_dim": X,
       "istftnet": {...}
   }
   ```

2. Voice Management
   - No HF downloads
   - Local voice file management
   - Simpler voice structure

## Implementation Strategy

1. Core Changes
   - Keep our streaming infrastructure
   - Adapt to new model interface
   - Maintain our voice management

2. Key Adaptations Needed
   - Wrap KModel in our build system
   - Handle phoneme mapping internally
   - Adapt to new prosody prediction

3. Compatibility Layer
   ```python
   class V1ModelWrapper:
       def __init__(self, kmodel):
           self.model = kmodel

       async def forward(self, phonemes, ref_s):
           # Adapt v1.0 interface to our system
           return self.model(phonemes, ref_s)
   ```

## Technical Considerations

1. Memory Usage
   - Models ~few hundred MB
   - Voices ~few hundred KB
   - No need for complex memory management

2. Performance
   - Similar inference speed expected
   - No major architectural bottlenecks
   - Keep existing streaming optimizations

3. Integration Points
   - Model loading/initialization
   - Voice file management
   - Inference pipeline
   - Streaming output

## Migration Notes

1. Key Files to Port
   - model.py -> v1_0/models.py
   - istftnet.py -> v1_0/istftnet.py
   - Add albert.py for CustomAlbert

2. Config Updates
   - Add version selection
   - Keep config structure similar
   - Add v1.0 specific params

3. Testing Focus
   - Basic inference
   - Voice compatibility
   - Streaming performance
   - Version switching
175
docs/architecture/v1_wrapper_spec.md
Normal file
@ -0,0 +1,175 @@
# Kokoro v1.0 Wrapper Technical Specification

## Overview

This document details the technical implementation of the KokoroV1Wrapper class that integrates the Kokoro v1.0 KModel/KPipeline architecture with our existing system.

## Class Implementation

```python
from pathlib import Path
from kokoro import KModel, KPipeline


class KokoroV1Wrapper:
    """Wrapper for Kokoro v1.0 KModel/KPipeline integration.

    This wrapper manages:
    1. Model initialization and weight loading
    2. Pipeline creation and caching per language
    3. Streaming audio generation
    """

    def __init__(self, config_path: str, model_path: str):
        """Initialize KModel with config and weights.

        Args:
            config_path: Path to config.json in builds/v1_0/
            model_path: Path to model weights in models/v1_0/
        """
        self.model = KModel()  # Will load config and weights
        self.pipelines = {}  # lang_code -> KPipeline cache

    def get_pipeline(self, lang_code: str) -> KPipeline:
        """Get or create a KPipeline for the given language code.

        Args:
            lang_code: Language code for phoneme processing

        Returns:
            KPipeline instance for the language
        """
        if lang_code not in self.pipelines:
            self.pipelines[lang_code] = KPipeline(
                lang_code=lang_code,
                model=self.model
            )
        return self.pipelines[lang_code]

    async def forward(self, text: str, voice: str, lang_code: str):
        """Generate audio using the appropriate pipeline.

        Args:
            text: Input text to synthesize
            voice: Voice ID to use
            lang_code: Language code for phoneme processing

        Yields:
            Audio chunks as torch.FloatTensor
        """
        pipeline = self.get_pipeline(lang_code)
        generator = pipeline(text, voice=voice)
        for gs, ps, audio in generator:
            yield audio


class ModelManager:
    """Manages multiple model versions and their initialization."""

    def __init__(self):
        self.models = {}

    async def get_model(self, version: str):
        """Get or initialize a model for the specified version.

        Args:
            version: Model version ("v0.19" or "v1.0")

        Returns:
            Initialized model instance
        """
        if version not in self.models:
            if version == "v0.19":
                from ..builds.v0_19.models import build_model
                self.models[version] = await build_model()
            elif version == "v1.0":
                from ..builds.v1_0.wrapper import KokoroV1Wrapper

                # Config in builds directory
                config_path = Path(__file__).parent / "builds/v1_0/config.json"

                # Model weights in models directory
                model_path = Path(__file__).parent / "models/v1_0/kokoro-v1_0.pth"

                self.models[version] = KokoroV1Wrapper(
                    config_path=str(config_path),
                    model_path=str(model_path)
                )
        return self.models[version]
```

## Key Design Points

1. Model Management
   - KModel handles weights and inference
   - Config and weights loaded from separate directories
   - Language-blind design (phoneme focused)

2. Pipeline Caching
   - One KPipeline per language code
   - Pipelines created on demand and cached
   - Reuses single KModel instance

3. Streaming Integration
   - Maintains compatibility with existing streaming system
   - Yields audio chunks progressively
   - Handles both quiet and loud pipeline modes

4. Version Control
   - Clear separation between v0.19 and v1.0
   - Version-specific model initialization
   - Shared model manager interface

## Usage Example

```python
# Initialize model manager
manager = ModelManager()

# Get v1.0 model
model = await manager.get_model("v1.0")

# Generate audio
async for audio in model.forward(
    text="Hello world",
    voice="af_bella",
    lang_code="en"
):
    # Process audio chunk
    process_audio(audio)
```

## Error Handling

1. File Access
   - Verify config.json exists in builds/v1_0/
   - Verify model weights exist in models/v1_0/
   - Handle missing or corrupt files

2. Pipeline Creation
   - Validate language codes
   - Handle initialization failures
   - Clean up failed pipeline instances

3. Voice Loading
   - Verify voice file existence
   - Handle voice format compatibility
   - Manage voice loading failures

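A small hedged sketch of the file-access checks described above, as they might sit in front of wrapper construction. Paths come from this spec; the function name, error types, and messages are placeholders rather than the spec's final error handling.

```python
# Illustrative pre-flight checks for the wrapper; not the spec's final error handling.
from pathlib import Path

BUILDS_DIR = Path("api/src/builds/v1_0")
MODELS_DIR = Path("api/src/models/v1_0")


def validate_v1_files() -> tuple[Path, Path]:
    """Return (config_path, model_path) or raise with a clear message."""
    config_path = BUILDS_DIR / "config.json"
    model_path = MODELS_DIR / "kokoro-v1_0.pth"
    if not config_path.is_file():
        raise FileNotFoundError(f"Missing v1.0 config: {config_path}")
    if not model_path.is_file():
        raise FileNotFoundError(f"Missing v1.0 weights: {model_path}")
    if model_path.stat().st_size == 0:
        raise ValueError(f"Model weights look corrupt (empty file): {model_path}")
    return config_path, model_path
```
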
## Testing Strategy

1. Unit Tests
   - Model initialization
   - Pipeline creation and caching
   - Audio generation
   - Error handling

2. Integration Tests
   - End-to-end audio generation
   - Streaming performance
   - Memory usage
   - Multi-language support

3. Performance Tests
   - Pipeline creation overhead
   - Memory usage patterns
   - Streaming latency
   - Voice loading speed
@ -4,20 +4,22 @@ from pathlib import Path

from openai import OpenAI

# gets OPENAI_API_KEY from your environment variables
# By default, the library tries to take OPENAI_API_KEY
# from your environment variables,
# so set explicitly to any string to allow it to skip the check
openai = OpenAI(base_url="http://localhost:8880/v1", api_key="not-needed-for-local")

speech_file_path = Path(__file__).parent / "speech.mp3"


def main() -> None:
    stream_to_speakers()

    # Create text-to-speech audio file
    with openai.audio.speech.with_streaming_response.create(
        model="kokoro",
        voice="af_bella",
        voice="af_sky+af_bella",
        input="the quick brown fox jumped over the lazy dogs",
        response_format="mp3",
    ) as response:
        response.stream_to_file(speech_file_path)
Binary file not shown.
@ -37,6 +37,7 @@ dependencies = [
    "semchunk>=3.0.1",
    "mutagen>=1.47.0",
    "psutil>=6.1.1",
    "kokoro==0.3.1"
]

[project.optional-dependencies]
@ -5,7 +5,7 @@ PROJECT_ROOT=$(pwd)

# Set environment variables
export USE_GPU=false
export USE_ONNX=true
export USE_ONNX=false
export PYTHONPATH=$PROJECT_ROOT:$PROJECT_ROOT/api
export MODEL_DIR=$PROJECT_ROOT/api/src/models
export VOICES_DIR=$PROJECT_ROOT/api/src/voices
749
uv.lock
generated
|
@ -110,6 +110,47 @@ wheels = [
|
|||
{ url = "https://files.pythonhosted.org/packages/ed/20/bc79bc575ba2e2a7f70e8a1155618bb1301eaa5132a8271373a6903f73f8/babel-2.16.0-py3-none-any.whl", hash = "sha256:368b5b98b37c06b7daf6696391c3240c938b37767d4584413e8438c5c435fa8b", size = 9587599 },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "blis"
|
||||
version = "1.2.0"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
dependencies = [
|
||||
{ name = "numpy" },
|
||||
]
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/e5/69/d4829c5b5cd244e6140a0754a0f73cc725c6e138f609b4c5d1982e699906/blis-1.2.0.tar.gz", hash = "sha256:f25f99d7f3cad72c86a7499212ee833fb5062d80ad1763a935e0e498bc147c69", size = 2375955 }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/54/ff/c55d9d42a622b95fca27f82d4674cd19ad86941dc893f0898ebcccdab105/blis-1.2.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:76998702acbb782e9bb298a5c446aaa1ed4652dbade853baa6a7a26f7b98105b", size = 6973751 },
|
||||
{ url = "https://files.pythonhosted.org/packages/fd/bc/5993eb63fc8a2784fb3a82320bd65df958d7250047f77f467508da896296/blis-1.2.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:1c290c1ba6cb5b633abe59b2fb9ae2ea5dcd7508202f65658fe816bb7e129485", size = 1280762 },
|
||||
{ url = "https://files.pythonhosted.org/packages/ff/65/3dae66f7aec4fe92726f33180cb8780d6a9bc49de25b3ee413275ff1aaf3/blis-1.2.0-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cd81489e4b1a4a6bc51f5578795bc9150a2e8b9babead1074ca51398aff51852", size = 3186927 },
|
||||
{ url = "https://files.pythonhosted.org/packages/4c/96/a420114cb430a790a038ca5a67171b5b124b2b1b0463be2e93bfa8c3378d/blis-1.2.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4896cc4c10c9856c9faaf89401dcb87894da06a18b4b986064acd737a6ed3e60", size = 11526130 },
|
||||
{ url = "https://files.pythonhosted.org/packages/a1/a3/a626f0e90683667a83cb735fe9638e4ffd0004a188287868a79771fb257f/blis-1.2.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:60a29dcb1bba49cae70088d480b95042d4fbbe6b380f2f7c9e70b2781dc126dd", size = 4225925 },
|
||||
{ url = "https://files.pythonhosted.org/packages/c9/70/655b6017396074b1c05010d9127c18eb5f404b22e2b819f1e6da50f202fe/blis-1.2.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:fc1de26073302a3713e487ea85d1ecd0bce204f6b102da498c3cd08528a1d69e", size = 14694282 },
|
||||
{ url = "https://files.pythonhosted.org/packages/af/e0/4ac06562b5dce221fbe20a1f0acd47f67454c377d00b1de0dd44de67116d/blis-1.2.0-cp310-cp310-win_amd64.whl", hash = "sha256:cc2aa5ce96f33162779e88add93b5051437f9c2701d24ee0d2dd89da9a9c23b1", size = 6247380 },
|
||||
{ url = "https://files.pythonhosted.org/packages/3c/3f/62bc963d7cad6d5d4038ca0fed236559abd67c1afca33a2d5644412470f7/blis-1.2.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:debafb46ad8b5e2d18932770639aa1d22b61580a07ec718e9efcf50c76e180d6", size = 6976662 },
|
||||
{ url = "https://files.pythonhosted.org/packages/70/4e/4d030d66d3de8dbe12217b4bb0fc67264df9befea07f6c164d33a23b0b09/blis-1.2.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:eb27e94b9dbd9c23595b95155607a57ad814bebd3cc1bf8551bee4af60e1b5d7", size = 1281766 },
|
||||
{ url = "https://files.pythonhosted.org/packages/c8/32/9994aa6a2cc00f97a71cb6079364c3108da35e19203affcd9c541309728a/blis-1.2.0-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f8ed98669144fb8ee30052f7259d0cb78b7b3755d9589d98cbb7986d22473ab7", size = 3304018 },
|
||||
{ url = "https://files.pythonhosted.org/packages/73/e7/95ae571ccfe5c43fb65fce5921e8a6213c4443a2e18a9ca5b6bad2fc8aab/blis-1.2.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:08f62b6f114370d8449b4836ebd157980a5718a5c39266af9cdff67a9602a421", size = 11659934 },
|
||||
{ url = "https://files.pythonhosted.org/packages/21/09/e99e3575eb3609db01948a4bbc3abce03e47be53c18338aa7a657bc92f1b/blis-1.2.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:cc5c25fb12fd134812ea47e3fcbbd64d46d0717d307c5c2fb32a45ac8daf3226", size = 4360532 },
|
||||
{ url = "https://files.pythonhosted.org/packages/fc/94/2575e8e7716f25265ea17a7272c4dc5b0d32b4d2c52aafbf5425cfbf998c/blis-1.2.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:78a6498c748a42494a2cf58be489616a42ba0b925bc92ab23c3721dc779a4739", size = 14827891 },
|
||||
{ url = "https://files.pythonhosted.org/packages/c4/d9/b647ef53c33c82c1fa2ed217c5793de551a38fb1e5b2430f59c3ecba4c86/blis-1.2.0-cp311-cp311-win_amd64.whl", hash = "sha256:5ad68bc972f210a0227d9742bf6325600bb95c8188f97850634f6d97c3a08107", size = 6230482 },
|
||||
{ url = "https://files.pythonhosted.org/packages/fb/98/79df1711d96b38a3cf72b2abad412191fe2ada986b6203a1237dcd7aac9a/blis-1.2.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:99df869b8998303cf78e9f408f0350b0c5cd12d733caa8df99682f046b83ea35", size = 6989668 },
|
||||
{ url = "https://files.pythonhosted.org/packages/1e/bb/3f84de3303873783f6c2dee121d0a36fae641332db73b046cc93cb7b717e/blis-1.2.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:4001df564c43c8f2260b13c4f06327dee23831b178f65884c22b879062ebca14", size = 1282523 },
|
||||
{ url = "https://files.pythonhosted.org/packages/91/4d/d0a599555fd97d3229d3c3fd8c7e5b531ca5863421370e99b46d70bce883/blis-1.2.0-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6af5dec35acfc044e29b89bb9202e74edc747344f5a46fc27e8a8998f8229610", size = 3260765 },
|
||||
{ url = "https://files.pythonhosted.org/packages/4d/59/b7571c5fa57b2198b5240f8cd790daf5749491cc17706e3a4b1528a75185/blis-1.2.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:986f125ad0215e975a0895505728644dff2669a739f6c2faf89436e3fcae21ac", size = 11616436 },
|
||||
{ url = "https://files.pythonhosted.org/packages/fe/50/9c1311aa73d9812e3c78ebeec7c4fb0b15fdecfcc9a4866f1e3c06d0f331/blis-1.2.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:ea1f4ce1541cddbc9b0574a5969df2a518c5a6d4aa8787782dab5d82233a1458", size = 4309834 },
|
||||
{ url = "https://files.pythonhosted.org/packages/d1/12/02f3afacf790a93e4d9f367cc5cdd95ed0348e5d2927bc4d9c7d1d70d1ae/blis-1.2.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:6358168c4218a36e49c244c714f50248a1ef981874ae7bc785d68e76d55c57b5", size = 14789989 },
|
||||
{ url = "https://files.pythonhosted.org/packages/c0/3a/ce0a98664d6283276fa986685e308c1dc1feb634241b2d3828ceaaa5a128/blis-1.2.0-cp312-cp312-win_amd64.whl", hash = "sha256:067f4f99fb3dc0cf50bbbf0ee4b850f13e64fbb84fdaab0864fd97af0bee0ced", size = 6258036 },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "catalogue"
|
||||
version = "2.0.10"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/38/b4/244d58127e1cdf04cf2dc7d9566f0d24ef01d5ce21811bab088ecc62b5ea/catalogue-2.0.10.tar.gz", hash = "sha256:4f56daa940913d3f09d589c191c74e5a6d51762b3a9e37dd53b7437afd6cda15", size = 19561 }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/9e/96/d32b941a501ab566a16358d68b6eb4e4acc373fab3c3c4d7d9e649f7b4bb/catalogue-2.0.10-py3-none-any.whl", hash = "sha256:58c2de0020aa90f4a2da7dfad161bf7b3b054c86a5f09fcedc0b2b740c109a9f", size = 17325 },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "certifi"
|
||||
version = "2024.12.14"
|
||||
|
@ -268,6 +309,18 @@ wheels = [
|
|||
{ url = "https://files.pythonhosted.org/packages/52/32/f8f97b5fb1d3c14ab8b0895082f4eccc280e6787d2b9a5c9d91a34f203f4/clldutils-3.21.0-py2.py3-none-any.whl", hash = "sha256:204851d8897ab590f059e272bcce222f5e15f75e7099e2e50cdc69b388a02792", size = 1723784 },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "cloudpathlib"
|
||||
version = "0.20.0"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
dependencies = [
|
||||
{ name = "typing-extensions", marker = "python_full_version < '3.11' or (extra == 'extra-14-kokoro-fastapi-cpu' and extra == 'extra-14-kokoro-fastapi-gpu')" },
|
||||
]
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/71/0b/a47d78ed2816db100543b504fdbfc2070f422aac858e6bcf775713e37b8a/cloudpathlib-0.20.0.tar.gz", hash = "sha256:f6ef7ca409a510f7ba4639ba50ab3fc5b6dee82d6dff0d7f5715fd0c9ab35891", size = 45149 }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/1f/6e/b64600156934dab14cc8b403095a9ea8bd722aad2e775673c68346b76220/cloudpathlib-0.20.0-py3-none-any.whl", hash = "sha256:7af3bcefbf73392ae7f31c08b3660ec31607f8c01b7f6262d4d73469a845f641", size = 52547 },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "colorama"
|
||||
version = "0.4.6"
|
||||
|
@ -301,6 +354,19 @@ wheels = [
|
|||
{ url = "https://files.pythonhosted.org/packages/e3/51/9b208e85196941db2f0654ad0357ca6388ab3ed67efdbfc799f35d1f83aa/colorlog-6.9.0-py3-none-any.whl", hash = "sha256:5906e71acd67cb07a71e779c47c4bcb45fb8c2993eebe9e5adcd6a6f1b283eff", size = 11424 },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "confection"
|
||||
version = "0.1.5"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
dependencies = [
|
||||
{ name = "pydantic" },
|
||||
{ name = "srsly" },
|
||||
]
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/51/d3/57c6631159a1b48d273b40865c315cf51f89df7a9d1101094ef12e3a37c2/confection-0.1.5.tar.gz", hash = "sha256:8e72dd3ca6bd4f48913cd220f10b8275978e740411654b6e8ca6d7008c590f0e", size = 38924 }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/0c/00/3106b1854b45bd0474ced037dfe6b73b90fe68a68968cef47c23de3d43d2/confection-0.1.5-py3-none-any.whl", hash = "sha256:e29d3c3f8eac06b3f77eb9dfb4bf2fc6bcc9622a98ca00a698e3d019c6430b14", size = 35451 },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "contourpy"
|
||||
version = "1.3.1"
|
||||
|
@ -451,6 +517,46 @@ wheels = [
|
|||
{ url = "https://files.pythonhosted.org/packages/13/84/069db1325f5a6dd034524c0099da3978de8fdb6242ce63223ead188da940/csvw-3.5.1-py2.py3-none-any.whl", hash = "sha256:8dd3864aae51bfd943713a62ec2c6688d3f406a9627b5f16de2479b1281febe5", size = 59881 },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "curated-tokenizers"
|
||||
version = "0.0.9"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
dependencies = [
|
||||
{ name = "regex" },
|
||||
]
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/fc/fa/b2d55f0d53c7c7f5dc0b6dbb48cc4344ee84fb572f23de28040bf2cde89d/curated-tokenizers-0.0.9.tar.gz", hash = "sha256:c93d47e54ab3528a6db2796eeb4bdce5d44e8226c671e42c2f23522ab1d0ce25", size = 2237055 }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/01/64/07c176505994cdd3ea3d7b1e56ccaa0f14f506be72dc5bad9a627995f048/curated_tokenizers-0.0.9-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:19d3a2570dbbd08bbdae4c79d187fb150ea3b663c2f060bd1e4a050a1358cfd1", size = 732854 },
|
||||
{ url = "https://files.pythonhosted.org/packages/ae/9b/3862be9b9bc97bedfd159fc30ff81f531132de59e324b9b41c264702cbf7/curated_tokenizers-0.0.9-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:799b8a9a1603b7d12683017409bf338bff925aa9806fbad0925ac550501afdf8", size = 702897 },
|
||||
{ url = "https://files.pythonhosted.org/packages/54/f9/54b7f83a6fbb3d34e45aa1a095c743b174186b28d375714b87b48accaf89/curated_tokenizers-0.0.9-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2cfc4541c3e5738d74dbf859eb87c26112178b7a91be1d99a4bdced8182f4a73", size = 706575 },
|
||||
{ url = "https://files.pythonhosted.org/packages/bc/b3/c1ef2c0587a926a2a4f2fec4ea8338e34068845decbfc64e5f554b5d01a0/curated_tokenizers-0.0.9-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8a61acd1c66aea2198702b2a1418a6f3bf1241e3e302c1295a5878e892010642", size = 731650 },
|
||||
{ url = "https://files.pythonhosted.org/packages/e1/4e/108a03b27d7e3646a9f74c73efbf8d94feda16e22d49b35d198814f3e13a/curated_tokenizers-0.0.9-cp310-cp310-win_amd64.whl", hash = "sha256:00a9eff167481494f967ad0efc5c53164d460d4f40d816f6c132f69c8584a735", size = 730887 },
|
||||
{ url = "https://files.pythonhosted.org/packages/1f/f2/e4fa27a078ca6d7db87f82124695ce8822104285d4f8b3ec9900ab18c2df/curated_tokenizers-0.0.9-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:899128d78177ca0ac668addc33b430020f737dd08bc6bf3753ff4d9ba0e41e75", size = 733560 },
|
||||
{ url = "https://files.pythonhosted.org/packages/38/e3/88c6681df8319fef9670c99e8dafbc3e89403f199cf6d009a407856e9ebc/curated_tokenizers-0.0.9-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2d1fef3e861df50bd6337364a87f447fbd0a6f01c095cec121b7404d15512138", size = 703331 },
|
||||
{ url = "https://files.pythonhosted.org/packages/a9/c7/52452cf5f200f95711c15b474d6230fed40330621c0e423c4ce7e02af1fd/curated_tokenizers-0.0.9-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:76ff13e8c19f7cdb03441ca5ec9ce85f133da7fd5b9cc574d8d18af41ba8a50a", size = 709477 },
|
||||
{ url = "https://files.pythonhosted.org/packages/31/69/70f6295dd03fed67afa8520b066026764d719fe123ddd108137ee4c9a732/curated_tokenizers-0.0.9-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4079b1cb2220cb76deb271fa55f4867be3764f15e8fdb1bfc0a2041081570224", size = 735551 },
|
||||
{ url = "https://files.pythonhosted.org/packages/75/ef/ea0e193b1775688263ac9261128b616cbc11cb052feb068b4974626d2715/curated_tokenizers-0.0.9-cp311-cp311-win_amd64.whl", hash = "sha256:c661d09ffe7a4a9175f28d76034e01c87df9df6fedb998151abbf201f28f1aa0", size = 730824 },
|
||||
{ url = "https://files.pythonhosted.org/packages/10/3e/c10474a21ed0166f94cebb46fe96cf07fdf7f399d84e6157ec4dfbd97b53/curated_tokenizers-0.0.9-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:e66aedfeae0c91f3f3e2980b17933b3d08f3fba6c8ba7057b9b05d596e8a0b27", size = 734544 },
|
||||
{ url = "https://files.pythonhosted.org/packages/34/fb/d6e57b1155bee398f43de58ecdcdda44957e9635183312ac0820a19fc94d/curated_tokenizers-0.0.9-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:2abbb571666a9c9b3a15f9df022e25ed1137e9fa8346788aaa747c00f940a3c6", size = 703466 },
|
||||
{ url = "https://files.pythonhosted.org/packages/1b/7c/2d24633275f2854c144652ee6ef97ae85d444855b6da5aa1203678541fa5/curated_tokenizers-0.0.9-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:64b9991a9720a0ce8cc72d29791fd73f2cc2bef0241b002fd2a756ec8a629143", size = 706194 },
|
||||
{ url = "https://files.pythonhosted.org/packages/21/24/12ae8f92d0e319ed07dd9c3ee5d24e71dd6ff3dd8d4dbe2126a6e5cbf7a1/curated_tokenizers-0.0.9-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:35fb208a01f2b3f22172596915d229859549a2d76e484be976dd728b1ca3bdec", size = 734029 },
|
||||
{ url = "https://files.pythonhosted.org/packages/0f/fc/776b7464029ea126bf55df2a5edd437178ad8e5c0126f953891dfa603f9c/curated_tokenizers-0.0.9-cp312-cp312-win_amd64.whl", hash = "sha256:209d756694c7fb000a0b642016eb6e71c740cfce293adcbf3384aa2a1e701eb2", size = 731507 },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "curated-transformers"
|
||||
version = "0.1.1"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
dependencies = [
|
||||
{ name = "torch", version = "2.5.1", source = { registry = "https://download.pytorch.org/whl/cpu" }, marker = "extra == 'extra-14-kokoro-fastapi-cpu'" },
|
||||
{ name = "torch", version = "2.5.1", source = { registry = "https://pypi.org/simple" }, marker = "(extra == 'extra-14-kokoro-fastapi-cpu' and extra == 'extra-14-kokoro-fastapi-gpu') or (extra != 'extra-14-kokoro-fastapi-cpu' and extra != 'extra-14-kokoro-fastapi-gpu')" },
|
||||
{ name = "torch", version = "2.5.1+cu121", source = { registry = "https://download.pytorch.org/whl/cu121" }, marker = "extra == 'extra-14-kokoro-fastapi-gpu'" },
|
||||
]
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/70/06/6c12c149a7f737dacc76b4c3949dbc7ff87d622567b86996896ae4d104aa/curated-transformers-0.1.1.tar.gz", hash = "sha256:4671f03314df30efda2ec2b59bc7692ea34fcea44cb65382342c16684e8a2119", size = 16313 }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/d6/67/3b72b3fdfcadab61bc8f59c17e63770e526ffabd583ed32f174a7c01af85/curated_transformers-0.1.1-py2.py3-none-any.whl", hash = "sha256:d716063d73d803c6925d2dab56fde9b9ab8e89e663c2c0587804944ba488ff01", size = 25972 },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "cycler"
|
||||
version = "0.12.1"
|
||||
|
@ -460,6 +566,42 @@ wheels = [
|
|||
{ url = "https://files.pythonhosted.org/packages/e7/05/c19819d5e3d95294a6f5947fb9b9629efb316b96de511b418c53d245aae6/cycler-0.12.1-py3-none-any.whl", hash = "sha256:85cef7cff222d8644161529808465972e51340599459b8ac3ccbac5a854e0d30", size = 8321 },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "cymem"
|
||||
version = "2.0.11"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/f2/4a/1acd761fb6ac4c560e823ce40536a62f886f2d59b2763b5c3fc7e9d92101/cymem-2.0.11.tar.gz", hash = "sha256:efe49a349d4a518be6b6c6b255d4a80f740a341544bde1a807707c058b88d0bd", size = 10346 }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/6d/55/f453f2b2f560e057f20eb2acdaafbf6488d72a6e8a36a4aef30f6053a51c/cymem-2.0.11-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:1b4dd8f8c2475c7c9948eefa89c790d83134600858d8d43b90276efd8df3882e", size = 41886 },
|
||||
{ url = "https://files.pythonhosted.org/packages/a6/9d/03299eff35bd4fd80db33e4fd516661b82bb7b898cb677829acf22391ede/cymem-2.0.11-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:d46ba0d2e0f749195297d16f2286b55af7d7c084db2b853fdfccece2c000c5dc", size = 41696 },
|
||||
{ url = "https://files.pythonhosted.org/packages/d3/0c/90aa41f258a67ea210886c5c73f88dc9f120b7a20e6b5d92c5ce73a68276/cymem-2.0.11-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:739c4336b9d04ce9761851e9260ef77508d4a86ee3060e41302bfb6fa82c37de", size = 203719 },
|
||||
{ url = "https://files.pythonhosted.org/packages/52/d1/dc4a72aa2049c34a53a220290b1a59fadae61929dff3a6e1a830a22971fe/cymem-2.0.11-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a69c470c2fb118161f49761f9137384f46723c77078b659bba33858e19e46b49", size = 204763 },
|
||||
{ url = "https://files.pythonhosted.org/packages/69/51/86ed323585530558bcdda1324c570abe032db2c1d5afd1c5e8e3e8fde63a/cymem-2.0.11-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:40159f6c92627438de970fd761916e745d70dfd84a7dcc28c1627eb49cee00d8", size = 193964 },
|
||||
{ url = "https://files.pythonhosted.org/packages/ed/0c/aee4ad2996a4e24342228ccf44d7835c7784042f0ee0c47ad33be1443f18/cymem-2.0.11-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:f503f98e6aa333fffbe657a6854f13a9c3de68860795ae21171284213b9c5c09", size = 195002 },
|
||||
{ url = "https://files.pythonhosted.org/packages/eb/d5/eda823d639258d2ed1db83403c991a9a57d5a4ddea3bf08e59060809a9aa/cymem-2.0.11-cp310-cp310-win_amd64.whl", hash = "sha256:7f05ed5920cc92d6b958ec5da55bd820d326fe9332b90660e6fa67e3b476ceb1", size = 39079 },
|
||||
{ url = "https://files.pythonhosted.org/packages/03/e3/d98e3976f4ffa99cddebc1ce379d4d62e3eb1da22285267f902c99cc3395/cymem-2.0.11-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:3ee54039aad3ef65de82d66c40516bf54586287b46d32c91ea0530c34e8a2745", size = 42005 },
|
||||
{ url = "https://files.pythonhosted.org/packages/41/b4/7546faf2ab63e59befc95972316d62276cec153f7d4d60e7b0d5e08f0602/cymem-2.0.11-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:4c05ef75b5db217be820604e43a47ccbbafea98ab6659d07cea92fa3c864ea58", size = 41747 },
|
||||
{ url = "https://files.pythonhosted.org/packages/7d/4e/042f372e5b3eb7f5f3dd7677161771d301de2b6fa3f7c74e1cebcd502552/cymem-2.0.11-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a8d5381e5793ce531bac0dbc00829c8381f18605bb67e4b61d34f8850463da40", size = 217647 },
|
||||
{ url = "https://files.pythonhosted.org/packages/48/cb/2207679e4b92701f78cf141e1ab4f81f55247dbe154eb426b842a0a993de/cymem-2.0.11-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f2b9d3f42d7249ac81802135cad51d707def058001a32f73fc7fbf3de7045ac7", size = 218857 },
|
||||
{ url = "https://files.pythonhosted.org/packages/31/7a/76ae3b7a39ab2531029d281e43fcfcaad728c2341b150a81a3a1f5587cf3/cymem-2.0.11-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:39b78f2195d20b75c2d465732f6b8e8721c5d4eb012777c2cb89bdb45a043185", size = 206148 },
|
||||
{ url = "https://files.pythonhosted.org/packages/25/f9/d0fc0191ac79f15638ddb59237aa76f234691374d7d7950e10f384bd8a25/cymem-2.0.11-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:2203bd6525a80d8fd0c94654a263af21c0387ae1d5062cceaebb652bf9bad7bc", size = 207112 },
|
||||
{ url = "https://files.pythonhosted.org/packages/56/c8/75f75889401b20f4c3a7c5965dda09df42913e904ddc2ffe7ef3bdf25061/cymem-2.0.11-cp311-cp311-win_amd64.whl", hash = "sha256:aa54af7314de400634448da1f935b61323da80a49484074688d344fb2036681b", size = 39360 },
|
||||
{ url = "https://files.pythonhosted.org/packages/71/67/0d74f7e9d79f934368a78fb1d1466b94bebdbff14f8ae94dd3e4ea8738bb/cymem-2.0.11-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:a0fbe19ce653cd688842d81e5819dc63f911a26e192ef30b0b89f0ab2b192ff2", size = 42621 },
|
||||
{ url = "https://files.pythonhosted.org/packages/4a/d6/f7a19c63b48efc3f00a3ee8d69070ac90202e1e378f6cf81b8671f0cf762/cymem-2.0.11-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:de72101dc0e6326f6a2f73e05a438d1f3c6110d41044236d0fbe62925091267d", size = 42249 },
|
||||
{ url = "https://files.pythonhosted.org/packages/d7/60/cdc434239813eef547fb99b6d0bafe31178501702df9b77c4108c9a216f6/cymem-2.0.11-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bee4395917f6588b8ac1699499128842768b391fe8896e8626950b4da5f9a406", size = 224758 },
|
||||
{ url = "https://files.pythonhosted.org/packages/1d/68/8fa6efae17cd3b2ba9a2f83b824867c5b65b06f7aec3f8a0d0cabdeffb9b/cymem-2.0.11-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5b02f2b17d760dc3fe5812737b1ce4f684641cdd751d67761d333a3b5ea97b83", size = 227995 },
|
||||
{ url = "https://files.pythonhosted.org/packages/e4/f3/ceda70bf6447880140602285b7c6fa171cb7c78b623d35345cc32505cd06/cymem-2.0.11-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:04ee6b4041ddec24512d6e969ed6445e57917f01e73b9dabbe17b7e6b27fef05", size = 215325 },
|
||||
{ url = "https://files.pythonhosted.org/packages/d3/47/6915eaa521e1ce7a0ba480eecb6870cb4f681bcd64ced88c2f0ed7a744b4/cymem-2.0.11-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:e1048dae7e627ee25f22c87bb670b13e06bc0aecc114b89b959a798d487d1bf4", size = 216447 },
|
||||
{ url = "https://files.pythonhosted.org/packages/7b/be/8e02bdd31e557f642741a06c8e886782ef78f0b00daffd681922dc9bbc88/cymem-2.0.11-cp312-cp312-win_amd64.whl", hash = "sha256:0c269c7a867d74adeb9db65fa1d226342aacf44d64b7931282f0b0eb22eb6275", size = 39283 },
|
||||
{ url = "https://files.pythonhosted.org/packages/bd/90/b064e2677e27a35cf3605146abc3285d4f599cc1b6c18fc445ae876dd1e3/cymem-2.0.11-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:f4a311c82f743275c84f708df89ac5bf60ddefe4713d532000c887931e22941f", size = 42389 },
|
||||
{ url = "https://files.pythonhosted.org/packages/fd/60/7aa0561a6c1f0d42643b02c4fdeb2a16181b0ff4e85d73d2d80c6689e92a/cymem-2.0.11-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:02ed92bead896cca36abad00502b14fa651bdf5d8319461126a2d5ac8c9674c5", size = 41948 },
|
||||
{ url = "https://files.pythonhosted.org/packages/5f/4e/88a29cc5575374982e527b4ebcab3781bdc826ce693c6418a0f836544246/cymem-2.0.11-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:44ddd3588379f8f376116384af99e3fb5f90091d90f520c341942618bf22f05e", size = 219382 },
|
||||
{ url = "https://files.pythonhosted.org/packages/9b/3a/8f96e167e93b7f7ec105ed7b25c77bbf215d15bcbf4a24082cdc12234cd6/cymem-2.0.11-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:87ec985623624bbd298762d8163fc194a096cb13282731a017e09ff8a60bb8b1", size = 222974 },
|
||||
{ url = "https://files.pythonhosted.org/packages/6a/fc/ce016bb0c66a4776345fac7508fddec3b739b9dd4363094ac89cce048832/cymem-2.0.11-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:e3385a47285435848e0ed66cfd29b35f3ed8703218e2b17bd7a0c053822f26bf", size = 213426 },
|
||||
{ url = "https://files.pythonhosted.org/packages/5c/c8/accf7cc768f751447a5050b14a195af46798bc22767ac25f49b02861b1eb/cymem-2.0.11-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:5461e65340d6572eb64deadce79242a446a1d39cb7bf70fe7b7e007eb0d799b0", size = 219195 },
|
||||
{ url = "https://files.pythonhosted.org/packages/74/65/c162fbac63e867a055240b6600b92ef96c0eb7a1895312ac53c4be93d056/cymem-2.0.11-cp313-cp313-win_amd64.whl", hash = "sha256:25da111adf425c29af0cfd9fecfec1c71c8d82e2244a85166830a0817a66ada7", size = 39090 },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "dill"
|
||||
version = "0.3.9"
|
||||
|
@ -487,6 +629,12 @@ wheels = [
|
|||
{ url = "https://files.pythonhosted.org/packages/a7/f9/e014eb5740dfc6ebe6105f4c38890f361e5b0e1537a9f04bb4f34432efb9/dlinfo-1.2.1-py3-none-any.whl", hash = "sha256:a97d7cc66d997b4ac491f0e8068eb324790994834951a9beb5a4619835b361d9", size = 3559 },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "docopt"
|
||||
version = "0.6.2"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/a2/55/8f8cab2afd404cf578136ef2cc5dfb50baa1761b68c9da1fb1e4eed343c9/docopt-0.6.2.tar.gz", hash = "sha256:49b3a825280bd66b3aa83585ef59c4a8c82f2c8a522dbe754a8bc8d08c85c491", size = 25901 }
|
||||
|
||||
[[package]]
|
||||
name = "ebooklib"
|
||||
version = "0.18"
|
||||
|
@ -998,9 +1146,28 @@ wheels = [
|
|||
{ url = "https://files.pythonhosted.org/packages/3a/1d/50ad811d1c5dae091e4cf046beba925bcae0a610e79ae4c538f996f63ed5/kiwisolver-1.4.8-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:65ea09a5a3faadd59c2ce96dc7bf0f364986a315949dc6374f04396b0d60e09b", size = 71762 },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "kokoro"
|
||||
version = "0.3.1"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
dependencies = [
|
||||
{ name = "huggingface-hub" },
|
||||
{ name = "misaki", extra = ["en"] },
|
||||
{ name = "numpy" },
|
||||
{ name = "scipy" },
|
||||
{ name = "torch", version = "2.5.1", source = { registry = "https://download.pytorch.org/whl/cpu" }, marker = "extra == 'extra-14-kokoro-fastapi-cpu'" },
|
||||
{ name = "torch", version = "2.5.1", source = { registry = "https://pypi.org/simple" }, marker = "(extra == 'extra-14-kokoro-fastapi-cpu' and extra == 'extra-14-kokoro-fastapi-gpu') or (extra != 'extra-14-kokoro-fastapi-cpu' and extra != 'extra-14-kokoro-fastapi-gpu')" },
|
||||
{ name = "torch", version = "2.5.1+cu121", source = { registry = "https://download.pytorch.org/whl/cu121" }, marker = "extra == 'extra-14-kokoro-fastapi-gpu'" },
|
||||
{ name = "transformers" },
|
||||
]
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/86/97/e7e92f4da82b59395978e68ff703f4de4bfb0433e9e883ea57c27b4cbebf/kokoro-0.3.1.tar.gz", hash = "sha256:4f564805d971e8eb6ebc78755a0c54adea7a1f6497022d78e1d4b2ef72362499", size = 18940 }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/39/9d/6da0fc8a8e0059ddac41e25b6b79c8a8f8b38511fe8ade78fbe5fa365306/kokoro-0.3.1-py3-none-any.whl", hash = "sha256:60a427dc79aef045a8ac12c8f5a6c5a9f149e7c817fee86ada369915a65d1e85", size = 21462 },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "kokoro-fastapi"
|
||||
version = "0.1.0"
|
||||
version = "0.1.4"
|
||||
source = { editable = "." }
|
||||
dependencies = [
|
||||
{ name = "aiofiles" },
|
||||
|
@ -1008,6 +1175,7 @@ dependencies = [
|
|||
{ name = "ebooklib" },
|
||||
{ name = "fastapi" },
|
||||
{ name = "html2text" },
|
||||
{ name = "kokoro" },
|
||||
{ name = "loguru" },
|
||||
{ name = "matplotlib" },
|
||||
{ name = "munch" },
|
||||
|
@ -1059,6 +1227,7 @@ requires-dist = [
|
|||
{ name = "gradio", marker = "extra == 'test'", specifier = ">=5" },
|
||||
{ name = "html2text", specifier = ">=2024.2.26" },
|
||||
{ name = "httpx", marker = "extra == 'test'", specifier = "==0.26.0" },
|
||||
{ name = "kokoro", specifier = "==0.3.1" },
|
||||
{ name = "loguru", specifier = "==0.7.3" },
|
||||
{ name = "matplotlib", specifier = ">=3.10.0" },
|
||||
{ name = "munch", specifier = "==4.0.0" },
|
||||
|
@ -1091,6 +1260,30 @@ requires-dist = [
|
|||
{ name = "uvicorn", specifier = "==0.34.0" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "langcodes"
|
||||
version = "3.5.0"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
dependencies = [
|
||||
{ name = "language-data" },
|
||||
]
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/3a/7a/5a97e327063409a5caa21541e6d08ae4a0f2da328447e9f2c7b39e179226/langcodes-3.5.0.tar.gz", hash = "sha256:1eef8168d07e51e131a2497ffecad4b663f6208e7c3ae3b8dc15c51734a6f801", size = 191030 }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/c3/6b/068c2ea7a712bf805c62445bd9e9c06d7340358ef2824150eceac027444b/langcodes-3.5.0-py3-none-any.whl", hash = "sha256:853c69d1a35e0e13da2f427bb68fb2fa4a8f4fb899e0c62ad8df8d073dcfed33", size = 182974 },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "language-data"
|
||||
version = "1.3.0"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
dependencies = [
|
||||
{ name = "marisa-trie" },
|
||||
]
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/dd/ce/3f144716a9f2cbf42aa86ebc8b085a184be25c80aa453eea17c294d239c1/language_data-1.3.0.tar.gz", hash = "sha256:7600ef8aa39555145d06c89f0c324bf7dab834ea0b0a439d8243762e3ebad7ec", size = 5129310 }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/5d/e9/5a5ffd9b286db82be70d677d0a91e4d58f7912bb8dd026ddeeb4abe70679/language_data-1.3.0-py3-none-any.whl", hash = "sha256:e2ee943551b5ae5f89cd0e801d1fc3835bb0ef5b7e9c3a4e8e17b2b214548fbf", size = 5385760 },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "language-tags"
|
||||
version = "1.2.0"
|
||||
|
@ -1195,6 +1388,61 @@ wheels = [
|
|||
{ url = "https://files.pythonhosted.org/packages/ba/b2/6a22fb5c0885da3b00e116aee81f0b829ec9ac8f736cd414b4a09413fc7d/lxml-5.3.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:6e91cf736959057f7aac7adfc83481e03615a8e8dd5758aa1d95ea69e8931dba", size = 3487557 },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "marisa-trie"
|
||||
version = "1.2.1"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
dependencies = [
|
||||
{ name = "setuptools" },
|
||||
]
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/31/15/9d9743897e4450b2de199ee673b50cb018980c4ced477d41cf91304a85e3/marisa_trie-1.2.1.tar.gz", hash = "sha256:3a27c408e2aefc03e0f1d25b2ff2afb85aac3568f6fa2ae2a53b57a2e87ce29d", size = 416124 }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/e4/83/ccf5b33f2123f3110705c608f8e0caa82002626511aafafc58f82e50d322/marisa_trie-1.2.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:a2eb41d2f9114d8b7bd66772c237111e00d2bae2260824560eaa0a1e291ce9e8", size = 362200 },
|
||||
{ url = "https://files.pythonhosted.org/packages/9d/74/f7ce1fc2ee480c7f8ceadd9b992caceaba442a97e5e99d6aea00d3635a0b/marisa_trie-1.2.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:9e956e6a46f604b17d570901e66f5214fb6f658c21e5e7665deace236793cef6", size = 192309 },
|
||||
{ url = "https://files.pythonhosted.org/packages/e4/52/5dbbc13e57ce54c2ef0d04962d7d8f66edc69ed34310c734a2913199a581/marisa_trie-1.2.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:bd45142501300e7538b2e544905580918b67b1c82abed1275fe4c682c95635fa", size = 174713 },
|
||||
{ url = "https://files.pythonhosted.org/packages/57/49/2580372f3f980aea95c23d05b2c1d3bbb9ee1ab8cfd441545153e44f1be7/marisa_trie-1.2.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a8443d116c612cfd1961fbf76769faf0561a46d8e317315dd13f9d9639ad500c", size = 1314808 },
|
||||
{ url = "https://files.pythonhosted.org/packages/5a/ba/e12a4d450f265414cc68df6a116a78beece72b95f774f04d29cd48e08d19/marisa_trie-1.2.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:875a6248e60fbb48d947b574ffa4170f34981f9e579bde960d0f9a49ea393ecc", size = 1346678 },
|
||||
{ url = "https://files.pythonhosted.org/packages/b2/81/8e130cb1eea741fd17694d821096f7ec9841f0e3d3c69b740257f5eeafa8/marisa_trie-1.2.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:746a7c60a17fccd3cfcfd4326926f02ea4fcdfc25d513411a0c4fc8e4a1ca51f", size = 1307254 },
|
||||
{ url = "https://files.pythonhosted.org/packages/d7/d0/3deb5ea2bf7e4d845339875dbb31f3c3f66c8d6568723db1d137fb08a91c/marisa_trie-1.2.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:e70869737cc0e5bd903f620667da6c330d6737048d1f44db792a6af68a1d35be", size = 2194712 },
|
||||
{ url = "https://files.pythonhosted.org/packages/9c/5f/b38d728dd30954816497b53425cfaddaf7b93ac0912db5911888f191b07a/marisa_trie-1.2.1-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:06b099dd743676dbcd8abd8465ceac8f6d97d8bfaabe2c83b965495523b4cef2", size = 2355625 },
|
||||
{ url = "https://files.pythonhosted.org/packages/7e/4f/61c0faa9ae9e53600a1b7a0c367bc9db1a4fdc625402ec232c755a05e094/marisa_trie-1.2.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:d2a82eb21afdaf22b50d9b996472305c05ca67fc4ff5a026a220320c9c961db6", size = 2290290 },
|
||||
{ url = "https://files.pythonhosted.org/packages/7c/7d/713b970fb3043248881ed776dbf4d54918398aa5dde843a38711d0d62c8f/marisa_trie-1.2.1-cp310-cp310-win32.whl", hash = "sha256:8951e7ce5d3167fbd085703b4cbb3f47948ed66826bef9a2173c379508776cf5", size = 130743 },
|
||||
{ url = "https://files.pythonhosted.org/packages/cc/94/3d619cc82c30daeacd18a88674f4e6540ebfb7b4b7752ca0552793be80cf/marisa_trie-1.2.1-cp310-cp310-win_amd64.whl", hash = "sha256:5685a14b3099b1422c4f59fa38b0bf4b5342ee6cc38ae57df9666a0b28eeaad3", size = 151891 },
|
||||
{ url = "https://files.pythonhosted.org/packages/4a/93/ffb01dfa22b6eee918e798e0bc3487427036c608aa4c065725f31aaf4104/marisa_trie-1.2.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:ed3fb4ed7f2084597e862bcd56c56c5529e773729a426c083238682dba540e98", size = 362823 },
|
||||
{ url = "https://files.pythonhosted.org/packages/6d/1d/5c36500ac350c278c9bdfd88e17fa846fa4136d75597c167141ed973cdf2/marisa_trie-1.2.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:0fe69fb9ffb2767746181f7b3b29bbd3454d1d24717b5958e030494f3d3cddf3", size = 192741 },
|
||||
{ url = "https://files.pythonhosted.org/packages/e8/04/87dd0840f3f720e511eba56193c02bf64d7d96df1ca9f6d19994f55154be/marisa_trie-1.2.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:4728ed3ae372d1ea2cdbd5eaa27b8f20a10e415d1f9d153314831e67d963f281", size = 174995 },
|
||||
{ url = "https://files.pythonhosted.org/packages/c9/51/9e903a7e13b7593e2e675d0ec4c390ca076dc5df1c1a0d5e85a513b886a3/marisa_trie-1.2.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8cf4f25cf895692b232f49aa5397af6aba78bb679fb917a05fce8d3cb1ee446d", size = 1384728 },
|
||||
{ url = "https://files.pythonhosted.org/packages/e8/3f/7362a5ac60c2b0aad0f52cd57e7bd0c708f20d2660d8df85360f3d8f1c4b/marisa_trie-1.2.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7cca7f96236ffdbf49be4b2e42c132e3df05968ac424544034767650913524de", size = 1412620 },
|
||||
{ url = "https://files.pythonhosted.org/packages/1f/bc/aaa3eaf6875f78a204a8da9692d56e3a36f89997dad2c388628385614576/marisa_trie-1.2.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d7eb20bf0e8b55a58d2a9b518aabc4c18278787bdba476c551dd1c1ed109e509", size = 1361555 },
|
||||
{ url = "https://files.pythonhosted.org/packages/18/98/e11b5a6206c5d110f32adab37fa84a85410d684e9c731acdd5c9250e2ce4/marisa_trie-1.2.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:b1ec93f0d1ee6d7ab680a6d8ea1a08bf264636358e92692072170032dda652ba", size = 2257717 },
|
||||
{ url = "https://files.pythonhosted.org/packages/d2/9d/6b4a40867875e738a67c5b29f83e2e490a66bd9067ace3dd9a5c497e2b7f/marisa_trie-1.2.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:e2699255d7ac610dee26d4ae7bda5951d05c7d9123a22e1f7c6a6f1964e0a4e4", size = 2417044 },
|
||||
{ url = "https://files.pythonhosted.org/packages/fe/61/e25613c72f2931757334b8bcf6b501569ef713f5ee9c6c7688ec460bd720/marisa_trie-1.2.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:c484410911182457a8a1a0249d0c09c01e2071b78a0a8538cd5f7fa45589b13a", size = 2351960 },
|
||||
{ url = "https://files.pythonhosted.org/packages/19/0a/a90ccaf3eb476d13ec261f80c6c52defaf10ebc7f35eb2bcd7dfb533aef7/marisa_trie-1.2.1-cp311-cp311-win32.whl", hash = "sha256:ad548117744b2bcf0e3d97374608be0a92d18c2af13d98b728d37cd06248e571", size = 130446 },
|
||||
{ url = "https://files.pythonhosted.org/packages/fc/98/574b4e143e0a2f5f71af8716b6c4a8a46220f75a6e0847ce7d11ee0ba4aa/marisa_trie-1.2.1-cp311-cp311-win_amd64.whl", hash = "sha256:436f62d27714970b9cdd3b3c41bdad046f260e62ebb0daa38125ef70536fc73b", size = 152037 },
|
||||
{ url = "https://files.pythonhosted.org/packages/4e/bf/8bd4ac8436b33fd46c9e1ffe3c2a131cd9744cc1649dbbe13308f744ef2b/marisa_trie-1.2.1-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:638506eacf20ca503fff72221a7e66a6eadbf28d6a4a6f949fcf5b1701bb05ec", size = 360041 },
|
||||
{ url = "https://files.pythonhosted.org/packages/ab/dd/4d3151e302e66ae387885f6ec265bd189e096b0c43c1379bfd9a3b9d2543/marisa_trie-1.2.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:de1665eaafefa48a308e4753786519888021740501a15461c77bdfd57638e6b4", size = 190520 },
|
||||
{ url = "https://files.pythonhosted.org/packages/00/28/ae5991c74fb90b173167a366a634c83445f948ad044d37287b478d6b457e/marisa_trie-1.2.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:f713af9b8aa66a34cd3a78c7d150a560a75734713abe818a69021fd269e927fa", size = 174175 },
|
||||
{ url = "https://files.pythonhosted.org/packages/5a/6a/fbfa89a8680eaabc6847a6c421e65427c43182db0c4bdb60e1516c81c822/marisa_trie-1.2.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b2a7d00f53f4945320b551bccb826b3fb26948bde1a10d50bb9802fabb611b10", size = 1354995 },
|
||||
{ url = "https://files.pythonhosted.org/packages/9e/4c/2ba0b385e5f64ca4ddb0c10ec52ddf881bc4521f135948786fc339d1d6c8/marisa_trie-1.2.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:98042040d1d6085792e8d0f74004fc0f5f9ca6091c298f593dd81a22a4643854", size = 1390989 },
|
||||
{ url = "https://files.pythonhosted.org/packages/6b/22/0791ed3045c91d0938345a86be472fc7c188b894f16c5dfad2ef31e7f882/marisa_trie-1.2.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6532615111eec2c79e711965ece0bc95adac1ff547a7fff5ffca525463116deb", size = 1328810 },
|
||||
{ url = "https://files.pythonhosted.org/packages/9d/7d/3f566e563abae6efce7fc311c63282a447c611739b3cd66c0e36077c86f8/marisa_trie-1.2.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:20948e40ab2038e62b7000ca6b4a913bc16c91a2c2e6da501bd1f917eeb28d51", size = 2230222 },
|
||||
{ url = "https://files.pythonhosted.org/packages/a5/0b/38fbb4611b5d1030242ddc2aa62e524438c8076e26f87395dbbf222dc62d/marisa_trie-1.2.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:66b23e5b35dd547f85bf98db7c749bc0ffc57916ade2534a6bbc32db9a4abc44", size = 2383620 },
|
||||
{ url = "https://files.pythonhosted.org/packages/ae/17/4553c63de29904d5d2521a24cad817bc7883cfa90506ab702ec4dae59a7b/marisa_trie-1.2.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:6704adf0247d2dda42e876b793be40775dff46624309ad99bc7537098bee106d", size = 2329202 },
|
||||
{ url = "https://files.pythonhosted.org/packages/45/08/6307a630e63cd763fe77ac56516faa67fa9cd342060691e40fabc84be6b0/marisa_trie-1.2.1-cp312-cp312-win32.whl", hash = "sha256:3ad356442c2fea4c2a6f514738ddf213d23930f942299a2b2c05df464a00848a", size = 129652 },
|
||||
{ url = "https://files.pythonhosted.org/packages/a1/fe/67c357bfd92710d95a16b86e1453c663d565415d7f7838781c79ff7e1a7e/marisa_trie-1.2.1-cp312-cp312-win_amd64.whl", hash = "sha256:f2806f75817392cedcacb24ac5d80b0350dde8d3861d67d045c1d9b109764114", size = 150845 },
|
||||
{ url = "https://files.pythonhosted.org/packages/2a/a4/a110cd9952f0e72da7bafea1f0084b18b9e03952110d9083bfda52279f5c/marisa_trie-1.2.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:b5ea16e69bfda0ac028c921b58de1a4aaf83d43934892977368579cd3c0a2554", size = 354439 },
|
||||
{ url = "https://files.pythonhosted.org/packages/3c/a5/a6099eb1c3fd8d7e93408c45501e1d08536ac57dfef02ec331f78e1ace18/marisa_trie-1.2.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:9f627f4e41be710b6cb6ed54b0128b229ac9d50e2054d9cde3af0fef277c23cf", size = 188187 },
|
||||
{ url = "https://files.pythonhosted.org/packages/7c/cc/f637127e2beffa920d21f7fc45b4029575bcd1b28a90c0d90cb2b08c2205/marisa_trie-1.2.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:5e649f3dc8ab5476732094f2828cc90cac3be7c79bc0c8318b6fda0c1d248db4", size = 171484 },
|
||||
{ url = "https://files.pythonhosted.org/packages/6d/0f/29f2ad7260b956570f69f25a542efa51ba76eb76ecd53c63ee9d21987c3d/marisa_trie-1.2.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:46e528ee71808c961baf8c3ce1c46a8337ec7a96cc55389d11baafe5b632f8e9", size = 1319770 },
|
||||
{ url = "https://files.pythonhosted.org/packages/f2/12/0b69ed61fba59551a5f3d569af367afae614db7214ce1da12946ba9a433a/marisa_trie-1.2.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:36aa4401a1180615f74d575571a6550081d84fc6461e9aefc0bb7b2427af098e", size = 1356488 },
|
||||
{ url = "https://files.pythonhosted.org/packages/33/23/483b110db7ffe8729d6ebea2bf74258aef51f10fef5775f99e4bac7aef69/marisa_trie-1.2.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ce59bcd2cda9bb52b0e90cc7f36413cd86c3d0ce7224143447424aafb9f4aa48", size = 1302334 },
|
||||
{ url = "https://files.pythonhosted.org/packages/1c/6f/46c2be99ce925985127fdf78900f1673bce8cb72debfebee6dccd11032c6/marisa_trie-1.2.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:f4cd800704a5fc57e53c39c3a6b0c9b1519ebdbcb644ede3ee67a06eb542697d", size = 2202624 },
|
||||
{ url = "https://files.pythonhosted.org/packages/fd/b6/ef642327dbd4ec35be55d5682520b8f70fca98a54024f441ef2732f6b305/marisa_trie-1.2.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:2428b495003c189695fb91ceeb499f9fcced3a2dce853e17fa475519433c67ff", size = 2364206 },
|
||||
{ url = "https://files.pythonhosted.org/packages/69/04/ef8197a79d0ab5043b781cc9b457bd11b81d4204fe78adf7625a67f48c21/marisa_trie-1.2.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:735c363d9aaac82eaf516a28f7c6b95084c2e176d8231c87328dc80e112a9afa", size = 2304801 },
|
||||
{ url = "https://files.pythonhosted.org/packages/03/72/f87564d653daf31d8f33d9bf0121e99ccc21f18f5c485fb404ba06abc10e/marisa_trie-1.2.1-cp313-cp313-win32.whl", hash = "sha256:eba6ca45500ca1a042466a0684aacc9838e7f20fe2605521ee19f2853062798f", size = 128799 },
|
||||
{ url = "https://files.pythonhosted.org/packages/27/40/5f9eb8b73030cc4b0d6817176e66079a62a2ddd9d5530da54f8011473428/marisa_trie-1.2.1-cp313-cp313-win_amd64.whl", hash = "sha256:aa7cd17e1c690ce96c538b2f4aae003d9a498e65067dd433c52dd069009951d4", size = 149035 },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "markdown"
|
||||
version = "3.7"
|
||||
|
@ -1315,6 +1563,26 @@ wheels = [
|
|||
{ url = "https://files.pythonhosted.org/packages/b3/38/89ba8ad64ae25be8de66a6d463314cf1eb366222074cfda9ee839c56a4b4/mdurl-0.1.2-py3-none-any.whl", hash = "sha256:84008a41e51615a49fc9966191ff91509e3c40b939176e643fd50a5c2196b8f8", size = 9979 },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "misaki"
|
||||
version = "0.6.3"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
dependencies = [
|
||||
{ name = "regex" },
|
||||
]
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/7a/b4/26933a0a95839c6cd1dc09097cf080924784053858262f96863ef1fe9171/misaki-0.6.3.tar.gz", hash = "sha256:f33903e959d4dbe5aee9e69748600cf31c0f0e98125d63a27ac8288d6860d641", size = 3330180 }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/39/d1/a5f0739474c7dca5b6911c99bd78579255aa01b9cada28f5c36a68a85074/misaki-0.6.3-py3-none-any.whl", hash = "sha256:0b8fb39f1c7184a97ec82663f6ecfcc118c2e73b91b9c9d8cd26252510dd6d25", size = 3385188 },
|
||||
]
|
||||
|
||||
[package.optional-dependencies]
|
||||
en = [
|
||||
{ name = "num2words" },
|
||||
{ name = "phonemizer" },
|
||||
{ name = "spacy" },
|
||||
{ name = "spacy-curated-transformers" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "mpire"
|
||||
version = "2.10.2"
|
||||
|
@ -1372,6 +1640,42 @@ wheels = [
|
|||
{ url = "https://files.pythonhosted.org/packages/56/b3/7c69b37f03260a061883bec0e7b05be7117c1b1c85f5212c72c8c2bc3c8c/munch-4.0.0-py2.py3-none-any.whl", hash = "sha256:71033c45db9fb677a0b7eb517a4ce70ae09258490e419b0e7f00d1e386ecb1b4", size = 9950 },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "murmurhash"
|
||||
version = "1.0.12"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/64/d9/e7c6a7d4e9b5320c17e54af6f9edd2f521c6f86bbbb72aba571f641a9793/murmurhash-1.0.12.tar.gz", hash = "sha256:467b7ee31c1f79f46d00436a1957fc52a0e5801369dd2f30eb7655f380735b5f", size = 13233 }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/74/4c/bc0a79c7b0ebec63256ac547e2cecbae73badcd26e874231ff901665e8fc/murmurhash-1.0.12-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:a3f492bbf6f879b6eaf9da4be7471f4b68a3e3ae525aac0f35c2ae27ec91265c", size = 26857 },
|
||||
{ url = "https://files.pythonhosted.org/packages/2c/dc/824bd5cf239d6b6997f83dd94c4a99a48f5f2a6267174cf191ddb844f997/murmurhash-1.0.12-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:3493e0c10a64fa72026af2ea2271d8b3511a438de3c6a771b7a57771611b9c08", size = 26974 },
|
||||
{ url = "https://files.pythonhosted.org/packages/51/b2/67f4e99f9b577187ec1376ff37478da87b88f2f8092c1f1351b18cb29fc6/murmurhash-1.0.12-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:95989ddbb187b9934e5b0e7f450793a445814b6c293a7bf92df56913c3a87c1e", size = 126364 },
|
||||
{ url = "https://files.pythonhosted.org/packages/4e/10/c7efbc91842ec6d519296129071cc55ba50d5e120e796dac536e4c299dc3/murmurhash-1.0.12-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2efef9f9aad98ec915a830f0c53d14ce6807ccc6e14fd2966565ef0b71cfa086", size = 124315 },
|
||||
{ url = "https://files.pythonhosted.org/packages/92/87/dc7dbca647909721006405b8f956628dfbd2fd4f7701f6dfcd5e40f29b4f/murmurhash-1.0.12-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:b3147d171a5e5d2953b5eead21d15ea59b424844b4504a692c4b9629191148ed", size = 120355 },
|
||||
{ url = "https://files.pythonhosted.org/packages/ff/36/cf13614b4bb62a956c62a3d8cd81fb4e0dd35e982dd7d7d028522ba4d9d9/murmurhash-1.0.12-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:736c869bef5023540dde52a9338085ac823eda3f09591ba1b4ed2c09c8b378db", size = 119979 },
|
||||
{ url = "https://files.pythonhosted.org/packages/05/b9/06bfba06b9fb4855db2cbbeb72eac7a879209c2b989b3de5d3383c49ca04/murmurhash-1.0.12-cp310-cp310-win_amd64.whl", hash = "sha256:b81feb5bfd13bce638ccf910c685b04ad0537635918d04c83b291ce0441776da", size = 25373 },
|
||||
{ url = "https://files.pythonhosted.org/packages/d3/f4/0208624de330224f3a8981c030007fc4a3583ca6b4d4dd3275364c1d06e6/murmurhash-1.0.12-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:8b236b76a256690e745b63b679892878ec4f01deeeda8d311482a9b183d2d452", size = 26793 },
|
||||
{ url = "https://files.pythonhosted.org/packages/2f/a4/a387486e79bcc04f3d3b123195fd4cca74a7ba439d6c45b35c5366c66586/murmurhash-1.0.12-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:8bc3756dd657ed90c1354705e66513c11516929fe726e7bc91c79734d190f394", size = 26884 },
|
||||
{ url = "https://files.pythonhosted.org/packages/9f/38/ec45a33c519feb802cdf0fe9dd1b1e6c15897c43d29c738eaae61da8ae5d/murmurhash-1.0.12-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fd41e4c3d7936b69010d76e5edff363bf40fd918d86287a14e924363d7828522", size = 136101 },
|
||||
{ url = "https://files.pythonhosted.org/packages/0b/d5/6f1b561d8b14ef01d28d9cec278870bec01d8a569cfbc694e68ac05a5615/murmurhash-1.0.12-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:36be2831df750163495e471d24aeef6aca1b2a3c4dfb05f40114859db47ff3f2", size = 134309 },
|
||||
{ url = "https://files.pythonhosted.org/packages/e8/78/2df6cdce439f6b8509d7947b8c47e7fe2589671899eb6399f4e2f602fe1f/murmurhash-1.0.12-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:b078c10f9c82cbd144b1200061fbfa7f99af9d5d8d7f7d8a324370169e3da7c2", size = 131134 },
|
||||
{ url = "https://files.pythonhosted.org/packages/43/0b/f0a5a622c505786d3d1dc1ad3e7f6b6fbfcae2665b205e07b3882185c39f/murmurhash-1.0.12-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:307ca8da5f038635ded9de722fe11f07f06a2b76442ae272dcccbff6086de487", size = 128630 },
|
||||
{ url = "https://files.pythonhosted.org/packages/de/30/ceb9217cdba72bc0bf8466e373e12e5a42945cc85eda0a7c479e319e07ae/murmurhash-1.0.12-cp311-cp311-win_amd64.whl", hash = "sha256:1b4ab5ba5ba909959659989f3bf57903f31f49906fe40f00aec81e32eea69a88", size = 25417 },
|
||||
{ url = "https://files.pythonhosted.org/packages/38/c7/0dc2914c24adb9466b69606dfdee7bbfed13476f4dda3753e0185cfbbe1f/murmurhash-1.0.12-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:1a4c97c8ffbedb62b760c3c2f77b5b8cb0e0ac0ec83a74d2f289e113e3e92ed5", size = 27120 },
|
||||
{ url = "https://files.pythonhosted.org/packages/ae/d7/aea56101f225eb021cfd47245d55680605665b556aba95eecee937b4d4d6/murmurhash-1.0.12-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:9574f0b634f059158bb89734a811e435ac9ad2335c02a7abb59f1875dcce244c", size = 27081 },
|
||||
{ url = "https://files.pythonhosted.org/packages/f4/68/4b723e0f318e92b0b4779f41ff5d9446e1dc0e68aca2f0043e1fab3fc1be/murmurhash-1.0.12-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:701cc0ce91809b4d7c2e0518be759635205e1e181325792044f5a8118019f716", size = 138552 },
|
||||
{ url = "https://files.pythonhosted.org/packages/13/40/eed53da76a428f404ec9db6d0983691c61d2744fea7070c6b31caca31ac4/murmurhash-1.0.12-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7e1c9de2167a9d408d121ebc918bcb20b2718ec956f3aae0ded53d9bb224bb8e", size = 138589 },
|
||||
{ url = "https://files.pythonhosted.org/packages/12/e8/1b9164e62f75bf23d6af5262421985f45bce9bd5c4970a62b83ea7cb62df/murmurhash-1.0.12-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:94a52972835bdae8af18147c67c398ff3ea1d875f5b8dca1e1aa0fadb892f546", size = 129244 },
|
||||
{ url = "https://files.pythonhosted.org/packages/a7/20/c91f06d3692705bc7ec16c219143cc56062afd8080756d55e0678a7b704c/murmurhash-1.0.12-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:cc88004c8615dcabe31d21142689f719fdf549ba782850bef389cf227a1df575", size = 128944 },
|
||||
{ url = "https://files.pythonhosted.org/packages/b2/66/7d74a9f547dd719e86245e2a3d126140335861b8e362bdd22c7f9842a2b8/murmurhash-1.0.12-cp312-cp312-win_amd64.whl", hash = "sha256:8c5b8804c07a76f779e67f83aad37bc2189a0e65ebdd3f2b305242d489d31e03", size = 25554 },
|
||||
{ url = "https://files.pythonhosted.org/packages/12/77/bec7e3f00b0e23bfa027704d4023fea808d8cc0e593dd4247f4579de6776/murmurhash-1.0.12-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:63f10c6d6ef9ee85073dd896d2c4e0ab161bc6b8e7e9201c69f8061f9f1b6468", size = 26474 },
|
||||
{ url = "https://files.pythonhosted.org/packages/57/46/8dd3631cfb58435004678179a70352d3258b159c3f110e4f11fb23b1f776/murmurhash-1.0.12-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:66356f6308fd2a44a8ab056f020acd5bc22302f23ef5cce3705f2493e0fe9c3c", size = 26418 },
|
||||
{ url = "https://files.pythonhosted.org/packages/50/8f/ae8ee91c1b9ecdc4d849382af64b10e5d3c79ee7fcf8af13400d32092ae7/murmurhash-1.0.12-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bdb2104aa3471324724abf5a3a76fc94bcbeaf023bb6a6dd94da567b8633d8a6", size = 133342 },
|
||||
{ url = "https://files.pythonhosted.org/packages/cd/19/3ce034b0c068e8f88ea0ac28e196359aacf3c52718fbce1f7dbcf088261f/murmurhash-1.0.12-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7a7ef5fb37e72536458ac4a6f486fb374c60ac4c4862d9195d3d4b58239a91de", size = 133062 },
|
||||
{ url = "https://files.pythonhosted.org/packages/a0/0c/f9a868eb39751b9dceb9e35d91e8cf321dad8ce9e3e965d067b0fff0547a/murmurhash-1.0.12-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:8bd5524de195991ce3551b14286ec0b730cc9dd2e10565dad2ae470eec082028", size = 124858 },
|
||||
{ url = "https://files.pythonhosted.org/packages/b6/fc/52bcb3afc95733f30ec76e07cae4b5d0081fa049ade418303faeee619766/murmurhash-1.0.12-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:19de30edaaa2217cd0c41b6cf6bbfa418be5d7fdf267ca92e5e3710d4daac593", size = 124686 },
|
||||
{ url = "https://files.pythonhosted.org/packages/cf/99/9cdea62dec8ea26a35a8231ee09c83b16f9dedf16280a7a15f41d2061706/murmurhash-1.0.12-cp313-cp313-win_amd64.whl", hash = "sha256:7dc4ebdfed7ef8ed70519962ac9b704e91978ee14e049f1ff37bca2f579ce84d", size = 24702 },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "mutagen"
|
||||
version = "1.47.0"
|
||||
|
@ -1390,6 +1694,18 @@ wheels = [
|
|||
{ url = "https://files.pythonhosted.org/packages/b9/54/dd730b32ea14ea797530a4479b2ed46a6fb250f682a9cfb997e968bf0261/networkx-3.4.2-py3-none-any.whl", hash = "sha256:df5d4365b724cf81b8c6a7312509d0c22386097011ad1abe274afd5e9d3bbc5f", size = 1723263 },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "num2words"
|
||||
version = "0.5.14"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
dependencies = [
|
||||
{ name = "docopt" },
|
||||
]
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/f6/58/ad645bd38b4b648eb2fc2ba1b909398e54eb0cbb6a7dbd2b4953e38c9621/num2words-0.5.14.tar.gz", hash = "sha256:b066ec18e56b6616a3b38086b5747daafbaa8868b226a36127e0451c0cf379c6", size = 218213 }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/d6/5b/545e9267a1cc080c8a1be2746113a063e34bcdd0f5173fd665a5c13cb234/num2words-0.5.14-py3-none-any.whl", hash = "sha256:1c8e5b00142fc2966fd8d685001e36c4a9911e070d1b120e1beb721fa1edb33d", size = 163525 },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "numpy"
|
||||
version = "1.26.4"
|
||||
|
@ -1584,7 +1900,7 @@ version = "9.1.0.70"
|
|||
source = { registry = "https://pypi.org/simple" }
|
||||
dependencies = [
|
||||
{ name = "nvidia-cublas-cu12", version = "12.1.3.1", source = { registry = "https://pypi.org/simple" }, marker = "extra == 'extra-14-kokoro-fastapi-gpu'" },
|
||||
{ name = "nvidia-cublas-cu12", version = "12.4.5.8", source = { registry = "https://pypi.org/simple" }, marker = "extra == 'extra-14-kokoro-fastapi-cpu'" },
|
||||
{ name = "nvidia-cublas-cu12", version = "12.4.5.8", source = { registry = "https://pypi.org/simple" }, marker = "extra == 'extra-14-kokoro-fastapi-cpu' or extra != 'extra-14-kokoro-fastapi-gpu'" },
|
||||
]
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/9f/fd/713452cd72343f682b1c7b9321e23829f00b842ceaedcda96e742ea0b0b3/nvidia_cudnn_cu12-9.1.0.70-py3-none-manylinux2014_x86_64.whl", hash = "sha256:165764f44ef8c61fcdfdfdbe769d687e06374059fbb388b6c89ecb0e28793a6f", size = 664752741 },
|
||||
|
@ -1687,9 +2003,9 @@ resolution-markers = [
|
|||
"(python_full_version == '3.12.*' and platform_machine != 'aarch64' and sys_platform == 'linux') or (python_full_version == '3.12.*' and sys_platform != 'darwin' and sys_platform != 'linux')",
|
||||
]
|
||||
dependencies = [
|
||||
{ name = "nvidia-cublas-cu12", version = "12.1.3.1", source = { registry = "https://pypi.org/simple" } },
|
||||
{ name = "nvidia-cusparse-cu12", version = "12.1.0.106", source = { registry = "https://pypi.org/simple" } },
|
||||
{ name = "nvidia-nvjitlink-cu12" },
|
||||
{ name = "nvidia-cublas-cu12", version = "12.1.3.1", source = { registry = "https://pypi.org/simple" }, marker = "extra == 'extra-14-kokoro-fastapi-gpu'" },
|
||||
{ name = "nvidia-cusparse-cu12", version = "12.1.0.106", source = { registry = "https://pypi.org/simple" }, marker = "extra == 'extra-14-kokoro-fastapi-gpu'" },
|
||||
{ name = "nvidia-nvjitlink-cu12", marker = "extra == 'extra-14-kokoro-fastapi-gpu'" },
|
||||
]
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/bc/1d/8de1e5c67099015c834315e333911273a8c6aaba78923dd1d1e25fc5f217/nvidia_cusolver_cu12-11.4.5.107-py3-none-manylinux1_x86_64.whl", hash = "sha256:8a7ec542f0412294b15072fa7dab71d31334014a69f953004ea7a118206fe0dd", size = 124161928 },
|
||||
|
@ -1736,7 +2052,7 @@ resolution-markers = [
|
|||
"(python_full_version == '3.12.*' and platform_machine != 'aarch64' and sys_platform == 'linux') or (python_full_version == '3.12.*' and sys_platform != 'darwin' and sys_platform != 'linux')",
|
||||
]
|
||||
dependencies = [
|
||||
{ name = "nvidia-nvjitlink-cu12" },
|
||||
{ name = "nvidia-nvjitlink-cu12", marker = "extra == 'extra-14-kokoro-fastapi-gpu'" },
|
||||
]
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/65/5b/cfaeebf25cd9fdec14338ccb16f6b2c4c7fa9163aefcf057d86b9cc248bb/nvidia_cusparse_cu12-12.1.0.106-py3-none-manylinux1_x86_64.whl", hash = "sha256:f3b50f42cf363f86ab21f720998517a659a48131e8d538dc02f8768237bd884c", size = 195958278 },
|
||||
|
@ -2099,6 +2415,33 @@ wheels = [
|
|||
{ url = "https://files.pythonhosted.org/packages/88/5f/e351af9a41f866ac3f1fac4ca0613908d9a41741cfcf2228f4ad853b697d/pluggy-1.5.0-py3-none-any.whl", hash = "sha256:44e1ad92c8ca002de6377e165f3e0f1be63266ab4d554740532335b9d75ea669", size = 20556 },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "preshed"
|
||||
version = "3.0.9"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
dependencies = [
|
||||
{ name = "cymem" },
|
||||
{ name = "murmurhash" },
|
||||
]
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/f2/4e/76dbf784e7d4ed069f91a4c249b1d6ec6856ef0c0b2fd96992895d458b15/preshed-3.0.9.tar.gz", hash = "sha256:721863c5244ffcd2651ad0928951a2c7c77b102f4e11a251ad85d37ee7621660", size = 14478 }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/38/7f/a7d3eeaee67ecebbe51866c1aae6310e34cefa0a64821aed963a0a167b51/preshed-3.0.9-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:4f96ef4caf9847b2bb9868574dcbe2496f974e41c2b83d6621c24fb4c3fc57e3", size = 132225 },
|
||||
{ url = "https://files.pythonhosted.org/packages/61/4e/f251271ee9f0e0eb0ebe219a8df57ff8511a3b7a83e79e24d37105034164/preshed-3.0.9-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:a61302cf8bd30568631adcdaf9e6b21d40491bd89ba8ebf67324f98b6c2a2c05", size = 127791 },
|
||||
{ url = "https://files.pythonhosted.org/packages/eb/8b/6c8a153ea39b4750c20ed48dd9be4bf9d8c0b4e7822fc63c68cd2891703d/preshed-3.0.9-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:99499e8a58f58949d3f591295a97bca4e197066049c96f5d34944dd21a497193", size = 150279 },
|
||||
{ url = "https://files.pythonhosted.org/packages/42/59/8f65ad22c13020ff281529e415c32a56cfa691d24b0eca2eb3d756e4d644/preshed-3.0.9-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ea6b6566997dc3acd8c6ee11a89539ac85c77275b4dcefb2dc746d11053a5af8", size = 156914 },
|
||||
{ url = "https://files.pythonhosted.org/packages/f3/72/108426ca3b6e7f16db30b3b9396e3fa45a3fd5a76f6532ab04beada2e4e3/preshed-3.0.9-cp310-cp310-win_amd64.whl", hash = "sha256:bfd523085a84b1338ff18f61538e1cfcdedc4b9e76002589a301c364d19a2e36", size = 122224 },
|
||||
{ url = "https://files.pythonhosted.org/packages/c0/1e/05fa559f53b635d96b233b63e93accb75215025b997486f7290991bec6c3/preshed-3.0.9-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:e7c2364da27f2875524ce1ca754dc071515a9ad26eb5def4c7e69129a13c9a59", size = 132972 },
|
||||
{ url = "https://files.pythonhosted.org/packages/a8/b3/1a73ba16bab53043fd19dd0a7838ae05c705dccb329404dd4ad5925767f1/preshed-3.0.9-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:182138033c0730c683a6d97e567ceb8a3e83f3bff5704f300d582238dbd384b3", size = 128751 },
|
||||
{ url = "https://files.pythonhosted.org/packages/2c/9a/919d3708f6fa98d9eab1a186e6b30ab25a4595907bbc1fea5c1e8faa9b9d/preshed-3.0.9-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:345a10be3b86bcc6c0591d343a6dc2bfd86aa6838c30ced4256dfcfa836c3a64", size = 150050 },
|
||||
{ url = "https://files.pythonhosted.org/packages/db/69/d9ab108dc670b5be9e292bbd555f39e6eb0a4baab25cd28f792850d5e65b/preshed-3.0.9-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:51d0192274aa061699b284f9fd08416065348edbafd64840c3889617ee1609de", size = 157159 },
|
||||
{ url = "https://files.pythonhosted.org/packages/e4/fc/78cdbdb79f5d6d45949e72c32445d6c060977ad50a1dcfc0392622165f7c/preshed-3.0.9-cp311-cp311-win_amd64.whl", hash = "sha256:96b857d7a62cbccc3845ac8c41fd23addf052821be4eb987f2eb0da3d8745aa1", size = 122323 },
|
||||
{ url = "https://files.pythonhosted.org/packages/fe/7e/a41595876f644d8bd2c3d5422d7211e876b1848a8cc0c03cce33d9cd048a/preshed-3.0.9-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:b4fe6720012c62e6d550d6a5c1c7ad88cacef8388d186dad4bafea4140d9d198", size = 133196 },
|
||||
{ url = "https://files.pythonhosted.org/packages/e7/68/1b4772ff3232e71b63a9206936eb1f75e976ebf4e4e24dc9b3ea7b68369b/preshed-3.0.9-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:e04f05758875be9751e483bd3c519c22b00d3b07f5a64441ec328bb9e3c03700", size = 128594 },
|
||||
{ url = "https://files.pythonhosted.org/packages/f3/52/48eefe876a3841c5850bd955daf145d0e408567c8f46a997bce136dc259d/preshed-3.0.9-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4a55091d0e395f1fdb62ab43401bb9f8b46c7d7794d5b071813c29dc1ab22fd0", size = 149220 },
|
||||
{ url = "https://files.pythonhosted.org/packages/55/ea/9e6c1a7b1d623f6340379290d603a3b8a71ce52a93f842fbf7547f7f1812/preshed-3.0.9-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7de8f5138bcac7870424e09684dc3dd33c8e30e81b269f6c9ede3d8c7bb8e257", size = 156809 },
|
||||
{ url = "https://files.pythonhosted.org/packages/db/e4/d074efb7e8a8873d346d2fb8dd43e19b1eae0697351c0d79cff947cba46e/preshed-3.0.9-cp312-cp312-win_amd64.whl", hash = "sha256:24229c77364628743bc29c5620c5d6607ed104f0e02ae31f8a030f99a78a5ceb", size = 122428 },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "protobuf"
|
||||
version = "5.29.3"
|
||||
|
@ -2808,6 +3151,18 @@ wheels = [
|
|||
{ url = "https://files.pythonhosted.org/packages/b7/ce/149a00dd41f10bc29e5921b496af8b574d8413afcd5e30dfa0ed46c2cc5e/six-1.17.0-py2.py3-none-any.whl", hash = "sha256:4721f391ed90541fddacab5acf947aa0d3dc7d27b2e1e8eda2be8970586c3274", size = 11050 },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "smart-open"
|
||||
version = "7.1.0"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
dependencies = [
|
||||
{ name = "wrapt" },
|
||||
]
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/21/30/1f41c3d3b8cec82024b4b277bfd4e5b18b765ae7279eb9871fa25c503778/smart_open-7.1.0.tar.gz", hash = "sha256:a4f09f84f0f6d3637c6543aca7b5487438877a21360e7368ccf1f704789752ba", size = 72044 }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/7a/18/9a8d9f01957aa1f8bbc5676d54c2e33102d247e146c1a3679d3bd5cc2e3a/smart_open-7.1.0-py3-none-any.whl", hash = "sha256:4b8489bb6058196258bafe901730c7db0dcf4f083f316e97269c66f45502055b", size = 61746 },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "sniffio"
|
||||
version = "1.3.1"
|
||||
|
@ -2836,6 +3191,83 @@ wheels = [
|
|||
{ url = "https://files.pythonhosted.org/packages/03/69/eda7a076709ada14a11604347ea3b80b3888101c93bbc739071d2f553df5/soundfile-0.13.0-py2.py3-none-win_amd64.whl", hash = "sha256:9fd67b1867fb7ce4a1bf1fd6600dfe9bf2af26b7ae3671719196c1d5632fa462", size = 1019114 },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "spacy"
|
||||
version = "3.8.4"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
dependencies = [
|
||||
{ name = "catalogue" },
|
||||
{ name = "cymem" },
|
||||
{ name = "jinja2" },
|
||||
{ name = "langcodes" },
|
||||
{ name = "murmurhash" },
|
||||
{ name = "numpy" },
|
||||
{ name = "packaging" },
|
||||
{ name = "preshed" },
|
||||
{ name = "pydantic" },
|
||||
{ name = "requests" },
|
||||
{ name = "setuptools" },
|
||||
{ name = "spacy-legacy" },
|
||||
{ name = "spacy-loggers" },
|
||||
{ name = "srsly" },
|
||||
{ name = "thinc" },
|
||||
{ name = "tqdm" },
|
||||
{ name = "typer" },
|
||||
{ name = "wasabi" },
|
||||
{ name = "weasel" },
|
||||
]
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/e8/51/c0862063e8338a2cc769e787f0448c92a87ac87abfe2987ecc84d8246f51/spacy-3.8.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:bbe074f58ac21156dab821b5a145f72c2a50933098bc371ab5a63f47e8ac5fc6", size = 6611302 },
|
||||
{ url = "https://files.pythonhosted.org/packages/0f/8c/9e565966f016371bff7d020eac41248085c3ebbc7b50c565d45e0f8a439c/spacy-3.8.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:d0474188edcdad2be647df04cfdc4351dea877caf41bd8c93b36e3b9124794c7", size = 6306323 },
|
||||
{ url = "https://files.pythonhosted.org/packages/0f/24/599a37ee61458471a1f812a4970c082f7d86089ff2ea8517ffeac0dba86d/spacy-3.8.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:94c589ba39dbd035a7205c05f6efa2b60bdd1faae0f5fc9c0d657eb903b885ac", size = 29152257 },
|
||||
{ url = "https://files.pythonhosted.org/packages/5f/3d/9364f30a7e768270adf3480fe8e198f425e2c9033aa24aa12f845fdaad3c/spacy-3.8.4-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:17a758d53761b54c9500aca34cd27399730848dce2b74edf76a0893b5c711ed3", size = 29913582 },
|
||||
{ url = "https://files.pythonhosted.org/packages/1b/b9/31e2d2a3357fbb75aa9cb2dc5ff720077c4af6962034a9a3d8ac8550bc43/spacy-3.8.4-cp310-cp310-win_amd64.whl", hash = "sha256:e34003ab0c8e447ec40f74f03b95fa018e8c81ef2fc067a2e8398edd96a6ab0a", size = 12206276 },
|
||||
{ url = "https://files.pythonhosted.org/packages/4c/d9/439aed2e686b30aef877dea6181852810ae9dc2fcbc9003e9eedb543e463/spacy-3.8.4-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:276ef77ce197a1eb73f86e6b6f527886cfd8211f38688eb3c246e07e51f8ae22", size = 6574422 },
|
||||
{ url = "https://files.pythonhosted.org/packages/90/95/0823540c856b61193cb2d0c8feb863d5130e1571c79140317004ad786612/spacy-3.8.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:1080908078a78299f8924e5df6c61e9f9db2f6930ed945e91b048e9af41d58a1", size = 6265045 },
|
||||
{ url = "https://files.pythonhosted.org/packages/f9/36/4f95922a22c32bd6fdda50ae5780c55b72d75ff76fd94cafa24950601330/spacy-3.8.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4540e4599df47e2d7525b8da1515d29da72db339ba8553b2f8d30842179806ea", size = 30570081 },
|
||||
{ url = "https://files.pythonhosted.org/packages/d9/12/fc55ad34ef246f33951ecec0be86ba906830dbd443c763e90c6e19f51e1b/spacy-3.8.4-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:e6b04dce1b52c9a890c651cc50a38349ffaf93f03740059653c124cbf2a3187d", size = 31112610 },
|
||||
{ url = "https://files.pythonhosted.org/packages/ca/45/7b43e89b30fe73e32fd8b8ab80c407d326761530a88abd823ec8623772a6/spacy-3.8.4-cp311-cp311-win_amd64.whl", hash = "sha256:5837d60329d2da90008ccb24e65b03fb53d6729918ed6fda9d15868d36961c00", size = 12201808 },
|
||||
{ url = "https://files.pythonhosted.org/packages/bf/0b/a610dcf958c349b361bf36ca25208db4dd25b5d82a00670161fae56970c6/spacy-3.8.4-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:381e482cde64a4346cd5a33e3f30881def405ad97c609620b01f694ef77f4f68", size = 6293285 },
|
||||
{ url = "https://files.pythonhosted.org/packages/64/20/1db5a7384dba9e979b912dae57a8fb8ab986e3859da60ab1cb086c430dbe/spacy-3.8.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:e5ef210fef16bf95cbb2db381a53089e53aac240d79a401ec67a3f900f3ca538", size = 5978267 },
|
||||
{ url = "https://files.pythonhosted.org/packages/f2/cb/b565f6e04fb9b2d19c3de105dec659c6d98cba17bfd371e455c11c206040/spacy-3.8.4-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:96e70f3089f62c661895433991d1818678aa23ecd92c78eac3190892b3840787", size = 31815281 },
|
||||
{ url = "https://files.pythonhosted.org/packages/d0/e4/ede5b6741d1e180ab52efdcfede0abf81e69429c0ce5616c05a066f588da/spacy-3.8.4-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:10b63bd107230f214283c12b0e38c64978deb64f9fa4ed5538ec4951a019e629", size = 32106479 },
|
||||
{ url = "https://files.pythonhosted.org/packages/73/94/d9a904e86021fdfa2d403a55e277c58911979aa894114abbf300ee8c1bfd/spacy-3.8.4-cp312-cp312-win_amd64.whl", hash = "sha256:3eba8fb4388f3e21e9a3af14462422ea432a8aa37327886de7603654267a1b2b", size = 11787648 },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "spacy-curated-transformers"
|
||||
version = "0.3.0"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
dependencies = [
|
||||
{ name = "curated-tokenizers" },
|
||||
{ name = "curated-transformers" },
|
||||
{ name = "torch", version = "2.5.1", source = { registry = "https://download.pytorch.org/whl/cpu" }, marker = "extra == 'extra-14-kokoro-fastapi-cpu'" },
|
||||
{ name = "torch", version = "2.5.1", source = { registry = "https://pypi.org/simple" }, marker = "(extra == 'extra-14-kokoro-fastapi-cpu' and extra == 'extra-14-kokoro-fastapi-gpu') or (extra != 'extra-14-kokoro-fastapi-cpu' and extra != 'extra-14-kokoro-fastapi-gpu')" },
|
||||
{ name = "torch", version = "2.5.1+cu121", source = { registry = "https://download.pytorch.org/whl/cu121" }, marker = "extra == 'extra-14-kokoro-fastapi-gpu'" },
|
||||
]
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/bc/60/2e4cbad1fe1726777d7b270a056cbca8d5d20efa01db88fb1064675767c1/spacy_curated_transformers-0.3.0.tar.gz", hash = "sha256:989a6bf2aa7becd1ac8c3be5f245cd489223d4e16e7218f6b69479c7e2689937", size = 218192 }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/31/b5/023c6565ee40ec668181f50cea518e989bd773026362022a241619223ea5/spacy_curated_transformers-0.3.0-py2.py3-none-any.whl", hash = "sha256:ddfd33e81b53ad798dac841ab022189f9543718ff874eda1081fce6ff93de377", size = 236322 },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "spacy-legacy"
|
||||
version = "3.0.12"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/d9/79/91f9d7cc8db5642acad830dcc4b49ba65a7790152832c4eceb305e46d681/spacy-legacy-3.0.12.tar.gz", hash = "sha256:b37d6e0c9b6e1d7ca1cf5bc7152ab64a4c4671f59c85adaf7a3fcb870357a774", size = 23806 }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/c3/55/12e842c70ff8828e34e543a2c7176dac4da006ca6901c9e8b43efab8bc6b/spacy_legacy-3.0.12-py2.py3-none-any.whl", hash = "sha256:476e3bd0d05f8c339ed60f40986c07387c0a71479245d6d0f4298dbd52cda55f", size = 29971 },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "spacy-loggers"
|
||||
version = "1.0.5"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/67/3d/926db774c9c98acf66cb4ed7faf6c377746f3e00b84b700d0868b95d0712/spacy-loggers-1.0.5.tar.gz", hash = "sha256:d60b0bdbf915a60e516cc2e653baeff946f0cfc461b452d11a4d5458c6fe5f24", size = 20811 }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/33/78/d1a1a026ef3af911159398c939b1509d5c36fe524c7b644f34a5146c4e16/spacy_loggers-1.0.5-py3-none-any.whl", hash = "sha256:196284c9c446cc0cdb944005384270d775fdeaf4f494d8e269466cfa497ef645", size = 22343 },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "sqlalchemy"
|
||||
version = "2.0.27"
|
||||
|
@ -2873,6 +3305,45 @@ wheels = [
|
|||
{ url = "https://files.pythonhosted.org/packages/c0/f4/891e34108412875c77eb48771a8f8e72e6655363dd0d9b9c87c82eaa4870/SQLAlchemy-2.0.27-py3-none-any.whl", hash = "sha256:1ab4e0448018d01b142c916cc7119ca573803a4745cfe341b8f95657812700ac", size = 1867012 },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "srsly"
|
||||
version = "2.5.1"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
dependencies = [
|
||||
{ name = "catalogue" },
|
||||
]
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/b7/e8/eb51b1349f50bac0222398af0942613fdc9d1453ae67cbe4bf9936a1a54b/srsly-2.5.1.tar.gz", hash = "sha256:ab1b4bf6cf3e29da23dae0493dd1517fb787075206512351421b89b4fc27c77e", size = 466464 }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/37/08/448bcc87bb93bc19fccf70c2f0f993ac42aa41d5f44a19c60d00186aea09/srsly-2.5.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d0cda6f65cc0dd1daf47e856b0d6c5d51db8a9343c5007723ca06903dcfe367d", size = 636045 },
|
||||
{ url = "https://files.pythonhosted.org/packages/03/8a/379dd9014e56460e71346cf512632fb8cbc89aa6dfebe31dff21c9eb37ba/srsly-2.5.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:cf643e6f45c266cfacea54997a1f9cfe0113fadac1ac21a1ec5b200cfe477ba0", size = 634425 },
|
||||
{ url = "https://files.pythonhosted.org/packages/95/69/46e672941b5f4403b0e2b14918d8e1393ca48e3338e2c01e549113261cdf/srsly-2.5.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:467ed25ddab09ca9404fda92519a317c803b5ea0849f846e74ba8b7843557df5", size = 1085032 },
|
||||
{ url = "https://files.pythonhosted.org/packages/ce/d8/1039e663b87a06d2450148ebadc07eaf6f8b7dd7f7d5e2f4221050ce6702/srsly-2.5.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5f8113d202664b7d31025bdbe40b9d3536e8d7154d09520b6a1955818fa6d622", size = 1089469 },
|
||||
{ url = "https://files.pythonhosted.org/packages/e9/62/f819ac665ecca2659343a6c79174c582fe292829f481899f05e7a7301988/srsly-2.5.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:794d39fccd2b333d24f1b445acc78daf90f3f37d3c0f6f0167f25c56961804e7", size = 1052673 },
|
||||
{ url = "https://files.pythonhosted.org/packages/a8/69/321a41fe4d549b96dd010b6a77657e84eb181034f9d125e2feebcd8f2e5c/srsly-2.5.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:df7fd77457c4d6c630f700b1019a8ad173e411e7cf7cfdea70e5ed86b608083b", size = 1062650 },
|
||||
{ url = "https://files.pythonhosted.org/packages/d5/b8/3dfed2db5c7ecf275aaddb775e2ae17c576b09c848873188fce91e410129/srsly-2.5.1-cp310-cp310-win_amd64.whl", hash = "sha256:1a4dddb2edb8f7974c9aa5ec46dc687a75215b3bbdc815ce3fc9ea68fe1e94b5", size = 632267 },
|
||||
{ url = "https://files.pythonhosted.org/packages/df/9c/a248bb49de499fe0990e3cb0fb341c2373d8863ef9a8b5799353cade5731/srsly-2.5.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:58f0736794ce00a71d62a39cbba1d62ea8d5be4751df956e802d147da20ecad7", size = 635917 },
|
||||
{ url = "https://files.pythonhosted.org/packages/41/47/1bdaad84502df973ecb8ca658117234cf7fb20e1dec60da71dce82de993f/srsly-2.5.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:7a8269c40859806d71920396d185f4f38dc985cdb6a28d3a326a701e29a5f629", size = 634374 },
|
||||
{ url = "https://files.pythonhosted.org/packages/e5/2a/d73c71989fcf2a6d1fa518d75322aff4db01a8763f167f8c5e00aac11097/srsly-2.5.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:889905900401fefc1032e22b73aecbed8b4251aa363f632b2d1f86fc16f1ad8e", size = 1108390 },
|
||||
{ url = "https://files.pythonhosted.org/packages/35/a3/9eda9997a8bd011caed18fdaa5ce606714eb06d8dab587ed0522b3e92ab1/srsly-2.5.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bf454755f22589df49c25dc799d8af7b47dce3d861dded35baf0f0b6ceab4422", size = 1110712 },
|
||||
{ url = "https://files.pythonhosted.org/packages/8a/ef/4b50bc05d06349f905b27f824cc23b652098efd4be19aead3af4981df647/srsly-2.5.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:cc0607c8a59013a51dde5c1b4e465558728e9e0a35dcfa73c7cbefa91a0aad50", size = 1081244 },
|
||||
{ url = "https://files.pythonhosted.org/packages/90/af/d4a2512d9a5048d2b18efead39d4c4404bddd4972935bbc68211292a736c/srsly-2.5.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:d5421ba3ab3c790e8b41939c51a1d0f44326bfc052d7a0508860fb79a47aee7f", size = 1091692 },
|
||||
{ url = "https://files.pythonhosted.org/packages/bb/da/657a685f63028dcb00ccdc4ac125ed347c8bff6fa0dab6a9eb3dc45f3223/srsly-2.5.1-cp311-cp311-win_amd64.whl", hash = "sha256:b96ea5a9a0d0379a79c46d255464a372fb14c30f59a8bc113e4316d131a530ab", size = 632627 },
|
||||
{ url = "https://files.pythonhosted.org/packages/fb/f6/bebc20d75bd02121fc0f65ad8c92a5dd2570e870005e940faa55a263e61a/srsly-2.5.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:683b54ed63d7dfee03bc2abc4b4a5f2152f81ec217bbadbac01ef1aaf2a75790", size = 636717 },
|
||||
{ url = "https://files.pythonhosted.org/packages/b6/e8/9372317a4742c70b87b413335adfcdfb2bee4f88f3faba89fabb9e6abf21/srsly-2.5.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:459d987130e57e83ce9e160899afbeb871d975f811e6958158763dd9a8a20f23", size = 634697 },
|
||||
{ url = "https://files.pythonhosted.org/packages/d5/00/c6a7b99ab27b051a27bd26fe1a8c1885225bb8980282bf9cb99f70610368/srsly-2.5.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:184e3c98389aab68ff04aab9095bd5f1a8e5a72cc5edcba9d733bac928f5cf9f", size = 1134655 },
|
||||
{ url = "https://files.pythonhosted.org/packages/c2/e6/861459e8241ec3b78c111081bd5efa414ef85867e17c45b6882954468d6e/srsly-2.5.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:00c2a3e4856e63b7efd47591d049aaee8e5a250e098917f50d93ea68853fab78", size = 1143544 },
|
||||
{ url = "https://files.pythonhosted.org/packages/2d/85/8448fe874dd2042a4eceea5315cfff3af03ac77ff5073812071852c4e7e2/srsly-2.5.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:366b4708933cd8d6025c13c2cea3331f079c7bb5c25ec76fca392b6fc09818a0", size = 1098330 },
|
||||
{ url = "https://files.pythonhosted.org/packages/ef/7e/04d0e1417da140b2ac4053a3d4fcfc86cd59bf4829f69d370bb899f74d5d/srsly-2.5.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:c8a0b03c64eb6e150d772c5149befbadd981cc734ab13184b0561c17c8cef9b1", size = 1110670 },
|
||||
{ url = "https://files.pythonhosted.org/packages/96/1a/a8cd627eaa81a91feb6ceab50155f4ceff3eef6107916cb87ef796958427/srsly-2.5.1-cp312-cp312-win_amd64.whl", hash = "sha256:7952538f6bba91b9d8bf31a642ac9e8b9ccc0ccbb309feb88518bfb84bb0dc0d", size = 632598 },
|
||||
{ url = "https://files.pythonhosted.org/packages/42/94/cab36845aad6e2c22ecee1178accaa365657296ff87305b805648fd41118/srsly-2.5.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:84b372f7ef1604b4a5b3cee1571993931f845a5b58652ac01bcb32c52586d2a8", size = 634883 },
|
||||
{ url = "https://files.pythonhosted.org/packages/67/8b/501f51f4eaee7e1fd7327764799cb0a42f5d0de042a97916d30dbff770fc/srsly-2.5.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:6ac3944c112acb3347a39bfdc2ebfc9e2d4bace20fe1c0b764374ac5b83519f2", size = 632842 },
|
||||
{ url = "https://files.pythonhosted.org/packages/07/be/5b8fce4829661e070a7d3e262d2e533f0e297b11b8993d57240da67d7330/srsly-2.5.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6118f9c4b221cde0a990d06a42c8a4845218d55b425d8550746fe790acf267e9", size = 1118516 },
|
||||
{ url = "https://files.pythonhosted.org/packages/91/60/a34e97564eac352c0e916c98f44b6f566b7eb6a9fb60bcd60ffa98530762/srsly-2.5.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7481460110d9986781d9e4ac0f5f991f1d6839284a80ad268625f9a23f686950", size = 1127974 },
|
||||
{ url = "https://files.pythonhosted.org/packages/70/a2/f642334db0cabd187fa86b8773257ee6993c6009338a6831d4804e2c5b3c/srsly-2.5.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:6e57b8138082f09e35db60f99757e16652489e9e3692471d8e0c39aa95180688", size = 1086098 },
|
||||
{ url = "https://files.pythonhosted.org/packages/0d/9b/be48e185c5a010e71b5135e4cdf317ff56b8ac4bc08f394bbf882ac13b05/srsly-2.5.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:bab90b85a63a1fe0bbc74d373c8bb9bb0499ddfa89075e0ebe8d670f12d04691", size = 1100354 },
|
||||
{ url = "https://files.pythonhosted.org/packages/3a/e2/745aeba88a8513017fbac2fd2f9f07b8a36065e51695f818541eb795ec0c/srsly-2.5.1-cp313-cp313-win_amd64.whl", hash = "sha256:e73712be1634b5e1de6f81c273a7d47fe091ad3c79dc779c03d3416a5c117cee", size = 630634 },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "starlette"
|
||||
version = "0.41.3"
@@ -2906,6 +3377,43 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/40/44/4a5f08c96eb108af5cb50b41f76142f0afa346dfa99d5296fe7202a11854/tabulate-0.9.0-py3-none-any.whl", hash = "sha256:024ca478df22e9340661486f85298cff5f6dcdba14f3813e8830015b9ed1948f", size = 35252 },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "thinc"
|
||||
version = "8.3.4"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
dependencies = [
|
||||
{ name = "blis" },
|
||||
{ name = "catalogue" },
|
||||
{ name = "confection" },
|
||||
{ name = "cymem" },
|
||||
{ name = "murmurhash" },
|
||||
{ name = "numpy" },
|
||||
{ name = "packaging" },
|
||||
{ name = "preshed" },
|
||||
{ name = "pydantic" },
|
||||
{ name = "setuptools" },
|
||||
{ name = "srsly" },
|
||||
{ name = "wasabi" },
|
||||
]
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/b5/ff/60c9bcfe28e56c905aac8e61a838c7afe5dc3073c9beed0b63a26ace0bb7/thinc-8.3.4.tar.gz", hash = "sha256:b5925482498bbb6dca0771e375b35c915818f735891e93d93a662dab15f6ffd8", size = 193903 }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/f9/c8/13db2e346d2e199f679fc3f620da53af561ea74b43b38e5b4a0a79a12860/thinc-8.3.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:916ea79a7c7462664be9435679b7769b4fc1ecea3886db6da6118e4eb5cc8c8b", size = 843884 },
|
||||
{ url = "https://files.pythonhosted.org/packages/ff/32/c25d68b5030f91c8506dfbba706f24b1cd1d0d4950cb0e3de17d176a5411/thinc-8.3.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:6c985ce9cf82a611f4f348c721372d073537ca0e8b7bbb8bd865c1598ddd79d1", size = 779384 },
|
||||
{ url = "https://files.pythonhosted.org/packages/5d/5f/8a88959191f8c9f7eed61a7efec45f0222720c6318c09f9a058609810128/thinc-8.3.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2fff4b30f8513832d13a31486e9074a7020de3d48f8a3d1527e369c242d6ebe9", size = 3673814 },
|
||||
{ url = "https://files.pythonhosted.org/packages/6f/4f/ea998b85cece6c2441a2416c795476776a5c11f7f2c7fb478a00d407d7f6/thinc-8.3.4-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:a9ee46d19b9f4cac13a5539f97978c857338a31e4bf8d9b3a7741dcbc792220f", size = 4685083 },
|
||||
{ url = "https://files.pythonhosted.org/packages/0b/d0/295add6fcac8b633877a3a8d4b323e8cac4f4078f4f48910deb8c29666cb/thinc-8.3.4-cp310-cp310-win_amd64.whl", hash = "sha256:d08529d53f8652e15e4f3c0f6953e73f85cc71d3b6e4750d2d9ace23616dbe8f", size = 1492082 },
|
||||
{ url = "https://files.pythonhosted.org/packages/85/47/68187c78a04cdc31cbd3ae393068f994b60476b5ecac6dfe7d04b124aacf/thinc-8.3.4-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:a8bb4b47358a1855803b375f4432cefdf373f46ef249b554418d2e77c7323040", size = 839320 },
|
||||
{ url = "https://files.pythonhosted.org/packages/49/ea/066dd415e61fcef20083bbca41c2c02e640fea71326531f2619708efee1e/thinc-8.3.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:00ed92f9a34b9794f51fcd48467c863f4eb7c5b41559aef6ef3c980c21378fec", size = 774196 },
|
||||
{ url = "https://files.pythonhosted.org/packages/8c/68/36c1a92a374891e0d496677c59f5f9fdc1e57bbb214c487bb8bb3e9290c2/thinc-8.3.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:85691fca84a6a1506f7ddbd2c1706a5524d56f65582e76b2e260a06d9e83e86d", size = 3922504 },
|
||||
{ url = "https://files.pythonhosted.org/packages/ec/8a/48e463240a586e91f83c87660986e520aa91fbd839f6631ee9bc0fbb3cbd/thinc-8.3.4-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:eae1573fc19e514defc1bfd4f93f0b4bfc1dcefdb6d70bad1863825747f24800", size = 4932946 },
|
||||
{ url = "https://files.pythonhosted.org/packages/d9/98/f910b8d8113ab9b955a68e9bbf0d5bd0e828f22dd6d3c226af6ec3970817/thinc-8.3.4-cp311-cp311-win_amd64.whl", hash = "sha256:81e8638f9bdc38e366674acc4b63cf7c6267266a15477963a5db21b3d9f1aa36", size = 1490133 },
|
||||
{ url = "https://files.pythonhosted.org/packages/90/ff/d1b5d7e1a7f95581e9a736f50a5a9aff72327ddbbc629a68070c36acefd9/thinc-8.3.4-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:c9da6375b106df5186bd2bfd1273bc923c01ab7d482f8942e4ee528a28965c3a", size = 825099 },
|
||||
{ url = "https://files.pythonhosted.org/packages/ce/0b/d207c917886dc40671361de0880ec3ea0443a718aae9dbb0a50ac0849f92/thinc-8.3.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:07091c6b5faace50857c4cf0982204969d77388d0a6f156dd2442297dceeb838", size = 761024 },
|
||||
{ url = "https://files.pythonhosted.org/packages/4b/a3/3ec5e9d7cbebc3257b8223a3d188216b91ab6ec1e66b6fdd99d22394bc62/thinc-8.3.4-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fd40ad71bcd8b1b9daa0462e1255b1c1e86e901c2fd773966601f44a95878032", size = 3710390 },
|
||||
{ url = "https://files.pythonhosted.org/packages/40/ee/955c74e4e6ff2f694c99dcbbf7be8d478a8868503aeb3474517277c07667/thinc-8.3.4-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:eb10823b3a3f1c6440998b11bf9a3571dd859feaed0fdb510a1c1097d9dc6a86", size = 4731524 },
|
||||
{ url = "https://files.pythonhosted.org/packages/a4/44/3786431e5c1eeebed3d7a4c97122896ca6d4a502b03d02c2171c417052fd/thinc-8.3.4-cp312-cp312-win_amd64.whl", hash = "sha256:b5e5e7bf5dae142fd50ed9785971292c4aab4d9ed18e4947653b6a0584d5227c", size = 1455883 },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "tiktoken"
|
||||
version = "0.8.0"
@@ -3034,25 +3542,25 @@ resolution-markers = [
"(python_full_version == '3.12.*' and platform_machine == 'aarch64' and sys_platform == 'linux') or (python_full_version == '3.12.*' and sys_platform == 'darwin')",
|
||||
]
|
||||
dependencies = [
|
||||
{ name = "filelock" },
|
||||
{ name = "fsspec" },
|
||||
{ name = "jinja2" },
|
||||
{ name = "networkx" },
|
||||
{ name = "nvidia-cublas-cu12", version = "12.4.5.8", source = { registry = "https://pypi.org/simple" }, marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" },
|
||||
{ name = "nvidia-cuda-cupti-cu12", version = "12.4.127", source = { registry = "https://pypi.org/simple" }, marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" },
|
||||
{ name = "nvidia-cuda-nvrtc-cu12", version = "12.4.127", source = { registry = "https://pypi.org/simple" }, marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" },
|
||||
{ name = "nvidia-cuda-runtime-cu12", version = "12.4.127", source = { registry = "https://pypi.org/simple" }, marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" },
|
||||
{ name = "nvidia-cudnn-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" },
|
||||
{ name = "nvidia-cufft-cu12", version = "11.2.1.3", source = { registry = "https://pypi.org/simple" }, marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" },
|
||||
{ name = "nvidia-curand-cu12", version = "10.3.5.147", source = { registry = "https://pypi.org/simple" }, marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" },
|
||||
{ name = "nvidia-cusolver-cu12", version = "11.6.1.9", source = { registry = "https://pypi.org/simple" }, marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" },
|
||||
{ name = "nvidia-cusparse-cu12", version = "12.3.1.170", source = { registry = "https://pypi.org/simple" }, marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" },
|
||||
{ name = "nvidia-nccl-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" },
|
||||
{ name = "nvidia-nvjitlink-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" },
|
||||
{ name = "nvidia-nvtx-cu12", version = "12.4.127", source = { registry = "https://pypi.org/simple" }, marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" },
|
||||
{ name = "setuptools", marker = "python_full_version >= '3.12'" },
|
||||
{ name = "sympy" },
|
||||
{ name = "typing-extensions" },
|
||||
{ name = "filelock", marker = "extra == 'extra-14-kokoro-fastapi-cpu'" },
|
||||
{ name = "fsspec", marker = "extra == 'extra-14-kokoro-fastapi-cpu'" },
|
||||
{ name = "jinja2", marker = "extra == 'extra-14-kokoro-fastapi-cpu'" },
|
||||
{ name = "networkx", marker = "extra == 'extra-14-kokoro-fastapi-cpu'" },
|
||||
{ name = "nvidia-cublas-cu12", version = "12.4.5.8", source = { registry = "https://pypi.org/simple" }, marker = "(platform_machine == 'x86_64' and sys_platform == 'linux' and extra == 'extra-14-kokoro-fastapi-cpu') or (platform_machine != 'x86_64' and extra == 'extra-14-kokoro-fastapi-cpu' and extra == 'extra-14-kokoro-fastapi-gpu') or (sys_platform != 'linux' and extra == 'extra-14-kokoro-fastapi-cpu' and extra == 'extra-14-kokoro-fastapi-gpu')" },
|
||||
{ name = "nvidia-cuda-cupti-cu12", version = "12.4.127", source = { registry = "https://pypi.org/simple" }, marker = "(platform_machine == 'x86_64' and sys_platform == 'linux' and extra == 'extra-14-kokoro-fastapi-cpu') or (platform_machine != 'x86_64' and extra == 'extra-14-kokoro-fastapi-cpu' and extra == 'extra-14-kokoro-fastapi-gpu') or (sys_platform != 'linux' and extra == 'extra-14-kokoro-fastapi-cpu' and extra == 'extra-14-kokoro-fastapi-gpu')" },
|
||||
{ name = "nvidia-cuda-nvrtc-cu12", version = "12.4.127", source = { registry = "https://pypi.org/simple" }, marker = "(platform_machine == 'x86_64' and sys_platform == 'linux' and extra == 'extra-14-kokoro-fastapi-cpu') or (platform_machine != 'x86_64' and extra == 'extra-14-kokoro-fastapi-cpu' and extra == 'extra-14-kokoro-fastapi-gpu') or (sys_platform != 'linux' and extra == 'extra-14-kokoro-fastapi-cpu' and extra == 'extra-14-kokoro-fastapi-gpu')" },
|
||||
{ name = "nvidia-cuda-runtime-cu12", version = "12.4.127", source = { registry = "https://pypi.org/simple" }, marker = "(platform_machine == 'x86_64' and sys_platform == 'linux' and extra == 'extra-14-kokoro-fastapi-cpu') or (platform_machine != 'x86_64' and extra == 'extra-14-kokoro-fastapi-cpu' and extra == 'extra-14-kokoro-fastapi-gpu') or (sys_platform != 'linux' and extra == 'extra-14-kokoro-fastapi-cpu' and extra == 'extra-14-kokoro-fastapi-gpu')" },
|
||||
{ name = "nvidia-cudnn-cu12", marker = "(platform_machine == 'x86_64' and sys_platform == 'linux' and extra == 'extra-14-kokoro-fastapi-cpu') or (platform_machine != 'x86_64' and extra == 'extra-14-kokoro-fastapi-cpu' and extra == 'extra-14-kokoro-fastapi-gpu') or (sys_platform != 'linux' and extra == 'extra-14-kokoro-fastapi-cpu' and extra == 'extra-14-kokoro-fastapi-gpu')" },
|
||||
{ name = "nvidia-cufft-cu12", version = "11.2.1.3", source = { registry = "https://pypi.org/simple" }, marker = "(platform_machine == 'x86_64' and sys_platform == 'linux' and extra == 'extra-14-kokoro-fastapi-cpu') or (platform_machine != 'x86_64' and extra == 'extra-14-kokoro-fastapi-cpu' and extra == 'extra-14-kokoro-fastapi-gpu') or (sys_platform != 'linux' and extra == 'extra-14-kokoro-fastapi-cpu' and extra == 'extra-14-kokoro-fastapi-gpu')" },
|
||||
{ name = "nvidia-curand-cu12", version = "10.3.5.147", source = { registry = "https://pypi.org/simple" }, marker = "(platform_machine == 'x86_64' and sys_platform == 'linux' and extra == 'extra-14-kokoro-fastapi-cpu') or (platform_machine != 'x86_64' and extra == 'extra-14-kokoro-fastapi-cpu' and extra == 'extra-14-kokoro-fastapi-gpu') or (sys_platform != 'linux' and extra == 'extra-14-kokoro-fastapi-cpu' and extra == 'extra-14-kokoro-fastapi-gpu')" },
|
||||
{ name = "nvidia-cusolver-cu12", version = "11.6.1.9", source = { registry = "https://pypi.org/simple" }, marker = "(platform_machine == 'x86_64' and sys_platform == 'linux' and extra == 'extra-14-kokoro-fastapi-cpu') or (platform_machine != 'x86_64' and extra == 'extra-14-kokoro-fastapi-cpu' and extra == 'extra-14-kokoro-fastapi-gpu') or (sys_platform != 'linux' and extra == 'extra-14-kokoro-fastapi-cpu' and extra == 'extra-14-kokoro-fastapi-gpu')" },
|
||||
{ name = "nvidia-cusparse-cu12", version = "12.3.1.170", source = { registry = "https://pypi.org/simple" }, marker = "(platform_machine == 'x86_64' and sys_platform == 'linux' and extra == 'extra-14-kokoro-fastapi-cpu') or (platform_machine != 'x86_64' and extra == 'extra-14-kokoro-fastapi-cpu' and extra == 'extra-14-kokoro-fastapi-gpu') or (sys_platform != 'linux' and extra == 'extra-14-kokoro-fastapi-cpu' and extra == 'extra-14-kokoro-fastapi-gpu')" },
|
||||
{ name = "nvidia-nccl-cu12", marker = "(platform_machine == 'x86_64' and sys_platform == 'linux' and extra == 'extra-14-kokoro-fastapi-cpu') or (platform_machine != 'x86_64' and extra == 'extra-14-kokoro-fastapi-cpu' and extra == 'extra-14-kokoro-fastapi-gpu') or (sys_platform != 'linux' and extra == 'extra-14-kokoro-fastapi-cpu' and extra == 'extra-14-kokoro-fastapi-gpu')" },
|
||||
{ name = "nvidia-nvjitlink-cu12", marker = "(platform_machine == 'x86_64' and sys_platform == 'linux' and extra == 'extra-14-kokoro-fastapi-cpu') or (platform_machine != 'x86_64' and extra == 'extra-14-kokoro-fastapi-cpu' and extra == 'extra-14-kokoro-fastapi-gpu') or (sys_platform != 'linux' and extra == 'extra-14-kokoro-fastapi-cpu' and extra == 'extra-14-kokoro-fastapi-gpu')" },
|
||||
{ name = "nvidia-nvtx-cu12", version = "12.4.127", source = { registry = "https://pypi.org/simple" }, marker = "(platform_machine == 'x86_64' and sys_platform == 'linux' and extra == 'extra-14-kokoro-fastapi-cpu') or (platform_machine != 'x86_64' and extra == 'extra-14-kokoro-fastapi-cpu' and extra == 'extra-14-kokoro-fastapi-gpu') or (sys_platform != 'linux' and extra == 'extra-14-kokoro-fastapi-cpu' and extra == 'extra-14-kokoro-fastapi-gpu')" },
|
||||
{ name = "setuptools", marker = "(python_full_version >= '3.12' and extra == 'extra-14-kokoro-fastapi-cpu') or (extra == 'extra-14-kokoro-fastapi-cpu' and extra == 'extra-14-kokoro-fastapi-gpu')" },
|
||||
{ name = "sympy", marker = "extra == 'extra-14-kokoro-fastapi-cpu'" },
|
||||
{ name = "typing-extensions", marker = "extra == 'extra-14-kokoro-fastapi-cpu'" },
|
||||
]
|
||||
wheels = [
|
||||
{ url = "https://download.pytorch.org/whl/cpu/torch-2.5.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:269b10c34430aa8e9643dbe035dc525c4a9b1d671cd3dbc8ecbcaed280ae322d" },
@@ -3063,6 +3571,61 @@ wheels = [
{ url = "https://download.pytorch.org/whl/cpu/torch-2.5.1-cp312-none-macosx_11_0_arm64.whl", hash = "sha256:8c712df61101964eb11910a846514011f0b6f5920c55dbf567bff8a34163d5b1" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "torch"
|
||||
version = "2.5.1"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
resolution-markers = [
|
||||
"python_full_version == '3.11.*'",
|
||||
"(python_full_version == '3.11.*' and platform_machine != 'aarch64' and sys_platform == 'linux') or (python_full_version == '3.11.*' and sys_platform != 'darwin' and sys_platform != 'linux')",
|
||||
"(python_full_version == '3.11.*' and platform_machine == 'aarch64' and sys_platform == 'linux') or (python_full_version == '3.11.*' and sys_platform == 'darwin')",
|
||||
"python_full_version < '3.11'",
|
||||
"(python_full_version < '3.11' and platform_machine != 'aarch64' and sys_platform == 'linux') or (python_full_version < '3.11' and sys_platform != 'darwin' and sys_platform != 'linux')",
|
||||
"(python_full_version < '3.11' and platform_machine == 'aarch64' and sys_platform == 'linux') or (python_full_version < '3.11' and sys_platform == 'darwin')",
|
||||
"python_full_version >= '3.13'",
|
||||
"(python_full_version >= '3.13' and platform_machine != 'aarch64' and sys_platform == 'linux') or (python_full_version >= '3.13' and sys_platform != 'darwin' and sys_platform != 'linux')",
|
||||
"(python_full_version >= '3.13' and platform_machine == 'aarch64' and sys_platform == 'linux') or (python_full_version >= '3.13' and sys_platform == 'darwin')",
|
||||
"python_full_version == '3.12.*'",
|
||||
"(python_full_version == '3.12.*' and platform_machine != 'aarch64' and sys_platform == 'linux') or (python_full_version == '3.12.*' and sys_platform != 'darwin' and sys_platform != 'linux')",
|
||||
"(python_full_version == '3.12.*' and platform_machine == 'aarch64' and sys_platform == 'linux') or (python_full_version == '3.12.*' and sys_platform == 'darwin')",
|
||||
]
|
||||
dependencies = [
|
||||
{ name = "filelock", marker = "(extra == 'extra-14-kokoro-fastapi-cpu' and extra == 'extra-14-kokoro-fastapi-gpu') or (extra != 'extra-14-kokoro-fastapi-cpu' and extra != 'extra-14-kokoro-fastapi-gpu')" },
|
||||
{ name = "fsspec", marker = "(extra == 'extra-14-kokoro-fastapi-cpu' and extra == 'extra-14-kokoro-fastapi-gpu') or (extra != 'extra-14-kokoro-fastapi-cpu' and extra != 'extra-14-kokoro-fastapi-gpu')" },
|
||||
{ name = "jinja2", marker = "(extra == 'extra-14-kokoro-fastapi-cpu' and extra == 'extra-14-kokoro-fastapi-gpu') or (extra != 'extra-14-kokoro-fastapi-cpu' and extra != 'extra-14-kokoro-fastapi-gpu')" },
|
||||
{ name = "networkx", marker = "(extra == 'extra-14-kokoro-fastapi-cpu' and extra == 'extra-14-kokoro-fastapi-gpu') or (extra != 'extra-14-kokoro-fastapi-cpu' and extra != 'extra-14-kokoro-fastapi-gpu')" },
|
||||
{ name = "nvidia-cublas-cu12", version = "12.4.5.8", source = { registry = "https://pypi.org/simple" }, marker = "(platform_machine == 'x86_64' and sys_platform == 'linux' and extra != 'extra-14-kokoro-fastapi-cpu' and extra != 'extra-14-kokoro-fastapi-gpu') or (extra == 'extra-14-kokoro-fastapi-cpu' and extra == 'extra-14-kokoro-fastapi-gpu')" },
|
||||
{ name = "nvidia-cuda-cupti-cu12", version = "12.4.127", source = { registry = "https://pypi.org/simple" }, marker = "(platform_machine == 'x86_64' and sys_platform == 'linux' and extra != 'extra-14-kokoro-fastapi-cpu' and extra != 'extra-14-kokoro-fastapi-gpu') or (extra == 'extra-14-kokoro-fastapi-cpu' and extra == 'extra-14-kokoro-fastapi-gpu')" },
|
||||
{ name = "nvidia-cuda-nvrtc-cu12", version = "12.4.127", source = { registry = "https://pypi.org/simple" }, marker = "(platform_machine == 'x86_64' and sys_platform == 'linux' and extra != 'extra-14-kokoro-fastapi-cpu' and extra != 'extra-14-kokoro-fastapi-gpu') or (extra == 'extra-14-kokoro-fastapi-cpu' and extra == 'extra-14-kokoro-fastapi-gpu')" },
|
||||
{ name = "nvidia-cuda-runtime-cu12", version = "12.4.127", source = { registry = "https://pypi.org/simple" }, marker = "(platform_machine == 'x86_64' and sys_platform == 'linux' and extra != 'extra-14-kokoro-fastapi-cpu' and extra != 'extra-14-kokoro-fastapi-gpu') or (extra == 'extra-14-kokoro-fastapi-cpu' and extra == 'extra-14-kokoro-fastapi-gpu')" },
|
||||
{ name = "nvidia-cudnn-cu12", marker = "(platform_machine == 'x86_64' and sys_platform == 'linux' and extra != 'extra-14-kokoro-fastapi-cpu' and extra != 'extra-14-kokoro-fastapi-gpu') or (extra == 'extra-14-kokoro-fastapi-cpu' and extra == 'extra-14-kokoro-fastapi-gpu')" },
|
||||
{ name = "nvidia-cufft-cu12", version = "11.2.1.3", source = { registry = "https://pypi.org/simple" }, marker = "(platform_machine == 'x86_64' and sys_platform == 'linux' and extra != 'extra-14-kokoro-fastapi-cpu' and extra != 'extra-14-kokoro-fastapi-gpu') or (extra == 'extra-14-kokoro-fastapi-cpu' and extra == 'extra-14-kokoro-fastapi-gpu')" },
|
||||
{ name = "nvidia-curand-cu12", version = "10.3.5.147", source = { registry = "https://pypi.org/simple" }, marker = "(platform_machine == 'x86_64' and sys_platform == 'linux' and extra != 'extra-14-kokoro-fastapi-cpu' and extra != 'extra-14-kokoro-fastapi-gpu') or (extra == 'extra-14-kokoro-fastapi-cpu' and extra == 'extra-14-kokoro-fastapi-gpu')" },
|
||||
{ name = "nvidia-cusolver-cu12", version = "11.6.1.9", source = { registry = "https://pypi.org/simple" }, marker = "(platform_machine == 'x86_64' and sys_platform == 'linux' and extra != 'extra-14-kokoro-fastapi-cpu' and extra != 'extra-14-kokoro-fastapi-gpu') or (extra == 'extra-14-kokoro-fastapi-cpu' and extra == 'extra-14-kokoro-fastapi-gpu')" },
|
||||
{ name = "nvidia-cusparse-cu12", version = "12.3.1.170", source = { registry = "https://pypi.org/simple" }, marker = "(platform_machine == 'x86_64' and sys_platform == 'linux' and extra != 'extra-14-kokoro-fastapi-cpu' and extra != 'extra-14-kokoro-fastapi-gpu') or (extra == 'extra-14-kokoro-fastapi-cpu' and extra == 'extra-14-kokoro-fastapi-gpu')" },
|
||||
{ name = "nvidia-nccl-cu12", marker = "(platform_machine == 'x86_64' and sys_platform == 'linux' and extra != 'extra-14-kokoro-fastapi-cpu' and extra != 'extra-14-kokoro-fastapi-gpu') or (extra == 'extra-14-kokoro-fastapi-cpu' and extra == 'extra-14-kokoro-fastapi-gpu')" },
|
||||
{ name = "nvidia-nvjitlink-cu12", marker = "(platform_machine == 'x86_64' and sys_platform == 'linux' and extra != 'extra-14-kokoro-fastapi-cpu' and extra != 'extra-14-kokoro-fastapi-gpu') or (extra == 'extra-14-kokoro-fastapi-cpu' and extra == 'extra-14-kokoro-fastapi-gpu')" },
|
||||
{ name = "nvidia-nvtx-cu12", version = "12.4.127", source = { registry = "https://pypi.org/simple" }, marker = "(platform_machine == 'x86_64' and sys_platform == 'linux' and extra != 'extra-14-kokoro-fastapi-cpu' and extra != 'extra-14-kokoro-fastapi-gpu') or (extra == 'extra-14-kokoro-fastapi-cpu' and extra == 'extra-14-kokoro-fastapi-gpu')" },
|
||||
{ name = "setuptools", marker = "(python_full_version >= '3.12' and extra != 'extra-14-kokoro-fastapi-cpu' and extra != 'extra-14-kokoro-fastapi-gpu') or (extra == 'extra-14-kokoro-fastapi-cpu' and extra == 'extra-14-kokoro-fastapi-gpu')" },
|
||||
{ name = "sympy", marker = "(extra == 'extra-14-kokoro-fastapi-cpu' and extra == 'extra-14-kokoro-fastapi-gpu') or (extra != 'extra-14-kokoro-fastapi-cpu' and extra != 'extra-14-kokoro-fastapi-gpu')" },
|
||||
{ name = "typing-extensions", marker = "(extra == 'extra-14-kokoro-fastapi-cpu' and extra == 'extra-14-kokoro-fastapi-gpu') or (extra != 'extra-14-kokoro-fastapi-cpu' and extra != 'extra-14-kokoro-fastapi-gpu')" },
|
||||
]
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/2a/ef/834af4a885b31a0b32fff2d80e1e40f771e1566ea8ded55347502440786a/torch-2.5.1-cp310-cp310-manylinux1_x86_64.whl", hash = "sha256:71328e1bbe39d213b8721678f9dcac30dfc452a46d586f1d514a6aa0a99d4744", size = 906446312 },
|
||||
{ url = "https://files.pythonhosted.org/packages/69/f0/46e74e0d145f43fa506cb336eaefb2d240547e4ce1f496e442711093ab25/torch-2.5.1-cp310-cp310-manylinux2014_aarch64.whl", hash = "sha256:34bfa1a852e5714cbfa17f27c49d8ce35e1b7af5608c4bc6e81392c352dbc601", size = 91919522 },
|
||||
{ url = "https://files.pythonhosted.org/packages/a5/13/1eb674c8efbd04d71e4a157ceba991904f633e009a584dd65dccbafbb648/torch-2.5.1-cp310-cp310-win_amd64.whl", hash = "sha256:32a037bd98a241df6c93e4c789b683335da76a2ac142c0973675b715102dc5fa", size = 203088048 },
|
||||
{ url = "https://files.pythonhosted.org/packages/a9/9d/e0860474ee0ff8f6ef2c50ec8f71a250f38d78a9b9df9fd241ad3397a65b/torch-2.5.1-cp310-none-macosx_11_0_arm64.whl", hash = "sha256:23d062bf70776a3d04dbe74db950db2a5245e1ba4f27208a87f0d743b0d06e86", size = 63877046 },
|
||||
{ url = "https://files.pythonhosted.org/packages/d1/35/e8b2daf02ce933e4518e6f5682c72fd0ed66c15910ea1fb4168f442b71c4/torch-2.5.1-cp311-cp311-manylinux1_x86_64.whl", hash = "sha256:de5b7d6740c4b636ef4db92be922f0edc425b65ed78c5076c43c42d362a45457", size = 906474467 },
|
||||
{ url = "https://files.pythonhosted.org/packages/40/04/bd91593a4ca178ece93ca55f27e2783aa524aaccbfda66831d59a054c31e/torch-2.5.1-cp311-cp311-manylinux2014_aarch64.whl", hash = "sha256:340ce0432cad0d37f5a31be666896e16788f1adf8ad7be481196b503dad675b9", size = 91919450 },
|
||||
{ url = "https://files.pythonhosted.org/packages/0d/4a/e51420d46cfc90562e85af2fee912237c662ab31140ab179e49bd69401d6/torch-2.5.1-cp311-cp311-win_amd64.whl", hash = "sha256:603c52d2fe06433c18b747d25f5c333f9c1d58615620578c326d66f258686f9a", size = 203098237 },
|
||||
{ url = "https://files.pythonhosted.org/packages/d0/db/5d9cbfbc7968d79c5c09a0bc0bc3735da079f2fd07cc10498a62b320a480/torch-2.5.1-cp311-none-macosx_11_0_arm64.whl", hash = "sha256:31f8c39660962f9ae4eeec995e3049b5492eb7360dd4f07377658ef4d728fa4c", size = 63884466 },
|
||||
{ url = "https://files.pythonhosted.org/packages/8b/5c/36c114d120bfe10f9323ed35061bc5878cc74f3f594003854b0ea298942f/torch-2.5.1-cp312-cp312-manylinux1_x86_64.whl", hash = "sha256:ed231a4b3a5952177fafb661213d690a72caaad97d5824dd4fc17ab9e15cec03", size = 906389343 },
|
||||
{ url = "https://files.pythonhosted.org/packages/6d/69/d8ada8b6e0a4257556d5b4ddeb4345ea8eeaaef3c98b60d1cca197c7ad8e/torch-2.5.1-cp312-cp312-manylinux2014_aarch64.whl", hash = "sha256:3f4b7f10a247e0dcd7ea97dc2d3bfbfc90302ed36d7f3952b0008d0df264e697", size = 91811673 },
|
||||
{ url = "https://files.pythonhosted.org/packages/5f/ba/607d013b55b9fd805db2a5c2662ec7551f1910b4eef39653eeaba182c5b2/torch-2.5.1-cp312-cp312-win_amd64.whl", hash = "sha256:73e58e78f7d220917c5dbfad1a40e09df9929d3b95d25e57d9f8558f84c9a11c", size = 203046841 },
|
||||
{ url = "https://files.pythonhosted.org/packages/57/6c/bf52ff061da33deb9f94f4121fde7ff3058812cb7d2036c97bc167793bd1/torch-2.5.1-cp312-none-macosx_11_0_arm64.whl", hash = "sha256:8c712df61101964eb11910a846514011f0b6f5920c55dbf567bff8a34163d5b1", size = 63858109 },
|
||||
{ url = "https://files.pythonhosted.org/packages/69/72/20cb30f3b39a9face296491a86adb6ff8f1a47a897e4d14667e6cf89d5c3/torch-2.5.1-cp313-cp313-manylinux1_x86_64.whl", hash = "sha256:9b61edf3b4f6e3b0e0adda8b3960266b9009d02b37555971f4d1c8f7a05afed7", size = 906393265 },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "torch"
|
||||
version = "2.5.1+cu121"
@@ -3082,25 +3645,25 @@ resolution-markers = [
"(python_full_version == '3.12.*' and platform_machine == 'aarch64' and sys_platform == 'linux') or (python_full_version == '3.12.*' and sys_platform == 'darwin')",
|
||||
]
|
||||
dependencies = [
|
||||
{ name = "filelock" },
|
||||
{ name = "fsspec" },
|
||||
{ name = "jinja2" },
|
||||
{ name = "networkx" },
|
||||
{ name = "nvidia-cublas-cu12", version = "12.1.3.1", source = { registry = "https://pypi.org/simple" }, marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" },
|
||||
{ name = "nvidia-cuda-cupti-cu12", version = "12.1.105", source = { registry = "https://pypi.org/simple" }, marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" },
|
||||
{ name = "nvidia-cuda-nvrtc-cu12", version = "12.1.105", source = { registry = "https://pypi.org/simple" }, marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" },
|
||||
{ name = "nvidia-cuda-runtime-cu12", version = "12.1.105", source = { registry = "https://pypi.org/simple" }, marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" },
|
||||
{ name = "nvidia-cudnn-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" },
|
||||
{ name = "nvidia-cufft-cu12", version = "11.0.2.54", source = { registry = "https://pypi.org/simple" }, marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" },
|
||||
{ name = "nvidia-curand-cu12", version = "10.3.2.106", source = { registry = "https://pypi.org/simple" }, marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" },
|
||||
{ name = "nvidia-cusolver-cu12", version = "11.4.5.107", source = { registry = "https://pypi.org/simple" }, marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" },
|
||||
{ name = "nvidia-cusparse-cu12", version = "12.1.0.106", source = { registry = "https://pypi.org/simple" }, marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" },
|
||||
{ name = "nvidia-nccl-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" },
|
||||
{ name = "nvidia-nvtx-cu12", version = "12.1.105", source = { registry = "https://pypi.org/simple" }, marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" },
|
||||
{ name = "setuptools", marker = "python_full_version >= '3.12'" },
|
||||
{ name = "sympy" },
|
||||
{ name = "triton", marker = "python_full_version < '3.13' and platform_machine == 'x86_64' and sys_platform == 'linux'" },
|
||||
{ name = "typing-extensions" },
|
||||
{ name = "filelock", marker = "extra == 'extra-14-kokoro-fastapi-gpu'" },
|
||||
{ name = "fsspec", marker = "extra == 'extra-14-kokoro-fastapi-gpu'" },
|
||||
{ name = "jinja2", marker = "extra == 'extra-14-kokoro-fastapi-gpu'" },
|
||||
{ name = "networkx", marker = "extra == 'extra-14-kokoro-fastapi-gpu'" },
|
||||
{ name = "nvidia-cublas-cu12", version = "12.1.3.1", source = { registry = "https://pypi.org/simple" }, marker = "(platform_machine == 'x86_64' and sys_platform == 'linux' and extra == 'extra-14-kokoro-fastapi-gpu') or (platform_machine != 'x86_64' and extra == 'extra-14-kokoro-fastapi-cpu' and extra == 'extra-14-kokoro-fastapi-gpu') or (sys_platform != 'linux' and extra == 'extra-14-kokoro-fastapi-cpu' and extra == 'extra-14-kokoro-fastapi-gpu')" },
|
||||
{ name = "nvidia-cuda-cupti-cu12", version = "12.1.105", source = { registry = "https://pypi.org/simple" }, marker = "(platform_machine == 'x86_64' and sys_platform == 'linux' and extra == 'extra-14-kokoro-fastapi-gpu') or (platform_machine != 'x86_64' and extra == 'extra-14-kokoro-fastapi-cpu' and extra == 'extra-14-kokoro-fastapi-gpu') or (sys_platform != 'linux' and extra == 'extra-14-kokoro-fastapi-cpu' and extra == 'extra-14-kokoro-fastapi-gpu')" },
|
||||
{ name = "nvidia-cuda-nvrtc-cu12", version = "12.1.105", source = { registry = "https://pypi.org/simple" }, marker = "(platform_machine == 'x86_64' and sys_platform == 'linux' and extra == 'extra-14-kokoro-fastapi-gpu') or (platform_machine != 'x86_64' and extra == 'extra-14-kokoro-fastapi-cpu' and extra == 'extra-14-kokoro-fastapi-gpu') or (sys_platform != 'linux' and extra == 'extra-14-kokoro-fastapi-cpu' and extra == 'extra-14-kokoro-fastapi-gpu')" },
|
||||
{ name = "nvidia-cuda-runtime-cu12", version = "12.1.105", source = { registry = "https://pypi.org/simple" }, marker = "(platform_machine == 'x86_64' and sys_platform == 'linux' and extra == 'extra-14-kokoro-fastapi-gpu') or (platform_machine != 'x86_64' and extra == 'extra-14-kokoro-fastapi-cpu' and extra == 'extra-14-kokoro-fastapi-gpu') or (sys_platform != 'linux' and extra == 'extra-14-kokoro-fastapi-cpu' and extra == 'extra-14-kokoro-fastapi-gpu')" },
|
||||
{ name = "nvidia-cudnn-cu12", marker = "(platform_machine == 'x86_64' and sys_platform == 'linux' and extra == 'extra-14-kokoro-fastapi-gpu') or (platform_machine != 'x86_64' and extra == 'extra-14-kokoro-fastapi-cpu' and extra == 'extra-14-kokoro-fastapi-gpu') or (sys_platform != 'linux' and extra == 'extra-14-kokoro-fastapi-cpu' and extra == 'extra-14-kokoro-fastapi-gpu')" },
|
||||
{ name = "nvidia-cufft-cu12", version = "11.0.2.54", source = { registry = "https://pypi.org/simple" }, marker = "(platform_machine == 'x86_64' and sys_platform == 'linux' and extra == 'extra-14-kokoro-fastapi-gpu') or (platform_machine != 'x86_64' and extra == 'extra-14-kokoro-fastapi-cpu' and extra == 'extra-14-kokoro-fastapi-gpu') or (sys_platform != 'linux' and extra == 'extra-14-kokoro-fastapi-cpu' and extra == 'extra-14-kokoro-fastapi-gpu')" },
|
||||
{ name = "nvidia-curand-cu12", version = "10.3.2.106", source = { registry = "https://pypi.org/simple" }, marker = "(platform_machine == 'x86_64' and sys_platform == 'linux' and extra == 'extra-14-kokoro-fastapi-gpu') or (platform_machine != 'x86_64' and extra == 'extra-14-kokoro-fastapi-cpu' and extra == 'extra-14-kokoro-fastapi-gpu') or (sys_platform != 'linux' and extra == 'extra-14-kokoro-fastapi-cpu' and extra == 'extra-14-kokoro-fastapi-gpu')" },
|
||||
{ name = "nvidia-cusolver-cu12", version = "11.4.5.107", source = { registry = "https://pypi.org/simple" }, marker = "(platform_machine == 'x86_64' and sys_platform == 'linux' and extra == 'extra-14-kokoro-fastapi-gpu') or (platform_machine != 'x86_64' and extra == 'extra-14-kokoro-fastapi-cpu' and extra == 'extra-14-kokoro-fastapi-gpu') or (sys_platform != 'linux' and extra == 'extra-14-kokoro-fastapi-cpu' and extra == 'extra-14-kokoro-fastapi-gpu')" },
|
||||
{ name = "nvidia-cusparse-cu12", version = "12.1.0.106", source = { registry = "https://pypi.org/simple" }, marker = "(platform_machine == 'x86_64' and sys_platform == 'linux' and extra == 'extra-14-kokoro-fastapi-gpu') or (platform_machine != 'x86_64' and extra == 'extra-14-kokoro-fastapi-cpu' and extra == 'extra-14-kokoro-fastapi-gpu') or (sys_platform != 'linux' and extra == 'extra-14-kokoro-fastapi-cpu' and extra == 'extra-14-kokoro-fastapi-gpu')" },
|
||||
{ name = "nvidia-nccl-cu12", marker = "(platform_machine == 'x86_64' and sys_platform == 'linux' and extra == 'extra-14-kokoro-fastapi-gpu') or (platform_machine != 'x86_64' and extra == 'extra-14-kokoro-fastapi-cpu' and extra == 'extra-14-kokoro-fastapi-gpu') or (sys_platform != 'linux' and extra == 'extra-14-kokoro-fastapi-cpu' and extra == 'extra-14-kokoro-fastapi-gpu')" },
|
||||
{ name = "nvidia-nvtx-cu12", version = "12.1.105", source = { registry = "https://pypi.org/simple" }, marker = "(platform_machine == 'x86_64' and sys_platform == 'linux' and extra == 'extra-14-kokoro-fastapi-gpu') or (platform_machine != 'x86_64' and extra == 'extra-14-kokoro-fastapi-cpu' and extra == 'extra-14-kokoro-fastapi-gpu') or (sys_platform != 'linux' and extra == 'extra-14-kokoro-fastapi-cpu' and extra == 'extra-14-kokoro-fastapi-gpu')" },
|
||||
{ name = "setuptools", marker = "(python_full_version >= '3.12' and extra == 'extra-14-kokoro-fastapi-gpu') or (extra == 'extra-14-kokoro-fastapi-cpu' and extra == 'extra-14-kokoro-fastapi-gpu')" },
|
||||
{ name = "sympy", marker = "extra == 'extra-14-kokoro-fastapi-gpu'" },
|
||||
{ name = "triton", marker = "(python_full_version < '3.13' and platform_machine == 'x86_64' and sys_platform == 'linux' and extra == 'extra-14-kokoro-fastapi-gpu') or (python_full_version >= '3.13' and extra == 'extra-14-kokoro-fastapi-cpu' and extra == 'extra-14-kokoro-fastapi-gpu') or (platform_machine != 'x86_64' and extra == 'extra-14-kokoro-fastapi-cpu' and extra == 'extra-14-kokoro-fastapi-gpu') or (sys_platform != 'linux' and extra == 'extra-14-kokoro-fastapi-cpu' and extra == 'extra-14-kokoro-fastapi-gpu')" },
|
||||
{ name = "typing-extensions", marker = "extra == 'extra-14-kokoro-fastapi-gpu'" },
|
||||
]
|
||||
wheels = [
|
||||
{ url = "https://download.pytorch.org/whl/cu121/torch-2.5.1%2Bcu121-cp310-cp310-linux_x86_64.whl", hash = "sha256:92af92c569de5da937dd1afb45ecfdd598ec1254cf2e49e3d698cb24d71aae14" },
@@ -3150,7 +3713,7 @@ name = "triton"
version = "3.1.0"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
dependencies = [
|
||||
{ name = "filelock", marker = "python_full_version < '3.13'" },
|
||||
{ name = "filelock", marker = "(python_full_version < '3.13' and extra == 'extra-14-kokoro-fastapi-gpu') or (extra == 'extra-14-kokoro-fastapi-cpu' and extra == 'extra-14-kokoro-fastapi-gpu')" },
|
||||
]
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/98/29/69aa56dc0b2eb2602b553881e34243475ea2afd9699be042316842788ff5/triton-3.1.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6b0dd10a925263abbe9fa37dcde67a5e9b2383fc269fdf59f5657cac38c5d1d8", size = 209460013 },
@@ -3223,6 +3786,38 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/61/14/33a3a1352cfa71812a3a21e8c9bfb83f60b0011f5e36f2b1399d51928209/uvicorn-0.34.0-py3-none-any.whl", hash = "sha256:023dc038422502fa28a09c7a30bf2b6991512da7dcdb8fd35fe57cfc154126f4", size = 62315 },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "wasabi"
|
||||
version = "1.1.3"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
dependencies = [
|
||||
{ name = "colorama", marker = "sys_platform == 'win32' or (extra == 'extra-14-kokoro-fastapi-cpu' and extra == 'extra-14-kokoro-fastapi-gpu')" },
|
||||
]
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/ac/f9/054e6e2f1071e963b5e746b48d1e3727470b2a490834d18ad92364929db3/wasabi-1.1.3.tar.gz", hash = "sha256:4bb3008f003809db0c3e28b4daf20906ea871a2bb43f9914197d540f4f2e0878", size = 30391 }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/06/7c/34330a89da55610daa5f245ddce5aab81244321101614751e7537f125133/wasabi-1.1.3-py3-none-any.whl", hash = "sha256:f76e16e8f7e79f8c4c8be49b4024ac725713ab10cd7f19350ad18a8e3f71728c", size = 27880 },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "weasel"
|
||||
version = "0.4.1"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
dependencies = [
|
||||
{ name = "cloudpathlib" },
|
||||
{ name = "confection" },
|
||||
{ name = "packaging" },
|
||||
{ name = "pydantic" },
|
||||
{ name = "requests" },
|
||||
{ name = "smart-open" },
|
||||
{ name = "srsly" },
|
||||
{ name = "typer" },
|
||||
{ name = "wasabi" },
|
||||
]
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/a7/1a/9c522dd61b52939c217925d3e55c95f9348b73a66a956f52608e1e59a2c0/weasel-0.4.1.tar.gz", hash = "sha256:aabc210f072e13f6744e5c3a28037f93702433405cd35673f7c6279147085aa9", size = 38417 }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/2a/87/abd57374044e1f627f0a905ac33c1a7daab35a3a815abfea4e1bafd3fdb1/weasel-0.4.1-py3-none-any.whl", hash = "sha256:24140a090ea1ac512a2b2f479cc64192fd1d527a7f3627671268d08ed5ac418c", size = 50270 },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "websockets"
|
||||
version = "11.0.3"
@@ -3262,3 +3857,67 @@ sdist = { url = "https://files.pythonhosted.org/packages/b3/8f/705086c9d734d3b66
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/e1/07/c6fe3ad3e685340704d314d765b7912993bcb8dc198f0e7a89382d37974b/win32_setctime-1.2.0-py3-none-any.whl", hash = "sha256:95d644c4e708aba81dc3704a116d8cbc974d70b3bdb8be1d150e36be6e9d1390", size = 4083 },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "wrapt"
|
||||
version = "1.17.2"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/c3/fc/e91cc220803d7bc4db93fb02facd8461c37364151b8494762cc88b0fbcef/wrapt-1.17.2.tar.gz", hash = "sha256:41388e9d4d1522446fe79d3213196bd9e3b301a336965b9e27ca2788ebd122f3", size = 55531 }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/5a/d1/1daec934997e8b160040c78d7b31789f19b122110a75eca3d4e8da0049e1/wrapt-1.17.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:3d57c572081fed831ad2d26fd430d565b76aa277ed1d30ff4d40670b1c0dd984", size = 53307 },
|
||||
{ url = "https://files.pythonhosted.org/packages/1b/7b/13369d42651b809389c1a7153baa01d9700430576c81a2f5c5e460df0ed9/wrapt-1.17.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:b5e251054542ae57ac7f3fba5d10bfff615b6c2fb09abeb37d2f1463f841ae22", size = 38486 },
|
||||
{ url = "https://files.pythonhosted.org/packages/62/bf/e0105016f907c30b4bd9e377867c48c34dc9c6c0c104556c9c9126bd89ed/wrapt-1.17.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:80dd7db6a7cb57ffbc279c4394246414ec99537ae81ffd702443335a61dbf3a7", size = 38777 },
|
||||
{ url = "https://files.pythonhosted.org/packages/27/70/0f6e0679845cbf8b165e027d43402a55494779295c4b08414097b258ac87/wrapt-1.17.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0a6e821770cf99cc586d33833b2ff32faebdbe886bd6322395606cf55153246c", size = 83314 },
|
||||
{ url = "https://files.pythonhosted.org/packages/0f/77/0576d841bf84af8579124a93d216f55d6f74374e4445264cb378a6ed33eb/wrapt-1.17.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b60fb58b90c6d63779cb0c0c54eeb38941bae3ecf7a73c764c52c88c2dcb9d72", size = 74947 },
|
||||
{ url = "https://files.pythonhosted.org/packages/90/ec/00759565518f268ed707dcc40f7eeec38637d46b098a1f5143bff488fe97/wrapt-1.17.2-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b870b5df5b71d8c3359d21be8f0d6c485fa0ebdb6477dda51a1ea54a9b558061", size = 82778 },
|
||||
{ url = "https://files.pythonhosted.org/packages/f8/5a/7cffd26b1c607b0b0c8a9ca9d75757ad7620c9c0a9b4a25d3f8a1480fafc/wrapt-1.17.2-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:4011d137b9955791f9084749cba9a367c68d50ab8d11d64c50ba1688c9b457f2", size = 81716 },
|
||||
{ url = "https://files.pythonhosted.org/packages/7e/09/dccf68fa98e862df7e6a60a61d43d644b7d095a5fc36dbb591bbd4a1c7b2/wrapt-1.17.2-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:1473400e5b2733e58b396a04eb7f35f541e1fb976d0c0724d0223dd607e0f74c", size = 74548 },
|
||||
{ url = "https://files.pythonhosted.org/packages/b7/8e/067021fa3c8814952c5e228d916963c1115b983e21393289de15128e867e/wrapt-1.17.2-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:3cedbfa9c940fdad3e6e941db7138e26ce8aad38ab5fe9dcfadfed9db7a54e62", size = 81334 },
|
||||
{ url = "https://files.pythonhosted.org/packages/4b/0d/9d4b5219ae4393f718699ca1c05f5ebc0c40d076f7e65fd48f5f693294fb/wrapt-1.17.2-cp310-cp310-win32.whl", hash = "sha256:582530701bff1dec6779efa00c516496968edd851fba224fbd86e46cc6b73563", size = 36427 },
|
||||
{ url = "https://files.pythonhosted.org/packages/72/6a/c5a83e8f61aec1e1aeef939807602fb880e5872371e95df2137142f5c58e/wrapt-1.17.2-cp310-cp310-win_amd64.whl", hash = "sha256:58705da316756681ad3c9c73fd15499aa4d8c69f9fd38dc8a35e06c12468582f", size = 38774 },
|
||||
{ url = "https://files.pythonhosted.org/packages/cd/f7/a2aab2cbc7a665efab072344a8949a71081eed1d2f451f7f7d2b966594a2/wrapt-1.17.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:ff04ef6eec3eee8a5efef2401495967a916feaa353643defcc03fc74fe213b58", size = 53308 },
|
||||
{ url = "https://files.pythonhosted.org/packages/50/ff/149aba8365fdacef52b31a258c4dc1c57c79759c335eff0b3316a2664a64/wrapt-1.17.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:4db983e7bca53819efdbd64590ee96c9213894272c776966ca6306b73e4affda", size = 38488 },
|
||||
{ url = "https://files.pythonhosted.org/packages/65/46/5a917ce85b5c3b490d35c02bf71aedaa9f2f63f2d15d9949cc4ba56e8ba9/wrapt-1.17.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:9abc77a4ce4c6f2a3168ff34b1da9b0f311a8f1cfd694ec96b0603dff1c79438", size = 38776 },
|
||||
{ url = "https://files.pythonhosted.org/packages/ca/74/336c918d2915a4943501c77566db41d1bd6e9f4dbc317f356b9a244dfe83/wrapt-1.17.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0b929ac182f5ace000d459c59c2c9c33047e20e935f8e39371fa6e3b85d56f4a", size = 83776 },
|
||||
{ url = "https://files.pythonhosted.org/packages/09/99/c0c844a5ccde0fe5761d4305485297f91d67cf2a1a824c5f282e661ec7ff/wrapt-1.17.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f09b286faeff3c750a879d336fb6d8713206fc97af3adc14def0cdd349df6000", size = 75420 },
|
||||
{ url = "https://files.pythonhosted.org/packages/b4/b0/9fc566b0fe08b282c850063591a756057c3247b2362b9286429ec5bf1721/wrapt-1.17.2-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1a7ed2d9d039bd41e889f6fb9364554052ca21ce823580f6a07c4ec245c1f5d6", size = 83199 },
|
||||
{ url = "https://files.pythonhosted.org/packages/9d/4b/71996e62d543b0a0bd95dda485219856def3347e3e9380cc0d6cf10cfb2f/wrapt-1.17.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:129a150f5c445165ff941fc02ee27df65940fcb8a22a61828b1853c98763a64b", size = 82307 },
|
||||
{ url = "https://files.pythonhosted.org/packages/39/35/0282c0d8789c0dc9bcc738911776c762a701f95cfe113fb8f0b40e45c2b9/wrapt-1.17.2-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:1fb5699e4464afe5c7e65fa51d4f99e0b2eadcc176e4aa33600a3df7801d6662", size = 75025 },
|
||||
{ url = "https://files.pythonhosted.org/packages/4f/6d/90c9fd2c3c6fee181feecb620d95105370198b6b98a0770cba090441a828/wrapt-1.17.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:9a2bce789a5ea90e51a02dfcc39e31b7f1e662bc3317979aa7e5538e3a034f72", size = 81879 },
|
||||
{ url = "https://files.pythonhosted.org/packages/8f/fa/9fb6e594f2ce03ef03eddbdb5f4f90acb1452221a5351116c7c4708ac865/wrapt-1.17.2-cp311-cp311-win32.whl", hash = "sha256:4afd5814270fdf6380616b321fd31435a462019d834f83c8611a0ce7484c7317", size = 36419 },
|
||||
{ url = "https://files.pythonhosted.org/packages/47/f8/fb1773491a253cbc123c5d5dc15c86041f746ed30416535f2a8df1f4a392/wrapt-1.17.2-cp311-cp311-win_amd64.whl", hash = "sha256:acc130bc0375999da18e3d19e5a86403667ac0c4042a094fefb7eec8ebac7cf3", size = 38773 },
|
||||
{ url = "https://files.pythonhosted.org/packages/a1/bd/ab55f849fd1f9a58ed7ea47f5559ff09741b25f00c191231f9f059c83949/wrapt-1.17.2-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:d5e2439eecc762cd85e7bd37161d4714aa03a33c5ba884e26c81559817ca0925", size = 53799 },
|
||||
{ url = "https://files.pythonhosted.org/packages/53/18/75ddc64c3f63988f5a1d7e10fb204ffe5762bc663f8023f18ecaf31a332e/wrapt-1.17.2-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:3fc7cb4c1c744f8c05cd5f9438a3caa6ab94ce8344e952d7c45a8ed59dd88392", size = 38821 },
|
||||
{ url = "https://files.pythonhosted.org/packages/48/2a/97928387d6ed1c1ebbfd4efc4133a0633546bec8481a2dd5ec961313a1c7/wrapt-1.17.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:8fdbdb757d5390f7c675e558fd3186d590973244fab0c5fe63d373ade3e99d40", size = 38919 },
|
||||
{ url = "https://files.pythonhosted.org/packages/73/54/3bfe5a1febbbccb7a2f77de47b989c0b85ed3a6a41614b104204a788c20e/wrapt-1.17.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5bb1d0dbf99411f3d871deb6faa9aabb9d4e744d67dcaaa05399af89d847a91d", size = 88721 },
|
||||
{ url = "https://files.pythonhosted.org/packages/25/cb/7262bc1b0300b4b64af50c2720ef958c2c1917525238d661c3e9a2b71b7b/wrapt-1.17.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d18a4865f46b8579d44e4fe1e2bcbc6472ad83d98e22a26c963d46e4c125ef0b", size = 80899 },
|
||||
{ url = "https://files.pythonhosted.org/packages/2a/5a/04cde32b07a7431d4ed0553a76fdb7a61270e78c5fd5a603e190ac389f14/wrapt-1.17.2-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc570b5f14a79734437cb7b0500376b6b791153314986074486e0b0fa8d71d98", size = 89222 },
|
||||
{ url = "https://files.pythonhosted.org/packages/09/28/2e45a4f4771fcfb109e244d5dbe54259e970362a311b67a965555ba65026/wrapt-1.17.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:6d9187b01bebc3875bac9b087948a2bccefe464a7d8f627cf6e48b1bbae30f82", size = 86707 },
|
||||
{ url = "https://files.pythonhosted.org/packages/c6/d2/dcb56bf5f32fcd4bd9aacc77b50a539abdd5b6536872413fd3f428b21bed/wrapt-1.17.2-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:9e8659775f1adf02eb1e6f109751268e493c73716ca5761f8acb695e52a756ae", size = 79685 },
|
||||
{ url = "https://files.pythonhosted.org/packages/80/4e/eb8b353e36711347893f502ce91c770b0b0929f8f0bed2670a6856e667a9/wrapt-1.17.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:e8b2816ebef96d83657b56306152a93909a83f23994f4b30ad4573b00bd11bb9", size = 87567 },
|
||||
{ url = "https://files.pythonhosted.org/packages/17/27/4fe749a54e7fae6e7146f1c7d914d28ef599dacd4416566c055564080fe2/wrapt-1.17.2-cp312-cp312-win32.whl", hash = "sha256:468090021f391fe0056ad3e807e3d9034e0fd01adcd3bdfba977b6fdf4213ea9", size = 36672 },
|
||||
{ url = "https://files.pythonhosted.org/packages/15/06/1dbf478ea45c03e78a6a8c4be4fdc3c3bddea5c8de8a93bc971415e47f0f/wrapt-1.17.2-cp312-cp312-win_amd64.whl", hash = "sha256:ec89ed91f2fa8e3f52ae53cd3cf640d6feff92ba90d62236a81e4e563ac0e991", size = 38865 },
|
||||
{ url = "https://files.pythonhosted.org/packages/ce/b9/0ffd557a92f3b11d4c5d5e0c5e4ad057bd9eb8586615cdaf901409920b14/wrapt-1.17.2-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:6ed6ffac43aecfe6d86ec5b74b06a5be33d5bb9243d055141e8cabb12aa08125", size = 53800 },
|
||||
{ url = "https://files.pythonhosted.org/packages/c0/ef/8be90a0b7e73c32e550c73cfb2fa09db62234227ece47b0e80a05073b375/wrapt-1.17.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:35621ae4c00e056adb0009f8e86e28eb4a41a4bfa8f9bfa9fca7d343fe94f998", size = 38824 },
|
||||
{ url = "https://files.pythonhosted.org/packages/36/89/0aae34c10fe524cce30fe5fc433210376bce94cf74d05b0d68344c8ba46e/wrapt-1.17.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:a604bf7a053f8362d27eb9fefd2097f82600b856d5abe996d623babd067b1ab5", size = 38920 },
|
||||
{ url = "https://files.pythonhosted.org/packages/3b/24/11c4510de906d77e0cfb5197f1b1445d4fec42c9a39ea853d482698ac681/wrapt-1.17.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5cbabee4f083b6b4cd282f5b817a867cf0b1028c54d445b7ec7cfe6505057cf8", size = 88690 },
|
||||
{ url = "https://files.pythonhosted.org/packages/71/d7/cfcf842291267bf455b3e266c0c29dcb675b5540ee8b50ba1699abf3af45/wrapt-1.17.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:49703ce2ddc220df165bd2962f8e03b84c89fee2d65e1c24a7defff6f988f4d6", size = 80861 },
|
||||
{ url = "https://files.pythonhosted.org/packages/d5/66/5d973e9f3e7370fd686fb47a9af3319418ed925c27d72ce16b791231576d/wrapt-1.17.2-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8112e52c5822fc4253f3901b676c55ddf288614dc7011634e2719718eaa187dc", size = 89174 },
|
||||
{ url = "https://files.pythonhosted.org/packages/a7/d3/8e17bb70f6ae25dabc1aaf990f86824e4fd98ee9cadf197054e068500d27/wrapt-1.17.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:9fee687dce376205d9a494e9c121e27183b2a3df18037f89d69bd7b35bcf59e2", size = 86721 },
|
||||
{ url = "https://files.pythonhosted.org/packages/6f/54/f170dfb278fe1c30d0ff864513cff526d624ab8de3254b20abb9cffedc24/wrapt-1.17.2-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:18983c537e04d11cf027fbb60a1e8dfd5190e2b60cc27bc0808e653e7b218d1b", size = 79763 },
|
||||
{ url = "https://files.pythonhosted.org/packages/4a/98/de07243751f1c4a9b15c76019250210dd3486ce098c3d80d5f729cba029c/wrapt-1.17.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:703919b1633412ab54bcf920ab388735832fdcb9f9a00ae49387f0fe67dad504", size = 87585 },
|
||||
{ url = "https://files.pythonhosted.org/packages/f9/f0/13925f4bd6548013038cdeb11ee2cbd4e37c30f8bfd5db9e5a2a370d6e20/wrapt-1.17.2-cp313-cp313-win32.whl", hash = "sha256:abbb9e76177c35d4e8568e58650aa6926040d6a9f6f03435b7a522bf1c487f9a", size = 36676 },
|
||||
{ url = "https://files.pythonhosted.org/packages/bf/ae/743f16ef8c2e3628df3ddfd652b7d4c555d12c84b53f3d8218498f4ade9b/wrapt-1.17.2-cp313-cp313-win_amd64.whl", hash = "sha256:69606d7bb691b50a4240ce6b22ebb319c1cfb164e5f6569835058196e0f3a845", size = 38871 },
|
||||
{ url = "https://files.pythonhosted.org/packages/3d/bc/30f903f891a82d402ffb5fda27ec1d621cc97cb74c16fea0b6141f1d4e87/wrapt-1.17.2-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:4a721d3c943dae44f8e243b380cb645a709ba5bd35d3ad27bc2ed947e9c68192", size = 56312 },
|
||||
{ url = "https://files.pythonhosted.org/packages/8a/04/c97273eb491b5f1c918857cd26f314b74fc9b29224521f5b83f872253725/wrapt-1.17.2-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:766d8bbefcb9e00c3ac3b000d9acc51f1b399513f44d77dfe0eb026ad7c9a19b", size = 40062 },
|
||||
{ url = "https://files.pythonhosted.org/packages/4e/ca/3b7afa1eae3a9e7fefe499db9b96813f41828b9fdb016ee836c4c379dadb/wrapt-1.17.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:e496a8ce2c256da1eb98bd15803a79bee00fc351f5dfb9ea82594a3f058309e0", size = 40155 },
|
||||
{ url = "https://files.pythonhosted.org/packages/89/be/7c1baed43290775cb9030c774bc53c860db140397047cc49aedaf0a15477/wrapt-1.17.2-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:40d615e4fe22f4ad3528448c193b218e077656ca9ccb22ce2cb20db730f8d306", size = 113471 },
|
||||
{ url = "https://files.pythonhosted.org/packages/32/98/4ed894cf012b6d6aae5f5cc974006bdeb92f0241775addad3f8cd6ab71c8/wrapt-1.17.2-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a5aaeff38654462bc4b09023918b7f21790efb807f54c000a39d41d69cf552cb", size = 101208 },
{ url = "https://files.pythonhosted.org/packages/ea/fd/0c30f2301ca94e655e5e057012e83284ce8c545df7661a78d8bfca2fac7a/wrapt-1.17.2-cp313-cp313t-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9a7d15bbd2bc99e92e39f49a04653062ee6085c0e18b3b7512a4f2fe91f2d681", size = 109339 },
{ url = "https://files.pythonhosted.org/packages/75/56/05d000de894c4cfcb84bcd6b1df6214297b8089a7bd324c21a4765e49b14/wrapt-1.17.2-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:e3890b508a23299083e065f435a492b5435eba6e304a7114d2f919d400888cc6", size = 110232 },
{ url = "https://files.pythonhosted.org/packages/53/f8/c3f6b2cf9b9277fb0813418e1503e68414cd036b3b099c823379c9575e6d/wrapt-1.17.2-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:8c8b293cd65ad716d13d8dd3624e42e5a19cc2a2f1acc74b30c2c13f15cb61a6", size = 100476 },
{ url = "https://files.pythonhosted.org/packages/a7/b1/0bb11e29aa5139d90b770ebbfa167267b1fc548d2302c30c8f7572851738/wrapt-1.17.2-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:4c82b8785d98cdd9fed4cac84d765d234ed3251bd6afe34cb7ac523cb93e8b4f", size = 106377 },
{ url = "https://files.pythonhosted.org/packages/6a/e1/0122853035b40b3f333bbb25f1939fc1045e21dd518f7f0922b60c156f7c/wrapt-1.17.2-cp313-cp313t-win32.whl", hash = "sha256:13e6afb7fe71fe7485a4550a8844cc9ffbe263c0f1a1eea569bc7091d4898555", size = 37986 },
{ url = "https://files.pythonhosted.org/packages/09/5e/1655cf481e079c1f22d0cabdd4e51733679932718dc23bf2db175f329b76/wrapt-1.17.2-cp313-cp313t-win_amd64.whl", hash = "sha256:eaf675418ed6b3b31c7a989fd007fa7c3be66ce14e5c3b27336383604c9da85c", size = 40750 },
{ url = "https://files.pythonhosted.org/packages/2d/82/f56956041adef78f849db6b289b282e72b55ab8045a75abad81898c28d19/wrapt-1.17.2-py3-none-any.whl", hash = "sha256:b18f2d1533a71f069c7f82d524a52599053d4c7166e9dd374ae2136b7f40f7c8", size = 23594 },
]
@@ -51,10 +51,15 @@
        ></textarea>
        <div class="controls">
            <div class="voice-select-container">
                <input
                    type="text"
                    id="voice-search"
                    class="voice-search"
                <div class="version-select-wrapper">
                    <select id="version-select" class="version-select">
                        <!-- Version options will be inserted here -->
                    </select>
                </div>
                <input
                    type="text"
                    id="voice-search"
                    class="voice-search"
                    placeholder="Search voices..."
                >
                <div class="voice-dropdown" id="voice-dropdown">

@@ -5,13 +5,21 @@ export class VoiceSelector {
            voiceSearch: document.getElementById('voice-search'),
            voiceDropdown: document.getElementById('voice-dropdown'),
            voiceOptions: document.getElementById('voice-options'),
            selectedVoices: document.getElementById('selected-voices')
            selectedVoices: document.getElementById('selected-voices'),
            versionSelect: document.getElementById('version-select')
        };

        this.setupEventListeners();
    }

    setupEventListeners() {
        // Version selection
        this.elements.versionSelect?.addEventListener('change', async (e) => {
            await this.voiceService.setVersion(e.target.value);
            this.renderVoiceOptions(this.voiceService.getAvailableVoices());
            this.updateSelectedVoicesDisplay();
        });

        // Voice search
        this.elements.voiceSearch.addEventListener('input', (e) => {
            const filteredVoices = this.voiceService.filterVoices(e.target.value);

@@ -101,8 +109,28 @@ export class VoiceSelector {
        }
    }

    renderVersionSelect() {
        if (!this.elements.versionSelect) return;

        const versions = this.voiceService.getAvailableVersions();
        const currentVersion = this.voiceService.getCurrentVersion();

        this.elements.versionSelect.innerHTML = versions
            .map(version => `
                <option value="${version}" ${version === currentVersion ? 'selected' : ''}>
                    ${version}
                </option>
            `)
            .join('');
    }

    async initialize() {
        try {
            // Load versions first
            await this.voiceService.loadVersions();
            this.renderVersionSelect();

            // Then load voices for current version
            await this.voiceService.loadVoices();
            this.renderVoiceOptions(this.voiceService.getAvailableVoices());
            this.updateSelectedVoicesDisplay();

@@ -11,6 +11,11 @@ export class AudioService {
        this.CHARS_PER_CHUNK = 300; // Estimated chars per chunk
        this.serverDownloadPath = null; // Server-side download path
        this.pendingOperations = []; // Queue for buffer operations
        this.currentVersion = null; // Current model version
    }

    setVersion(version) {
        this.currentVersion = version;
    }

    async streamAudio(text, voice, speed, onProgress) {

@@ -33,7 +38,7 @@ export class AudioService {

        console.log('AudioService: Making API call...', { text, voice, speed });

        const response = await fetch('/v1/audio/speech', {
        const response = await fetch('/v2/audio/speech', {
            method: 'POST',
            headers: { 'Content-Type': 'application/json' },
            body: JSON.stringify({

@@ -42,7 +47,8 @@ export class AudioService {
                response_format: 'mp3',
                stream: true,
                speed: speed,
                return_download_link: true
                return_download_link: true,
                version: this.currentVersion
            }),
            signal: this.controller.signal
        });

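For reference, only the fields visible in the hunk above are confirmed by this diff; the remaining fields of the speech request are assembled elsewhere in streamAudio. A sketch of a complete payload sent to /v2/audio/speech, with illustrative values and assumed field names marked:

// Illustrative payload for POST /v2/audio/speech. Only response_format, stream, speed,
// return_download_link and version appear in the hunk above; other field names are assumptions.
const examplePayload = {
    input: 'Hello world',            // assumed field name, not shown in this hunk
    voice: 'af_bella',               // assumed example voice id
    response_format: 'mp3',
    stream: true,
    speed: 1.0,
    return_download_link: true,
    version: 'v1.0'                  // taken from AudioService.currentVersion; value is illustrative
};
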
@@ -55,7 +61,7 @@ export class AudioService {
        // Check for download path as soon as we get the response
        const downloadPath = response.headers.get('x-download-path');
        if (downloadPath) {
            this.serverDownloadPath = `/v1${downloadPath}`;
            this.serverDownloadPath = `/v2${downloadPath}`;
            console.log('Download path received:', this.serverDownloadPath);
        }

@@ -123,7 +129,7 @@ export class AudioService {
        const downloadPath = headers['x-download-path'];
        if (downloadPath) {
            // Prepend /v1 since the router is mounted there
            this.serverDownloadPath = `/v1${downloadPath}`;
            this.serverDownloadPath = `/v2${downloadPath}`;
            console.log('Download path received:', this.serverDownloadPath);
        } else {
            console.warn('No X-Download-Path header found. Available headers:',

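Both download-path hunks above derive the browser-facing URL by prefixing the server-reported path with the router mount point. A minimal sketch of that logic in isolation; the header value here is invented for illustration and is not part of the commit:

// Sketch only: the prefixing mirrors the code above; the header value is an assumption.
const exampleHeaders = new Headers({ 'x-download-path': '/download/output.mp3' });  // assumed example
const examplePath = exampleHeaders.get('x-download-path');
const exampleDownloadUrl = examplePath ? `/v2${examplePath}` : null;
console.log(exampleDownloadUrl);  // "/v2/download/output.mp3"
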
@@ -2,11 +2,36 @@ export class VoiceService {
    constructor() {
        this.availableVoices = [];
        this.selectedVoices = new Set();
        this.currentVersion = null;
        this.availableVersions = [];
    }

    async loadVersions() {
        try {
            const response = await fetch('/v2/audio/versions');
            if (!response.ok) {
                const error = await response.json();
                throw new Error(error.detail?.message || 'Failed to load versions');
            }

            const data = await response.json();
            this.availableVersions = data.versions;
            this.currentVersion = data.current;
            return data;
        } catch (error) {
            console.error('Failed to load versions:', error);
            throw error;
        }
    }

    async loadVoices() {
        try {
            const response = await fetch('/v1/audio/voices');
            // Load versions first if not loaded
            if (!this.currentVersion) {
                await this.loadVersions();
            }

            const response = await fetch(`/v2/audio/voices?version=${this.currentVersion}`);
            if (!response.ok) {
                const error = await response.json();
                throw new Error(error.detail?.message || 'Failed to load voices');

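The fetches above imply a particular response shape for the v2 endpoints; the backend routes are not part of this excerpt, so the shapes below are inferred from how data is read here (data.versions, data.current, and a voices/version pair). The literal values are examples only:

// Inferred response shapes (not confirmed by this diff; values are illustrative):
// GET /v2/audio/versions                  -> { versions: [...], current: '...' }
// GET /v2/audio/voices?version=<version>  -> { voices: [...], version: '...' }
const exampleVersionsResponse = { versions: ['v0.19', 'v1.0'], current: 'v1.0' };
const exampleVoicesResponse = { voices: ['af_bella', 'af_sky'], version: 'v1.0' };
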
@@ -27,7 +52,10 @@ export class VoiceService {
                }
            }

            return this.availableVoices;
            return {
                voices: this.availableVoices,
                version: data.version
            };
        } catch (error) {
            console.error('Failed to load voices:', error);
            throw error;

@@ -46,6 +74,40 @@ export class VoiceService {
        return Array.from(this.selectedVoices).join('+');
    }

    async setVersion(version) {
        try {
            const response = await fetch('/v2/audio/version', {
                method: 'POST',
                headers: { 'Content-Type': 'application/json' },
                body: JSON.stringify(version)
            });

            if (!response.ok) {
                const error = await response.json();
                throw new Error(error.detail?.message || 'Failed to set version');
            }

            const data = await response.json();
            this.currentVersion = data.current;
            this.availableVersions = data.versions;

            // Reload voices for new version
            await this.loadVoices();
            return data;
        } catch (error) {
            console.error('Failed to set version:', error);
            throw error;
        }
    }

    getCurrentVersion() {
        return this.currentVersion;
    }

    getAvailableVersions() {
        return this.availableVersions;
    }

    addVoice(voice) {
        if (this.availableVoices.includes(voice)) {
            this.selectedVoices.add(voice);

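Combined with the VoiceSelector changes earlier in this diff, the intended flow appears to be: load versions, render the version select, load voices for the current version, then re-fetch voices whenever setVersion is called. A minimal wiring sketch under those assumptions; the import paths and the VoiceSelector constructor signature are not shown in this excerpt and are hypothetical:

// Hypothetical wiring sketch — module paths and constructor arguments are assumptions.
import { VoiceService } from './services/VoiceService.js';
import { VoiceSelector } from './components/VoiceSelector.js';

(async () => {
    const voiceService = new VoiceService();
    const selector = new VoiceSelector(voiceService);   // assumed constructor wiring
    await selector.initialize();                        // loads versions first, then voices (see above)
    await voiceService.setVersion('v1.0');              // illustrative version string; reloads voices
})();
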
@@ -36,6 +36,34 @@ textarea::placeholder {
    gap: 1rem;
}

.version-select-wrapper {
    width: 100%;
}

.version-select {
    width: 100%;
    padding: 0.75rem 1rem;
    border: 1px solid var(--border);
    border-radius: 0.5rem;
    background: rgba(15, 23, 42, 0.3);
    color: var(--text);
    font-size: 1rem;
    transition: all 0.2s ease;
    cursor: pointer;
    font-family: var(--font-family);
}

.version-select:focus {
    outline: none;
    border-color: var(--fg-color);
    box-shadow: 0 0 0 3px rgba(99, 102, 241, 0.2);
}

.version-select option {
    background: var(--surface);
    color: var(--text);
}

.voice-search {
    width: 100%;
    padding: 0.75rem 1rem;