From 4a24be1605ff9ad4a305ea7d41f1cebd674658d3 Mon Sep 17 00:00:00 2001
From: remsky
Date: Wed, 22 Jan 2025 02:33:29 -0700
Subject: [PATCH] Refactor model loading and configuration: adjust model loading device, add async streaming examples, and remove unused warmup service.

---
 api/src/builds/models.py | 2 +-
 api/src/core/config.py | 6 +-
 api/src/core/model_config.py | 25 +-
 api/src/inference/model_manager.py | 175 ++++++-----
 api/src/inference/onnx_cpu.py | 66 +----
 api/src/inference/onnx_gpu.py | 78 +----
 api/src/inference/pytorch_gpu.py | 134 ++++++---
 api/src/inference/session_pool.py | 271 ++++++++++++++++++
 api/src/inference/voice_manager.py | 60 ++--
 api/src/main.py | 63 +++-
 api/src/routers/development.py | 9 +-
 api/src/routers/openai_compatible.py | 38 ++-
 .../services/text_processing/phonemizer.py | 9 +-
 api/src/services/tts_service.py | 201 ++++++-------
 api/src/services/warmup.py | 60 ----
 docker/gpu/docker-compose.yml | 2 +-
 examples/openai_streaming_audio.py | 4 +-
 examples/simul_file_test.py | 53 ++++
 examples/simul_openai_streaming_audio.py | 91 ++++++
 examples/simul_speaker_test.py | 66 +++++
 examples/speech.mp3 | Bin 0 -> 73440 bytes
 21 files changed, 929 insertions(+), 484 deletions(-)
 create mode 100644 api/src/inference/session_pool.py
 delete mode 100644 api/src/services/warmup.py
 create mode 100644 examples/simul_file_test.py
 create mode 100644 examples/simul_openai_streaming_audio.py
 create mode 100644 examples/simul_speaker_test.py

diff --git a/api/src/builds/models.py b/api/src/builds/models.py
index bfd52ad..5495465 100644
--- a/api/src/builds/models.py
+++ b/api/src/builds/models.py
@@ -367,7 +367,7 @@ async def build_model(path, device):
         decoder=decoder.to(device).eval(),
         text_encoder=text_encoder.to(device).eval(),
     )
-    weights = await load_model_weights(path, device='cpu')
+    weights = await load_model_weights(path, device=device)
     for key, state_dict in weights['net'].items():
         assert key in model, key
         try:
diff --git a/api/src/core/config.py b/api/src/core/config.py
index 91aec63..03ce30e 100644
--- a/api/src/core/config.py
+++ b/api/src/core/config.py
@@ -15,9 +15,9 @@ class Settings(BaseSettings):
     default_voice: str = "af"
     use_gpu: bool = False  # Whether to use GPU acceleration if available
     use_onnx: bool = True  # Whether to use ONNX runtime
-    # Paths relative to api directory
-    model_dir: str = "src/models"  # Model directory relative to api/
-    voices_dir: str = "src/voices"  # Voices directory relative to api/
+    # Container absolute paths
+    model_dir: str = "/app/api/src/models"  # Absolute path in container
+    voices_dir: str = "/app/api/src/voices"  # Absolute path in container

     # Model filenames
     pytorch_model_file: str = "kokoro-v0_19.pth"
diff --git a/api/src/core/model_config.py b/api/src/core/model_config.py
index dc82aee..f9ae254 100644
--- a/api/src/core/model_config.py
+++ b/api/src/core/model_config.py
@@ -6,6 +6,11 @@ from pydantic import BaseModel, Field
 class ONNXCPUConfig(BaseModel):
     """ONNX CPU runtime configuration."""

+    # Session pooling
+    max_instances: int = Field(4, description="Maximum concurrent model instances")
+    instance_timeout: int = Field(300, description="Session timeout in seconds")
+
+    # Runtime settings
     num_threads: int = Field(8, description="Number of threads for parallel operations")
     inter_op_threads: int = Field(4, description="Number of threads for operator parallelism")
     execution_mode: str = Field("parallel", description="ONNX execution mode")
@@ -20,9 +25,14 @@ class 
ONNXGPUConfig(ONNXCPUConfig): """ONNX GPU-specific configuration.""" + # CUDA settings device_id: int = Field(0, description="CUDA device ID") gpu_mem_limit: float = Field(0.7, description="Fraction of GPU memory to use") cudnn_conv_algo_search: str = Field("EXHAUSTIVE", description="CuDNN convolution algorithm search") + + # Stream management + cuda_streams: int = Field(2, description="Number of CUDA streams for inference") + stream_timeout: int = Field(60, description="Stream timeout in seconds") do_copy_in_default_stream: bool = Field(True, description="Copy in default CUDA stream") class Config: @@ -32,8 +42,6 @@ class ONNXGPUConfig(ONNXCPUConfig): class PyTorchCPUConfig(BaseModel): """PyTorch CPU backend configuration.""" - max_batch_size: int = Field(32, description="Maximum batch size for batched inference") - stream_buffer_size: int = Field(8, description="Size of stream buffer") memory_threshold: float = Field(0.8, description="Memory threshold for cleanup") retry_on_oom: bool = Field(True, description="Whether to retry on OOM errors") num_threads: int = Field(8, description="Number of threads for parallel operations") @@ -49,18 +57,11 @@ class PyTorchGPUConfig(BaseModel): device_id: int = Field(0, description="CUDA device ID") use_fp16: bool = Field(True, description="Whether to use FP16 precision") use_triton: bool = Field(True, description="Whether to use Triton for CUDA kernels") - max_batch_size: int = Field(32, description="Maximum batch size for batched inference") - stream_buffer_size: int = Field(8, description="Size of CUDA stream buffer") memory_threshold: float = Field(0.8, description="Memory threshold for cleanup") retry_on_oom: bool = Field(True, description="Whether to retry on OOM errors") sync_cuda: bool = Field(True, description="Whether to synchronize CUDA operations") - - class Config: - frozen = True - """PyTorch CPU-specific configuration.""" - - num_threads: int = Field(8, description="Number of threads for parallel operations") - pin_memory: bool = Field(True, description="Whether to pin memory for faster CPU-GPU transfer") + cuda_streams: int = Field(2, description="Number of CUDA streams for inference") + stream_timeout: int = Field(60, description="Stream timeout in seconds") class Config: frozen = True @@ -74,7 +75,7 @@ class ModelConfig(BaseModel): device_type: str = Field("auto", description="Device type ('cpu', 'gpu', or 'auto')") cache_models: bool = Field(True, description="Whether to cache loaded models") cache_voices: bool = Field(True, description="Whether to cache voice tensors") - voice_cache_size: int = Field(10, description="Maximum number of cached voices") + voice_cache_size: int = Field(2, description="Maximum number of cached voices") # Backend-specific configs onnx_cpu: ONNXCPUConfig = Field(default_factory=ONNXCPUConfig) diff --git a/api/src/inference/model_manager.py b/api/src/inference/model_manager.py index 26621c2..ed67ce8 100644 --- a/api/src/inference/model_manager.py +++ b/api/src/inference/model_manager.py @@ -1,5 +1,6 @@ """Model management and caching.""" +import asyncio from typing import Dict, Optional import torch @@ -13,11 +14,19 @@ from .onnx_cpu import ONNXCPUBackend from .onnx_gpu import ONNXGPUBackend from .pytorch_cpu import PyTorchCPUBackend from .pytorch_gpu import PyTorchGPUBackend +from .session_pool import CPUSessionPool, StreamingSessionPool + + +# Global singleton instance and state +_manager_instance = None +_manager_lock = asyncio.Lock() +_loaded_models = {} +_backends = {} class ModelManager: """Manages 
model loading and inference across backends.""" - + def __init__(self, config: Optional[ModelConfig] = None): """Initialize model manager. @@ -25,65 +34,60 @@ class ModelManager: config: Optional configuration """ self._config = config or model_config - self._backends: Dict[str, BaseModelBackend] = {} - self._current_backend: Optional[str] = None - self._initialize_backends() + global _loaded_models, _backends + self._loaded_models = _loaded_models + self._backends = _backends + + # Initialize session pools + self._session_pools = { + 'onnx_cpu': CPUSessionPool(), + 'onnx_gpu': StreamingSessionPool() + } + + # Initialize locks + self._backend_locks: Dict[str, asyncio.Lock] = {} - def _initialize_backends(self) -> None: - """Initialize available backends based on settings.""" - has_gpu = settings.use_gpu and torch.cuda.is_available() + def _determine_device(self) -> str: + """Determine device based on settings.""" + if settings.use_gpu and torch.cuda.is_available(): + return "cuda" + return "cpu" + + async def initialize(self) -> None: + """Initialize backends.""" + if self._backends: + logger.debug("Using existing backend instances") + return + + device = self._determine_device() try: - if has_gpu: + if device == "cuda": if settings.use_onnx: - # ONNX GPU primary self._backends['onnx_gpu'] = ONNXGPUBackend() self._current_backend = 'onnx_gpu' - logger.info("Initialized ONNX GPU backend") - - # PyTorch GPU fallback - self._backends['pytorch_gpu'] = PyTorchGPUBackend() - logger.info("Initialized PyTorch GPU backend") + logger.info("Initialized new ONNX GPU backend") else: - # PyTorch GPU primary self._backends['pytorch_gpu'] = PyTorchGPUBackend() self._current_backend = 'pytorch_gpu' - logger.info("Initialized PyTorch GPU backend") + logger.info("Initialized new PyTorch GPU backend") + else: + if settings.use_onnx: + self._backends['onnx_cpu'] = ONNXCPUBackend() + self._current_backend = 'onnx_cpu' + logger.info("Initialized new ONNX CPU backend") + else: + self._backends['pytorch_cpu'] = PyTorchCPUBackend() + self._current_backend = 'pytorch_cpu' + logger.info("Initialized new PyTorch CPU backend") - # ONNX GPU fallback - self._backends['onnx_gpu'] = ONNXGPUBackend() - logger.info("Initialized ONNX GPU backend") - else: - self._initialize_cpu_backends() - except Exception as e: - logger.error(f"Failed to initialize GPU backends: {e}") - # Fallback to CPU if GPU fails - self._initialize_cpu_backends() - - def _initialize_cpu_backends(self) -> None: - """Initialize CPU backends based on settings.""" - try: - if settings.use_onnx: - # ONNX CPU primary - self._backends['onnx_cpu'] = ONNXCPUBackend() - self._current_backend = 'onnx_cpu' - logger.info("Initialized ONNX CPU backend") + # Initialize locks for each backend + for backend in self._backends: + self._backend_locks[backend] = asyncio.Lock() - # PyTorch CPU fallback - self._backends['pytorch_cpu'] = PyTorchCPUBackend() - logger.info("Initialized PyTorch CPU backend") - else: - # PyTorch CPU primary - self._backends['pytorch_cpu'] = PyTorchCPUBackend() - self._current_backend = 'pytorch_cpu' - logger.info("Initialized PyTorch CPU backend") - - # ONNX CPU fallback - self._backends['onnx_cpu'] = ONNXCPUBackend() - logger.info("Initialized ONNX CPU backend") except Exception as e: - logger.error(f"Failed to initialize CPU backends: {e}") - raise RuntimeError("No backends available") + logger.error(f"Failed to initialize backend: {e}") + raise RuntimeError("Failed to initialize backend") def get_backend(self, backend_type: Optional[str] = 
None) -> BaseModelBackend: """Get specified backend. @@ -154,19 +158,42 @@ class ModelManager: if backend_type is None: backend_type = self._determine_backend(abs_path) - backend = self.get_backend(backend_type) + # Get backend lock + lock = self._backend_locks[backend_type] - # Load model - await backend.load_model(abs_path) - logger.info(f"Loaded model on {backend_type} backend") - - # Run warmup if voice provided - if warmup_voice is not None: - await self._warmup_inference(backend, warmup_voice) + async with lock: + backend = self.get_backend(backend_type) + + # For ONNX backends, use session pool + if backend_type.startswith('onnx'): + pool = self._session_pools[backend_type] + backend._session = await pool.get_session(abs_path) + self._loaded_models[backend_type] = abs_path + logger.info(f"Fetched model instance from {backend_type} pool") + + # For PyTorch backends, load normally + else: + # Check if model is already loaded + if (backend_type in self._loaded_models and + self._loaded_models[backend_type] == abs_path and + backend.is_loaded): + logger.info(f"Fetching existing model instance from {backend_type}") + return + + # Load model + await backend.load_model(abs_path) + self._loaded_models[backend_type] = abs_path + logger.info(f"Initialized new model instance on {backend_type}") + + # Run warmup if voice provided + if warmup_voice is not None: + await self._warmup_inference(backend, warmup_voice) except Exception as e: + # Clear cached path on failure + self._loaded_models.pop(backend_type, None) raise RuntimeError(f"Failed to load model: {e}") - + async def _warmup_inference(self, backend: BaseModelBackend, voice: torch.Tensor) -> None: """Run warmup inference to initialize model. @@ -188,7 +215,7 @@ class ModelManager: # Run inference backend.generate(tokens, voice, speed=1.0) - logger.info("Completed warmup inference") + logger.debug("Completed warmup inference") except Exception as e: logger.warning(f"Warmup inference failed: {e}") @@ -221,16 +248,23 @@ class ModelManager: try: # Generate audio using provided voice tensor + # No lock needed here since inference is thread-safe return backend.generate(tokens, voice, speed) - except Exception as e: raise RuntimeError(f"Generation failed: {e}") def unload_all(self) -> None: - """Unload models from all backends.""" + """Unload models from all backends and clear cache.""" + # Clean up session pools + for pool in self._session_pools.values(): + pool.cleanup() + + # Unload PyTorch backends for backend in self._backends.values(): backend.unload() - logger.info("Unloaded all models") + + self._loaded_models.clear() + logger.info("Unloaded all models and cleared cache") @property def available_backends(self) -> list[str]: @@ -251,12 +285,8 @@ class ModelManager: return self._current_backend -# Module-level instance -_manager: Optional[ModelManager] = None - - -def get_manager(config: Optional[ModelConfig] = None) -> ModelManager: - """Get or create global model manager instance. +async def get_manager(config: Optional[ModelConfig] = None) -> ModelManager: + """Get global model manager instance. 
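+
+    Created lazily under an asyncio lock with a double-checked pattern, so
+    concurrent first callers share a single initialized manager.
+
+    Example (assumed usage, mirroring the lifespan hook in main.py):
+        manager = await get_manager()   # first call creates and initializes
+        reused = await get_manager()    # later calls return the same object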
Args: config: Optional model configuration @@ -264,7 +294,10 @@ def get_manager(config: Optional[ModelConfig] = None) -> ModelManager: Returns: ModelManager instance """ - global _manager - if _manager is None: - _manager = ModelManager(config) - return _manager \ No newline at end of file + global _manager_instance + + async with _manager_lock: + if _manager_instance is None: + _manager_instance = ModelManager(config) + await _manager_instance.initialize() + return _manager_instance \ No newline at end of file diff --git a/api/src/inference/onnx_cpu.py b/api/src/inference/onnx_cpu.py index 34d68ac..b17227c 100644 --- a/api/src/inference/onnx_cpu.py +++ b/api/src/inference/onnx_cpu.py @@ -1,20 +1,16 @@ """CPU-based ONNX inference backend.""" -from typing import Dict, Optional +from typing import Optional import numpy as np import torch from loguru import logger -from onnxruntime import ( - ExecutionMode, - GraphOptimizationLevel, - InferenceSession, - SessionOptions -) +from onnxruntime import InferenceSession from ..core import paths from ..core.model_config import model_config from .base import BaseModelBackend +from .session_pool import create_session_options, create_provider_options class ONNXCPUBackend(BaseModelBackend): @@ -47,8 +43,8 @@ class ONNXCPUBackend(BaseModelBackend): logger.info(f"Loading ONNX model: {model_path}") # Configure session - options = self._create_session_options() - provider_options = self._create_provider_options() + options = create_session_options(is_gpu=False) + provider_options = create_provider_options(is_gpu=False) # Create session self._session = InferenceSession( @@ -84,9 +80,9 @@ class ONNXCPUBackend(BaseModelBackend): raise RuntimeError("Model not loaded") try: - # Prepare inputs - tokens_input = np.array([tokens], dtype=np.int64) - style_input = voice[len(tokens)].numpy() + # Prepare inputs with start/end tokens + tokens_input = np.array([[0, *tokens, 0]], dtype=np.int64) # Add start/end tokens + style_input = voice[len(tokens) + 2].numpy() # Adjust index for start/end tokens speed_input = np.full(1, speed, dtype=np.float32) # Run inference @@ -104,52 +100,6 @@ class ONNXCPUBackend(BaseModelBackend): except Exception as e: raise RuntimeError(f"Generation failed: {e}") - def _create_session_options(self) -> SessionOptions: - """Create ONNX session options. - - Returns: - Configured session options - """ - options = SessionOptions() - config = model_config.onnx_cpu - - # Set optimization level - if config.optimization_level == "all": - options.graph_optimization_level = GraphOptimizationLevel.ORT_ENABLE_ALL - elif config.optimization_level == "basic": - options.graph_optimization_level = GraphOptimizationLevel.ORT_ENABLE_BASIC - else: - options.graph_optimization_level = GraphOptimizationLevel.ORT_DISABLE_ALL - - # Configure threading - options.intra_op_num_threads = config.num_threads - options.inter_op_num_threads = config.inter_op_threads - - # Set execution mode - options.execution_mode = ( - ExecutionMode.ORT_PARALLEL - if config.execution_mode == "parallel" - else ExecutionMode.ORT_SEQUENTIAL - ) - - # Configure memory optimization - options.enable_mem_pattern = config.memory_pattern - - return options - - def _create_provider_options(self) -> Dict: - """Create CPU provider options. 
- - Returns: - Provider configuration - """ - return { - "CPUExecutionProvider": { - "arena_extend_strategy": model_config.onnx_cpu.arena_extend_strategy, - "cpu_memory_arena_cfg": "cpu:0" - } - } - def unload(self) -> None: """Unload model and free resources.""" if self._session is not None: diff --git a/api/src/inference/onnx_gpu.py b/api/src/inference/onnx_gpu.py index 2df32e2..f11534e 100644 --- a/api/src/inference/onnx_gpu.py +++ b/api/src/inference/onnx_gpu.py @@ -1,20 +1,16 @@ """GPU-based ONNX inference backend.""" -from typing import Dict, Optional +from typing import Optional import numpy as np import torch from loguru import logger -from onnxruntime import ( - ExecutionMode, - GraphOptimizationLevel, - InferenceSession, - SessionOptions -) +from onnxruntime import InferenceSession from ..core import paths from ..core.model_config import model_config from .base import BaseModelBackend +from .session_pool import create_session_options, create_provider_options class ONNXGPUBackend(BaseModelBackend): @@ -27,6 +23,9 @@ class ONNXGPUBackend(BaseModelBackend): raise RuntimeError("CUDA not available") self._device = "cuda" self._session: Optional[InferenceSession] = None + + # Configure GPU + torch.cuda.set_device(model_config.onnx_gpu.device_id) @property def is_loaded(self) -> bool: @@ -49,8 +48,8 @@ class ONNXGPUBackend(BaseModelBackend): logger.info(f"Loading ONNX model on GPU: {model_path}") # Configure session - options = self._create_session_options() - provider_options = self._create_provider_options() + options = create_session_options(is_gpu=True) + provider_options = create_provider_options(is_gpu=True) # Create session with CUDA provider self._session = InferenceSession( @@ -87,8 +86,8 @@ class ONNXGPUBackend(BaseModelBackend): try: # Prepare inputs - tokens_input = np.array([tokens], dtype=np.int64) - style_input = voice[len(tokens)].cpu().numpy() # Move to CPU for ONNX + tokens_input = np.array([[0, *tokens, 0]], dtype=np.int64) # Add start/end tokens + style_input = voice[len(tokens) + 2].cpu().numpy() # Move to CPU for ONNX speed_input = np.full(1, speed, dtype=np.float32) # Run inference @@ -104,62 +103,15 @@ class ONNXGPUBackend(BaseModelBackend): return result[0] except Exception as e: + if "out of memory" in str(e).lower(): + # Clear CUDA cache and retry + torch.cuda.empty_cache() + return self.generate(tokens, voice, speed) raise RuntimeError(f"Generation failed: {e}") - def _create_session_options(self) -> SessionOptions: - """Create ONNX session options. - - Returns: - Configured session options - """ - options = SessionOptions() - config = model_config.onnx_gpu - - # Set optimization level - if config.optimization_level == "all": - options.graph_optimization_level = GraphOptimizationLevel.ORT_ENABLE_ALL - elif config.optimization_level == "basic": - options.graph_optimization_level = GraphOptimizationLevel.ORT_ENABLE_BASIC - else: - options.graph_optimization_level = GraphOptimizationLevel.ORT_DISABLE_ALL - - # Configure threading - options.intra_op_num_threads = config.num_threads - options.inter_op_num_threads = config.inter_op_threads - - # Set execution mode - options.execution_mode = ( - ExecutionMode.ORT_PARALLEL - if config.execution_mode == "parallel" - else ExecutionMode.ORT_SEQUENTIAL - ) - - # Configure memory optimization - options.enable_mem_pattern = config.memory_pattern - - return options - - def _create_provider_options(self) -> Dict: - """Create CUDA provider options. 
- - Returns: - Provider configuration - """ - config = model_config.onnx_gpu - return { - "CUDAExecutionProvider": { - "device_id": config.device_id, - "arena_extend_strategy": config.arena_extend_strategy, - "gpu_mem_limit": int(config.gpu_mem_limit * torch.cuda.get_device_properties(0).total_memory), - "cudnn_conv_algo_search": config.cudnn_conv_algo_search, - "do_copy_in_default_stream": config.do_copy_in_default_stream - } - } - def unload(self) -> None: """Unload model and free resources.""" if self._session is not None: del self._session self._session = None - if torch.cuda.is_available(): - torch.cuda.empty_cache() \ No newline at end of file + torch.cuda.empty_cache() \ No newline at end of file diff --git a/api/src/inference/pytorch_gpu.py b/api/src/inference/pytorch_gpu.py index 2995818..fc07a0d 100644 --- a/api/src/inference/pytorch_gpu.py +++ b/api/src/inference/pytorch_gpu.py @@ -13,8 +13,37 @@ from ..core.model_config import model_config from .base import BaseModelBackend +class CUDAStreamManager: + """CUDA stream manager.""" + + def __init__(self, num_streams: int): + """Initialize stream manager. + + Args: + num_streams: Number of CUDA streams + """ + self.streams = [torch.cuda.Stream() for _ in range(num_streams)] + self._current = 0 + + def get_next_stream(self) -> torch.cuda.Stream: + """Get next available stream. + + Returns: + CUDA stream + """ + stream = self.streams[self._current] + self._current = (self._current + 1) % len(self.streams) + return stream + + @torch.no_grad() -def forward(model: torch.nn.Module, tokens: list[int], ref_s: torch.Tensor, speed: float) -> np.ndarray: +def forward( + model: torch.nn.Module, + tokens: list[int], + ref_s: torch.Tensor, + speed: float, + stream: Optional[torch.cuda.Stream] = None +) -> np.ndarray: """Forward pass through model. 
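+
+    When a CUDA stream is supplied, all work is enqueued on that stream and
+    synchronized before the audio is returned; otherwise the default stream
+    is used.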
Args: @@ -22,59 +51,67 @@ def forward(model: torch.nn.Module, tokens: list[int], ref_s: torch.Tensor, spee tokens: Input tokens ref_s: Reference signal (shape: [1, n_features]) speed: Speed multiplier + stream: Optional CUDA stream Returns: Generated audio """ device = ref_s.device - # Initial tensor setup - tokens = torch.LongTensor([[0, *tokens, 0]]).to(device) - input_lengths = torch.LongTensor([tokens.shape[-1]]).to(device) - text_mask = length_to_mask(input_lengths).to(device) + # Use provided stream or default + with torch.cuda.stream(stream) if stream else torch.cuda.default_stream(): + # Initial tensor setup + tokens = torch.LongTensor([[0, *tokens, 0]]).to(device) + input_lengths = torch.LongTensor([tokens.shape[-1]]).to(device) + text_mask = length_to_mask(input_lengths).to(device) - # Split reference signals (style_dim=128 from config) - style_dim = 128 - s_ref = ref_s[:, :style_dim].clone().to(device) - s_content = ref_s[:, style_dim:].clone().to(device) + # Split reference signals (style_dim=128 from config) + style_dim = 128 + s_ref = ref_s[:, :style_dim].clone().to(device) + s_content = ref_s[:, style_dim:].clone().to(device) - # BERT and encoder pass - bert_dur = model.bert(tokens, attention_mask=(~text_mask).int()) - d_en = model.bert_encoder(bert_dur).transpose(-1, -2) + # BERT and encoder pass + bert_dur = model.bert(tokens, attention_mask=(~text_mask).int()) + d_en = model.bert_encoder(bert_dur).transpose(-1, -2) - # Predictor forward pass - d = model.predictor.text_encoder(d_en, s_content, input_lengths, text_mask) - x, _ = model.predictor.lstm(d) + # Predictor forward pass + d = model.predictor.text_encoder(d_en, s_content, input_lengths, text_mask) + x, _ = model.predictor.lstm(d) - # Duration prediction - duration = model.predictor.duration_proj(x) - duration = torch.sigmoid(duration).sum(axis=-1) / speed - pred_dur = torch.round(duration).clamp(min=1).long() - del duration, x + # Duration prediction + duration = model.predictor.duration_proj(x) + duration = torch.sigmoid(duration).sum(axis=-1) / speed + pred_dur = torch.round(duration).clamp(min=1).long() + del duration, x - # Alignment matrix construction - pred_aln_trg = torch.zeros(input_lengths.item(), pred_dur.sum().item(), device=device) - c_frame = 0 - for i in range(pred_aln_trg.size(0)): - pred_aln_trg[i, c_frame:c_frame + pred_dur[0, i].item()] = 1 - c_frame += pred_dur[0, i].item() - pred_aln_trg = pred_aln_trg.unsqueeze(0) + # Alignment matrix construction + pred_aln_trg = torch.zeros(input_lengths.item(), pred_dur.sum().item(), device=device) + c_frame = 0 + for i in range(pred_aln_trg.size(0)): + pred_aln_trg[i, c_frame:c_frame + pred_dur[0, i].item()] = 1 + c_frame += pred_dur[0, i].item() + pred_aln_trg = pred_aln_trg.unsqueeze(0) - # Matrix multiplications - en = d.transpose(-1, -2) @ pred_aln_trg - del d - - F0_pred, N_pred = model.predictor.F0Ntrain(en, s_content) - del en + # Matrix multiplications + en = d.transpose(-1, -2) @ pred_aln_trg + del d + + F0_pred, N_pred = model.predictor.F0Ntrain(en, s_content) + del en - # Final text encoding and decoding - t_en = model.text_encoder(tokens, input_lengths, text_mask) - asr = t_en @ pred_aln_trg - del t_en + # Final text encoding and decoding + t_en = model.text_encoder(tokens, input_lengths, text_mask) + asr = t_en @ pred_aln_trg + del t_en - # Generate output - output = model.decoder(asr, F0_pred, N_pred, s_ref) - return output.squeeze().cpu().numpy() + # Generate output + output = model.decoder(asr, F0_pred, N_pred, s_ref) + + # Ensure operation 
completion if using custom stream + if stream: + stream.synchronize() + + return output.squeeze().cpu().numpy() def length_to_mask(lengths: torch.Tensor) -> torch.Tensor: @@ -92,9 +129,10 @@ class PyTorchGPUBackend(BaseModelBackend): def __init__(self): """Initialize GPU backend.""" super().__init__() - if not torch.cuda.is_available(): - raise RuntimeError("CUDA not available") - self._device = "cuda" + from ..core.config import settings + if not (settings.use_gpu and torch.cuda.is_available()): + raise RuntimeError("GPU backend requires GPU support and use_gpu=True") + self._device = "cuda" # Device is enforced by backend selection in model_manager self._model: Optional[torch.nn.Module] = None # Configure GPU settings @@ -102,6 +140,9 @@ class PyTorchGPUBackend(BaseModelBackend): if config.sync_cuda: torch.cuda.synchronize() torch.cuda.set_device(config.device_id) + + # Initialize stream manager + self._stream_manager = CUDAStreamManager(config.cuda_streams) async def load_model(self, path: str) -> None: """Load PyTorch model. @@ -154,8 +195,11 @@ class PyTorchGPUBackend(BaseModelBackend): if ref_s.dim() == 1: ref_s = ref_s.unsqueeze(0) # Add batch dimension if needed - # Generate audio - return forward(self._model, tokens, ref_s, speed) + # Get next available stream + stream = self._stream_manager.get_next_stream() + + # Generate audio using stream + return forward(self._model, tokens, ref_s, speed, stream) except Exception as e: logger.error(f"Generation failed: {e}") diff --git a/api/src/inference/session_pool.py b/api/src/inference/session_pool.py new file mode 100644 index 0000000..4674a91 --- /dev/null +++ b/api/src/inference/session_pool.py @@ -0,0 +1,271 @@ +"""Session pooling for model inference.""" + +import asyncio +import time +from dataclasses import dataclass +from typing import Dict, Optional, Set + +import torch +from loguru import logger +from onnxruntime import ( + ExecutionMode, + GraphOptimizationLevel, + InferenceSession, + SessionOptions +) + +from ..core import paths +from ..core.model_config import model_config + + +@dataclass +class SessionInfo: + """Session information.""" + session: InferenceSession + last_used: float + stream_id: Optional[int] = None + + +def create_session_options(is_gpu: bool = False) -> SessionOptions: + """Create ONNX session options. + + Args: + is_gpu: Whether to use GPU configuration + + Returns: + Configured session options + """ + options = SessionOptions() + config = model_config.onnx_gpu if is_gpu else model_config.onnx_cpu + + # Set optimization level + if config.optimization_level == "all": + options.graph_optimization_level = GraphOptimizationLevel.ORT_ENABLE_ALL + elif config.optimization_level == "basic": + options.graph_optimization_level = GraphOptimizationLevel.ORT_ENABLE_BASIC + else: + options.graph_optimization_level = GraphOptimizationLevel.ORT_DISABLE_ALL + + # Configure threading + options.intra_op_num_threads = config.num_threads + options.inter_op_num_threads = config.inter_op_threads + + # Set execution mode + options.execution_mode = ( + ExecutionMode.ORT_PARALLEL + if config.execution_mode == "parallel" + else ExecutionMode.ORT_SEQUENTIAL + ) + + # Configure memory optimization + options.enable_mem_pattern = config.memory_pattern + + return options + + +def create_provider_options(is_gpu: bool = False) -> Dict: + """Create provider options. 
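+
+    Note: GPU options cap the ONNX memory arena at gpu_mem_limit (0.7 by
+    default) times total device memory and pin work to device_id; CPU
+    options configure only the memory arena behavior.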
+ + Args: + is_gpu: Whether to use GPU configuration + + Returns: + Provider configuration + """ + if is_gpu: + config = model_config.onnx_gpu + return { + "CUDAExecutionProvider": { + "device_id": config.device_id, + "arena_extend_strategy": config.arena_extend_strategy, + "gpu_mem_limit": int(config.gpu_mem_limit * torch.cuda.get_device_properties(0).total_memory), + "cudnn_conv_algo_search": config.cudnn_conv_algo_search, + "do_copy_in_default_stream": config.do_copy_in_default_stream + } + } + else: + return { + "CPUExecutionProvider": { + "arena_extend_strategy": model_config.onnx_cpu.arena_extend_strategy, + "cpu_memory_arena_cfg": "cpu:0" + } + } + + +class BaseSessionPool: + """Base session pool implementation.""" + + def __init__(self, max_size: int, timeout: int): + """Initialize session pool. + + Args: + max_size: Maximum number of concurrent sessions + timeout: Session timeout in seconds + """ + self._max_size = max_size + self._timeout = timeout + self._sessions: Dict[str, SessionInfo] = {} + self._lock = asyncio.Lock() + + async def get_session(self, model_path: str) -> InferenceSession: + """Get session from pool. + + Args: + model_path: Path to model file + + Returns: + ONNX inference session + + Raises: + RuntimeError: If no sessions available + """ + async with self._lock: + # Clean expired sessions + self._cleanup_expired() + + # Check if session exists and is valid + if model_path in self._sessions: + session_info = self._sessions[model_path] + session_info.last_used = time.time() + return session_info.session + + # Check if we can create new session + if len(self._sessions) >= self._max_size: + raise RuntimeError( + f"Maximum number of sessions reached ({self._max_size})" + ) + + # Create new session + session = await self._create_session(model_path) + self._sessions[model_path] = SessionInfo( + session=session, + last_used=time.time() + ) + return session + + def _cleanup_expired(self) -> None: + """Remove expired sessions.""" + current_time = time.time() + expired = [ + path for path, info in self._sessions.items() + if current_time - info.last_used > self._timeout + ] + for path in expired: + logger.info(f"Removing expired session: {path}") + del self._sessions[path] + + async def _create_session(self, model_path: str) -> InferenceSession: + """Create new session. + + Args: + model_path: Path to model file + + Returns: + ONNX inference session + """ + raise NotImplementedError + + def cleanup(self) -> None: + """Clean up all sessions.""" + self._sessions.clear() + + +class StreamingSessionPool(BaseSessionPool): + """GPU session pool with CUDA streams.""" + + def __init__(self): + """Initialize GPU session pool.""" + config = model_config.onnx_gpu + super().__init__(config.cuda_streams, config.stream_timeout) + self._available_streams: Set[int] = set(range(config.cuda_streams)) + + async def get_session(self, model_path: str) -> InferenceSession: + """Get session with CUDA stream. 
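+
+        Each session is pinned to a single CUDA stream ID; the ID is
+        returned to the available set when the session expires or when
+        session creation fails.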
+ + Args: + model_path: Path to model file + + Returns: + ONNX inference session + + Raises: + RuntimeError: If no streams available + """ + async with self._lock: + # Clean expired sessions + self._cleanup_expired() + + # Try to find existing session + if model_path in self._sessions: + session_info = self._sessions[model_path] + session_info.last_used = time.time() + return session_info.session + + # Get available stream + if not self._available_streams: + raise RuntimeError("No CUDA streams available") + stream_id = self._available_streams.pop() + + try: + # Create new session + session = await self._create_session(model_path) + self._sessions[model_path] = SessionInfo( + session=session, + last_used=time.time(), + stream_id=stream_id + ) + return session + + except Exception: + # Return stream to pool on failure + self._available_streams.add(stream_id) + raise + + def _cleanup_expired(self) -> None: + """Remove expired sessions and return streams.""" + current_time = time.time() + expired = [ + path for path, info in self._sessions.items() + if current_time - info.last_used > self._timeout + ] + for path in expired: + info = self._sessions[path] + if info.stream_id is not None: + self._available_streams.add(info.stream_id) + logger.info(f"Removing expired session: {path}") + del self._sessions[path] + + async def _create_session(self, model_path: str) -> InferenceSession: + """Create new session with CUDA provider.""" + abs_path = await paths.get_model_path(model_path) + options = create_session_options(is_gpu=True) + provider_options = create_provider_options(is_gpu=True) + + return InferenceSession( + abs_path, + sess_options=options, + providers=["CUDAExecutionProvider"], + provider_options=[provider_options] + ) + + +class CPUSessionPool(BaseSessionPool): + """CPU session pool.""" + + def __init__(self): + """Initialize CPU session pool.""" + config = model_config.onnx_cpu + super().__init__(config.max_instances, config.instance_timeout) + + async def _create_session(self, model_path: str) -> InferenceSession: + """Create new session with CPU provider.""" + abs_path = await paths.get_model_path(model_path) + options = create_session_options(is_gpu=False) + provider_options = create_provider_options(is_gpu=False) + + return InferenceSession( + abs_path, + sess_options=options, + providers=["CPUExecutionProvider"], + provider_options=[provider_options] + ) \ No newline at end of file diff --git a/api/src/inference/voice_manager.py b/api/src/inference/voice_manager.py index 15932c7..67f34a3 100644 --- a/api/src/inference/voice_manager.py +++ b/api/src/inference/voice_manager.py @@ -1,11 +1,10 @@ """Voice pack management and caching.""" import os -from typing import Dict, List, Optional, Union +from typing import Dict, List, Optional import torch from loguru import logger -from pydantic import BaseModel from ..core import paths from ..core.config import settings @@ -13,7 +12,7 @@ from ..structures.model_schemas import VoiceConfig class VoiceManager: - """Manages voice loading, caching, and operations.""" + """Manages voice loading and operations.""" def __init__(self, config: Optional[VoiceConfig] = None): """Initialize voice manager. 
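+
+        Loaded voices are cached per (path, device) pair, up to the
+        configured cache size.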
@@ -33,15 +32,8 @@ class VoiceManager: Returns: Path to voice file if exists, None otherwise """ - # Get api directory path (two levels up from inference) api_dir = os.path.dirname(os.path.dirname(os.path.dirname(__file__))) - - # Construct voice path relative to api directory voice_path = os.path.join(api_dir, settings.voices_dir, f"{voice_name}.pt") - - # Ensure voices directory exists - os.makedirs(os.path.dirname(voice_path), exist_ok=True) - return voice_path if os.path.exists(voice_path) else None async def load_voice(self, voice_name: str, device: str = "cpu") -> torch.Tensor: @@ -66,21 +58,20 @@ class VoiceManager: if self._config.use_cache and cache_key in self._voice_cache: return self._voice_cache[cache_key] + # Load voice tensor try: - # Load voice tensor voice = await paths.load_voice_tensor(voice_path, device=device) - - # Cache if enabled - if self._config.use_cache: - self._manage_cache() - self._voice_cache[cache_key] = voice - logger.debug(f"Cached voice: {voice_name} on {device}") - - return voice - except Exception as e: raise RuntimeError(f"Failed to load voice {voice_name}: {e}") + # Cache if enabled + if self._config.use_cache: + self._manage_cache() + self._voice_cache[cache_key] = voice + logger.debug(f"Cached voice: {voice_name} on {device}") + + return voice + def _manage_cache(self) -> None: """Manage voice cache size.""" if len(self._voice_cache) >= self._config.cache_size: @@ -123,14 +114,14 @@ class VoiceManager: # Get api directory path api_dir = os.path.dirname(os.path.dirname(os.path.dirname(__file__))) voices_dir = os.path.join(api_dir, settings.voices_dir) - - # Ensure voices directory exists os.makedirs(voices_dir, exist_ok=True) # Save combined voice combined_path = os.path.join(voices_dir, f"{combined_name}.pt") try: torch.save(combined_tensor, combined_path) + # Cache the new combined voice + self._voice_cache[f"{combined_path}_{device}"] = combined_tensor except Exception as e: raise RuntimeError(f"Failed to save combined voice: {e}") @@ -147,17 +138,13 @@ class VoiceManager: """ voices = [] try: - # Get api directory path api_dir = os.path.dirname(os.path.dirname(os.path.dirname(__file__))) voices_dir = os.path.join(api_dir, settings.voices_dir) - - # Ensure voices directory exists os.makedirs(voices_dir, exist_ok=True) - # List voice files for entry in os.listdir(voices_dir): if entry.endswith(".pt"): - voices.append(entry[:-3]) # Remove .pt extension + voices.append(entry[:-3]) except Exception as e: logger.error(f"Error listing voices: {e}") return sorted(voices) @@ -174,11 +161,8 @@ class VoiceManager: try: if not os.path.exists(voice_path): return False - - # Try loading voice voice = torch.load(voice_path, map_location="cpu") return isinstance(voice, torch.Tensor) - except Exception: return False @@ -195,12 +179,12 @@ class VoiceManager: } -# Module-level instance -_manager: Optional[VoiceManager] = None +# Global singleton instance +_manager_instance = None -def get_manager(config: Optional[VoiceConfig] = None) -> VoiceManager: - """Get or create global voice manager instance. +async def get_manager(config: Optional[VoiceConfig] = None) -> VoiceManager: + """Get global voice manager instance. 
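+
+    Unlike the model manager, no lock is taken here: there is no await
+    between the None check and the assignment, so creation cannot be
+    interleaved on a single event loop.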
Args: config: Optional voice configuration @@ -208,7 +192,7 @@ def get_manager(config: Optional[VoiceConfig] = None) -> VoiceManager: Returns: VoiceManager instance """ - global _manager - if _manager is None: - _manager = VoiceManager(config) - return _manager \ No newline at end of file + global _manager_instance + if _manager_instance is None: + _manager_instance = VoiceManager(config) + return _manager_instance \ No newline at end of file diff --git a/api/src/main.py b/api/src/main.py index ec1b6c8..8516325 100644 --- a/api/src/main.py +++ b/api/src/main.py @@ -1,10 +1,13 @@ + """ FastAPI OpenAI Compatible API """ +import os import sys from contextlib import asynccontextmanager +import torch import uvicorn from fastapi import FastAPI from fastapi.middleware.cors import CORSMiddleware @@ -41,19 +44,59 @@ setup_logger() @asynccontextmanager async def lifespan(app: FastAPI): """Lifespan context manager for model initialization""" + from .inference.model_manager import get_manager + from .inference.voice_manager import get_manager as get_voice_manager + logger.info("Loading TTS model and voice packs...") - # Initialize service - service = TTSService() - await service.ensure_initialized() - - # Get available voices - voices = await service.list_voices() - voicepack_count = len(voices) + try: + # Initialize managers globally + model_manager = await get_manager() + voice_manager = await get_voice_manager() - # Get device info from model manager - device = "GPU" if settings.use_gpu else "CPU" - model = "ONNX" if settings.use_onnx else "PyTorch" + # Determine backend type based on settings + if settings.use_gpu and torch.cuda.is_available(): + backend_type = 'pytorch_gpu' if not settings.use_onnx else 'onnx_gpu' + else: + backend_type = 'pytorch_cpu' if not settings.use_onnx else 'onnx_cpu' + + # Get backend and initialize model + backend = model_manager.get_backend(backend_type) + + # Use model path directly from settings + model_file = settings.pytorch_model_file if not settings.use_onnx else settings.onnx_model_file + model_path = os.path.join(settings.model_dir, model_file) + + + if not os.path.exists(model_path): + raise RuntimeError(f"Model file not found: {model_path}") + + # Pre-cache default voice and use for warmup + warmup_voice = await voice_manager.load_voice(settings.default_voice, device=backend.device) + logger.info(f"Pre-cached voice {settings.default_voice} for warmup") + + # Initialize model with warmup voice + await model_manager.load_model(model_path, warmup_voice, backend_type) + + # Pre-cache common voices in background + common_voices = ['af', 'af_bella', 'af_sarah', 'af_nicole'] + for voice_name in common_voices: + try: + await voice_manager.load_voice(voice_name, device=backend.device) + logger.debug(f"Pre-cached voice {voice_name}") + except Exception as e: + logger.warning(f"Failed to pre-cache voice {voice_name}: {e}") + + # Get available voices for startup message + voices = await voice_manager.list_voices() + voicepack_count = len(voices) + + # Get device info for startup message + device = "GPU" if settings.use_gpu else "CPU" + model = "ONNX" if settings.use_onnx else "PyTorch" + except Exception as e: + logger.error(f"Failed to initialize model: {e}") + raise boundary = "░" * 2*12 startup_msg = f""" diff --git a/api/src/routers/development.py b/api/src/routers/development.py index 4713440..b8ecf35 100644 --- a/api/src/routers/development.py +++ b/api/src/routers/development.py @@ -1,7 +1,7 @@ from typing import List import numpy as np -from fastapi import 
APIRouter, Depends, HTTPException, Response +from fastapi import APIRouter, Depends, HTTPException, Request, Response from loguru import logger from ..services.audio import AudioService @@ -16,9 +16,9 @@ from ..structures.text_schemas import ( router = APIRouter(tags=["text processing"]) -def get_tts_service() -> TTSService: +async def get_tts_service() -> TTSService: """Dependency to get TTSService instance""" - return TTSService() + return await TTSService.create() # Create service with properly initialized managers @router.post("/text/phonemize", response_model=PhonemeResponse, tags=["deprecated"]) @@ -82,9 +82,6 @@ async def generate_from_phonemes( ) try: - # Ensure service is initialized - await tts_service.ensure_initialized() - # Validate voice exists available_voices = await tts_service.list_voices() if request.voice not in available_voices: diff --git a/api/src/routers/openai_compatible.py b/api/src/routers/openai_compatible.py index 57d1257..4d1ca7a 100644 --- a/api/src/routers/openai_compatible.py +++ b/api/src/routers/openai_compatible.py @@ -1,6 +1,6 @@ from typing import AsyncGenerator, List, Union -from fastapi import APIRouter, Depends, Header, HTTPException, Response, Request +from fastapi import APIRouter, Depends, Header, HTTPException, Request, Response from fastapi.responses import StreamingResponse from loguru import logger @@ -13,10 +13,28 @@ router = APIRouter( responses={404: {"description": "Not found"}}, ) +# Global TTSService instance with lock +_tts_service = None +_init_lock = None -def get_tts_service() -> TTSService: - """Dependency to get TTSService instance with database session""" - return TTSService() # Initialize TTSService with default settings +async def get_tts_service() -> TTSService: + """Get global TTSService instance""" + global _tts_service, _init_lock + + # Create lock if needed + if _init_lock is None: + import asyncio + _init_lock = asyncio.Lock() + + # Initialize service if needed + if _tts_service is None: + async with _init_lock: + # Double check pattern + if _tts_service is None: + _tts_service = await TTSService.create() + logger.info("Created global TTSService instance") + + return _tts_service async def process_voices( @@ -78,11 +96,13 @@ async def stream_audio_chunks( async def create_speech( request: OpenAISpeechRequest, client_request: Request, - tts_service: TTSService = Depends(get_tts_service), x_raw_response: str = Header(None, alias="x-raw-response"), ): """OpenAI-compatible endpoint for text-to-speech""" try: + # Get global service instance + tts_service = await get_tts_service() + # Process voice combination and validate voice_to_use = await process_voices(request.voice, tts_service) @@ -145,9 +165,10 @@ async def create_speech( @router.get("/audio/voices") -async def list_voices(tts_service: TTSService = Depends(get_tts_service)): +async def list_voices(): """List all available voices for text-to-speech""" try: + tts_service = await get_tts_service() voices = await tts_service.list_voices() return {"voices": voices} except Exception as e: @@ -156,9 +177,7 @@ async def list_voices(tts_service: TTSService = Depends(get_tts_service)): @router.post("/audio/voices/combine") -async def combine_voices( - request: Union[str, List[str]], tts_service: TTSService = Depends(get_tts_service) -): +async def combine_voices(request: Union[str, List[str]]): """Combine multiple voices into a new voice. 
Args: @@ -174,6 +193,7 @@ async def combine_voices( - 500: Server error (file system issues, combination failed) """ try: + tts_service = await get_tts_service() combined_voice = await process_voices(request, tts_service) voices = await tts_service.list_voices() return {"voices": voices, "voice": combined_voice} diff --git a/api/src/services/text_processing/phonemizer.py b/api/src/services/text_processing/phonemizer.py index a328bb5..b0782f3 100644 --- a/api/src/services/text_processing/phonemizer.py +++ b/api/src/services/text_processing/phonemizer.py @@ -4,7 +4,7 @@ from abc import ABC, abstractmethod import phonemizer from .normalizer import normalize_text - +phonemizers = {} class PhonemizerBackend(ABC): """Abstract base class for phonemization backends""" @@ -91,8 +91,9 @@ def phonemize(text: str, language: str = "a", normalize: bool = True) -> str: Returns: Phonemized text """ + global phonemizers if normalize: text = normalize_text(text) - - phonemizer = create_phonemizer(language) - return phonemizer.phonemize(text) + if language not in phonemizers: + phonemizers[language]=create_phonemizer(language) + return phonemizers[language].phonemize(text) \ No newline at end of file diff --git a/api/src/services/tts_service.py b/api/src/services/tts_service.py index 9c51b19..83bf8e1 100644 --- a/api/src/services/tts_service.py +++ b/api/src/services/tts_service.py @@ -1,9 +1,8 @@ """TTS service using model and voice managers.""" import io -import os import time -from typing import List, Tuple +from typing import List, Tuple, Optional import numpy as np import scipy.io.wavfile as wavfile @@ -17,9 +16,14 @@ from .audio import AudioNormalizer, AudioService from .text_processing import chunker, normalize_text, process_text +import asyncio + class TTSService: """Text-to-speech service.""" + # Limit concurrent chunk processing + _chunk_semaphore = asyncio.Semaphore(4) + def __init__(self, output_dir: str = None): """Initialize service. @@ -27,53 +31,24 @@ class TTSService: output_dir: Optional output directory for saving audio """ self.output_dir = output_dir - self.model_manager = get_model_manager() - self.voice_manager = get_voice_manager() - self._initialized = False - self._initialization_error = None + self.model_manager = None + self._voice_manager = None - async def ensure_initialized(self): - """Ensure model is initialized.""" - if self._initialized: - return - if self._initialization_error: - raise self._initialization_error - - try: - # Get api directory path (one level up from src) - api_dir = os.path.dirname(os.path.dirname(os.path.dirname(__file__))) + @classmethod + async def create(cls, output_dir: str = None) -> 'TTSService': + """Create and initialize TTSService instance. 
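+
+        An async factory replaces direct construction because the model and
+        voice managers are themselves created asynchronously and must be
+        awaited.
+
+        Example (assumed usage, as in the OpenAI-compatible router):
+            service = await TTSService.create()
+            audio, seconds = await service.generate_audio("Hello", "af")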
+ + Args: + output_dir: Optional output directory for saving audio - # Determine model file and backend based on hardware - if settings.use_gpu and torch.cuda.is_available(): - model_file = settings.pytorch_model_file - backend_type = 'pytorch_gpu' - else: - model_file = settings.onnx_model_file - backend_type = 'onnx_cpu' - - # Construct model path relative to api directory - model_path = os.path.join(api_dir, settings.model_dir, model_file) - - # Ensure model directory exists - os.makedirs(os.path.dirname(model_path), exist_ok=True) - - if not os.path.exists(model_path): - raise RuntimeError(f"Model file not found: {model_path}") - - # Load default voice for warmup - backend = self.model_manager.get_backend(backend_type) - warmup_voice = await self.voice_manager.load_voice(settings.default_voice, device=backend.device) - logger.info(f"Loaded voice {settings.default_voice} for warmup") - - # Initialize model with warmup voice - await self.model_manager.load_model(model_path, warmup_voice, backend_type) - logger.info(f"Initialized model on {backend_type} backend") - self._initialized = True - - except Exception as e: - logger.error(f"Failed to initialize model: {e}") - self._initialization_error = RuntimeError(f"Model initialization failed: {e}") - raise self._initialization_error + Returns: + Initialized TTSService instance + """ + service = cls(output_dir) + # Initialize managers + service.model_manager = await get_model_manager() + service._voice_manager = await get_voice_manager() + return service async def generate_audio( self, text: str, voice: str, speed: float = 1.0 @@ -88,8 +63,8 @@ class TTSService: Returns: Audio samples and processing time """ - await self.ensure_initialized() start_time = time.time() + voice_tensor = None try: # Normalize text @@ -98,31 +73,40 @@ class TTSService: raise ValueError("Text is empty after preprocessing") text = str(normalized) - # Process text into chunks - audio_chunks = [] - for chunk in chunker.split_text(text): - try: - # Convert chunk to token IDs - tokens = process_text(chunk) - if not tokens: - continue + # Get backend and load voice + backend = self.model_manager.get_backend() + voice_tensor = await self._voice_manager.load_voice(voice, device=backend.device) - # Get backend and load voice - backend = self.model_manager.get_backend() - voice_tensor = await self.voice_manager.load_voice(voice, device=backend.device) - - # Generate audio - chunk_audio = await self.model_manager.generate( - tokens, - voice_tensor, - speed=speed - ) - if chunk_audio is not None: - audio_chunks.append(chunk_audio) - except Exception as e: - logger.error(f"Failed to process chunk: '{chunk}'. Error: {str(e)}") - continue + # Get all chunks upfront + chunks = list(chunker.split_text(text)) + if not chunks: + raise ValueError("No text chunks to process") + # Process chunk with concurrency control + async def process_chunk(chunk: str) -> Optional[np.ndarray]: + async with self._chunk_semaphore: + try: + tokens = process_text(chunk) + if not tokens: + return None + + # Generate audio + return await self.model_manager.generate( + tokens, + voice_tensor, + speed=speed + ) + except Exception as e: + logger.error(f"Failed to process chunk: '{chunk}'. 
Error: {str(e)}") + return None + + # Process all chunks concurrently + chunk_results = await asyncio.gather(*[ + process_chunk(chunk) for chunk in chunks + ]) + + # Filter out None results and combine + audio_chunks = [chunk for chunk in chunk_results if chunk is not None] if not audio_chunks: raise ValueError("No audio chunks were generated successfully") @@ -134,6 +118,11 @@ class TTSService: except Exception as e: logger.error(f"Error in audio generation: {str(e)}") raise + finally: + # Always clean up voice tensor + if voice_tensor is not None: + del voice_tensor + torch.cuda.empty_cache() async def generate_audio_stream( self, @@ -153,33 +142,34 @@ class TTSService: Yields: Audio chunks as bytes """ - await self.ensure_initialized() - + # Setup audio processing + stream_normalizer = AudioNormalizer() + voice_tensor = None + try: - # Setup audio processing - stream_normalizer = AudioNormalizer() - # Normalize text normalized = normalize_text(text) if not normalized: raise ValueError("Text is empty after preprocessing") text = str(normalized) - # Process chunks - is_first = True - chunk_gen = chunker.split_text(text) - current_chunk = next(chunk_gen, None) + # Get backend and load voice + backend = self.model_manager.get_backend() + voice_tensor = await self._voice_manager.load_voice(voice, device=backend.device) + + # Get all chunks upfront + chunks = list(chunker.split_text(text)) + if not chunks: + raise ValueError("No text chunks to process") + + # Process chunk with concurrency control + async def process_chunk(chunk: str, is_first: bool, is_last: bool) -> Optional[bytes]: + async with self._chunk_semaphore: + try: + tokens = process_text(chunk) + if not tokens: + return None - while current_chunk is not None: - next_chunk = next(chunk_gen, None) - try: - # Convert chunk to token IDs - tokens = process_text(current_chunk) - if tokens: - # Get backend and load voice - backend = self.model_manager.get_backend() - voice_tensor = await self.voice_manager.load_voice(voice, device=backend.device) - # Generate audio chunk_audio = await self.model_manager.generate( tokens, @@ -189,26 +179,38 @@ class TTSService: if chunk_audio is not None: # Convert to bytes - chunk_bytes = AudioService.convert_audio( + return AudioService.convert_audio( chunk_audio, 24000, output_format, is_first_chunk=is_first, normalizer=stream_normalizer, - is_last_chunk=(next_chunk is None), + is_last_chunk=is_last, stream=True ) - yield chunk_bytes - is_first = False + except Exception as e: + logger.error(f"Failed to generate audio for chunk: '{chunk}'. Error: {str(e)}") + return None - except Exception as e: - logger.error(f"Failed to generate audio for chunk: '{current_chunk}'. Error: {str(e)}") + # Create tasks for all chunks + tasks = [ + process_chunk(chunk, i==0, i==len(chunks)-1) + for i, chunk in enumerate(chunks) + ] - current_chunk = next_chunk + # Process chunks concurrently and yield results in order + for chunk_bytes in await asyncio.gather(*tasks): + if chunk_bytes is not None: + yield chunk_bytes except Exception as e: logger.error(f"Error in audio generation stream: {str(e)}") raise + finally: + # Always clean up voice tensor + if voice_tensor is not None: + del voice_tensor + torch.cuda.empty_cache() async def combine_voices(self, voices: List[str]) -> str: """Combine multiple voices. 
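+
+        Delegates to the voice manager, which saves the combined tensor as
+        a new .pt voicepack and returns its name.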
@@ -219,8 +221,7 @@ class TTSService: Returns: Name of combined voice """ - await self.ensure_initialized() - return await self.voice_manager.combine_voices(voices) + return await self._voice_manager.combine_voices(voices) async def list_voices(self) -> List[str]: """List available voices. @@ -228,7 +229,7 @@ class TTSService: Returns: List of voice names """ - return await self.voice_manager.list_voices() + return await self._voice_manager.list_voices() def _audio_to_bytes(self, audio: np.ndarray) -> bytes: """Convert audio to WAV bytes. diff --git a/api/src/services/warmup.py b/api/src/services/warmup.py deleted file mode 100644 index 3a50000..0000000 --- a/api/src/services/warmup.py +++ /dev/null @@ -1,60 +0,0 @@ -import os -from typing import List, Tuple - -import torch -from loguru import logger - -from ..core.config import settings -from .tts_model import TTSModel -from .tts_service import TTSService - - -class WarmupService: - """Service for warming up TTS models and voice caches""" - - def __init__(self): - """Initialize warmup service and ensure model is ready""" - # Initialize model if not already initialized - if TTSModel._instance is None: - TTSModel.initialize(settings.model_dir) - self.tts_service = TTSService() - - def load_voices(self) -> List[Tuple[str, torch.Tensor]]: - """Load and cache voices up to LRU limit""" - # Get all voices sorted by filename length (shorter names first, usually base voices) - voice_files = sorted( - [f for f in os.listdir(TTSModel.VOICES_DIR) if f.endswith(".pt")], key=len - ) - - n_voices_cache = 1 - loaded_voices = [] - for voice_file in voice_files[:n_voices_cache]: - try: - voice_path = os.path.join(TTSModel.VOICES_DIR, voice_file) - # load using service, lru cache - voicepack = self.tts_service._load_voice(voice_path) - loaded_voices.append( - (voice_file[:-3], voicepack) - ) # Store name and tensor - # voicepack = torch.load(voice_path, map_location=TTSModel.get_device(), weights_only=True) - # logger.info(f"Loaded voice {voice_file[:-3]} into cache") - except Exception as e: - logger.error(f"Failed to load voice {voice_file}: {e}") - logger.info(f"Pre-loaded {len(loaded_voices)} voices into cache") - return loaded_voices - - async def warmup_voices( - self, warmup_text: str, loaded_voices: List[Tuple[str, torch.Tensor]] - ): - """Warm up voice inference and streaming""" - n_warmups = 1 - for voice_name, _ in loaded_voices[:n_warmups]: - try: - logger.info(f"Running warmup inference on voice {voice_name}") - async for _ in self.tts_service.generate_audio_stream( - warmup_text, voice_name, 1.0, "pcm" - ): - pass # Process all chunks to properly warm up - logger.info(f"Completed warmup for voice {voice_name}") - except Exception as e: - logger.warning(f"Warmup failed for voice {voice_name}: {e}") diff --git a/docker/gpu/docker-compose.yml b/docker/gpu/docker-compose.yml index 458eff9..f27e15b 100644 --- a/docker/gpu/docker-compose.yml +++ b/docker/gpu/docker-compose.yml @@ -10,7 +10,7 @@ services: ports: - "8880:8880" environment: - - PYTHONPATH=/app + - PYTHONPATH=/app:/app/api - USE_GPU=true - USE_ONNX=false - PYTHONUNBUFFERED=1 diff --git a/examples/openai_streaming_audio.py b/examples/openai_streaming_audio.py index 35ef58f..353ee3d 100644 --- a/examples/openai_streaming_audio.py +++ b/examples/openai_streaming_audio.py @@ -25,9 +25,7 @@ def main() -> None: def stream_to_speakers() -> None: import pyaudio - player_stream = pyaudio.PyAudio().open( - format=pyaudio.paInt16, channels=1, rate=24000, output=True - ) + player_stream = 
pyaudio.PyAudio().open(format=pyaudio.paInt16, channels=1, rate=24000, output=True) start_time = time.time() diff --git a/examples/simul_file_test.py b/examples/simul_file_test.py new file mode 100644 index 0000000..c1f14a0 --- /dev/null +++ b/examples/simul_file_test.py @@ -0,0 +1,53 @@ +#!/usr/bin/env rye run python +import asyncio +import time +from pathlib import Path +from openai import AsyncOpenAI + +# Initialize async client +openai = AsyncOpenAI(base_url="http://localhost:8880/v1", api_key="not-needed-for-local") + +async def save_to_file(text: str, file_id: int) -> None: + """Save TTS output to file asynchronously""" + speech_file_path = Path(__file__).parent / f"speech_{file_id}.mp3" + + start_time = time.time() + print(f"Starting file {file_id}") + + try: + # Use streaming endpoint with mp3 format + async with openai.audio.speech.with_streaming_response.create( + model="kokoro", + voice="af_bella", + input=text, + response_format="mp3" + ) as response: + print(f"File {file_id} - Time to first byte: {int((time.time() - start_time) * 1000)}ms") + + # Open file in binary write mode + with open(speech_file_path, 'wb') as f: + async for chunk in response.iter_bytes(): + f.write(chunk) + + print(f"File {file_id} completed in {int((time.time() - start_time) * 1000)}ms") + except Exception as e: + print(f"Error processing file {file_id}: {e}") + +async def main() -> None: + # Different text samples for variety + texts = [ + "The quick brown fox jumped over the lazy dogs. I see skies of blue and clouds of white", + "I see skies of blue and clouds of white. I see skies of blue and clouds of white", + ] + + # Create tasks for saving to files + file_tasks = [ + save_to_file(text, i) + for i, text in enumerate(texts) + ] + + # Run file tasks concurrently + await asyncio.gather(*file_tasks) + +if __name__ == "__main__": + asyncio.run(main()) \ No newline at end of file diff --git a/examples/simul_openai_streaming_audio.py b/examples/simul_openai_streaming_audio.py new file mode 100644 index 0000000..befae01 --- /dev/null +++ b/examples/simul_openai_streaming_audio.py @@ -0,0 +1,91 @@ +#!/usr/bin/env rye run python +import asyncio +import time +from pathlib import Path +import pyaudio +from openai import AsyncOpenAI + +# Initialize async client +openai = AsyncOpenAI(base_url="http://localhost:8880/v1", api_key="not-needed-for-local") + +# Create a shared PyAudio instance +p = pyaudio.PyAudio() + +async def stream_to_speakers(text: str, stream_id: int) -> None: + """Stream TTS audio to speakers asynchronously""" + player_stream = p.open( + format=pyaudio.paInt16, + channels=1, + rate=24000, + output=True + ) + + start_time = time.time() + print(f"Starting stream {stream_id}") + + try: + async with openai.audio.speech.with_streaming_response.create( + model="kokoro", + voice="af_bella", + response_format="pcm", + input=text + ) as response: + print(f"Stream {stream_id} - Time to first byte: {int((time.time() - start_time) * 1000)}ms") + + async for chunk in response.iter_bytes(chunk_size=1024): + player_stream.write(chunk) + # Small sleep to allow other coroutines to run + await asyncio.sleep(0.001) + + print(f"Stream {stream_id} completed in {int((time.time() - start_time) * 1000)}ms") + + finally: + player_stream.stop_stream() + player_stream.close() + +async def save_to_file(text: str, file_id: int) -> None: + """Save TTS output to file asynchronously""" + speech_file_path = Path(__file__).parent / f"speech_{file_id}.mp3" + + async with openai.audio.speech.with_streaming_response.create( + 
model="kokoro", + voice="af_bella", + input=text + ) as response: + # Open file in binary write mode + with open(speech_file_path, 'wb') as f: + async for chunk in response.iter_bytes(): + f.write(chunk) + print(f"File {file_id} saved to {speech_file_path}") + +async def main() -> None: + # Different text samples for variety + texts = [ + "The quick brown fox jumped over the lazy dogs. I see skies of blue and clouds of white", + "I see skies of blue and clouds of white. I see skies of blue and clouds of white", + ] + + # Create tasks for streaming to speakers + speaker_tasks = [ + stream_to_speakers(text, i) + for i, text in enumerate(texts) + ] + + # Create tasks for saving to files + file_tasks = [ + save_to_file(text, i) + for i, text in enumerate(texts) + ] + + # Combine all tasks + all_tasks = speaker_tasks + file_tasks + + # Run all tasks concurrently + try: + await asyncio.gather(*all_tasks) + finally: + # Clean up PyAudio + p.terminate() + +if __name__ == "__main__": + asyncio.run(main()) \ No newline at end of file diff --git a/examples/simul_speaker_test.py b/examples/simul_speaker_test.py new file mode 100644 index 0000000..d2e4354 --- /dev/null +++ b/examples/simul_speaker_test.py @@ -0,0 +1,66 @@ +#!/usr/bin/env rye run python +import asyncio +import time +import pyaudio +from openai import AsyncOpenAI + +# Initialize async client +openai = AsyncOpenAI(base_url="http://localhost:8880/v1", api_key="not-needed-for-local") + +# Create a shared PyAudio instance +p = pyaudio.PyAudio() + +async def stream_to_speakers(text: str, stream_id: int) -> None: + """Stream TTS audio to speakers asynchronously""" + player_stream = p.open( + format=pyaudio.paInt16, + channels=1, + rate=24000, + output=True + ) + + start_time = time.time() + print(f"Starting stream {stream_id}") + + try: + async with openai.audio.speech.with_streaming_response.create( + model="kokoro", + voice="af_bella", + response_format="pcm", + input=text + ) as response: + print(f"Stream {stream_id} - Time to first byte: {int((time.time() - start_time) * 1000)}ms") + + async for chunk in response.iter_bytes(chunk_size=1024): + player_stream.write(chunk) + # Small sleep to allow other coroutines to run + await asyncio.sleep(0.001) + + print(f"Stream {stream_id} completed in {int((time.time() - start_time) * 1000)}ms") + + finally: + player_stream.stop_stream() + player_stream.close() + +async def main() -> None: + # Different text samples for variety + texts = [ + "The quick brown fox jumped over the lazy dogs. I see skies of blue and clouds of white", + "I see skies of blue and clouds of white. 
I see skies of blue and clouds of white",
+    ]
+
+    # Create tasks for streaming to speakers
+    speaker_tasks = [
+        stream_to_speakers(text, i)
+        for i, text in enumerate(texts)
+    ]
+
+    # Run speaker tasks concurrently
+    try:
+        await asyncio.gather(*speaker_tasks)
+    finally:
+        # Clean up PyAudio
+        p.terminate()
+
+if __name__ == "__main__":
+    asyncio.run(main())
\ No newline at end of file
diff --git a/examples/speech.mp3 b/examples/speech.mp3
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..be3431b8562f5065dfb4f5f81ff1eae2a4511047 100644
GIT binary patch
literal 73440
[73,440 bytes of base85-encoded MP3 data omitted]
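The new simul_* examples assume a compatible server is already listening on localhost:8880 (the base_url they hardcode) and need the openai package, plus pyaudio for the two speaker tests; each is run directly, for example: python examples/simul_speaker_test.py.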