From 7df2a68fb400d2f4b2090ff1b6d71d6f672f6bb1 Mon Sep 17 00:00:00 2001
From: remsky
Date: Fri, 3 Jan 2025 17:54:17 -0700
Subject: [PATCH] - CPU ONNX + PyTorch CUDA, functional - Incorporated text
processing module as service, towards modularization and optimizations -
Added text processing router for phonemization - Enhanced benchmark
statistics with real-time speed metrics
---
.gitignore | 10 +-
README.md | 4 +-
api/src/core/config.py | 3 +-
api/src/core/kokoro.py | 185 ++
api/src/main.py | 4 +-
api/src/routers/text_processing.py | 30 +
api/src/services/text_processing/__init__.py | 13 +
.../services/text_processing/normalizer.py | 111 +
.../services/text_processing/phonemizer.py | 97 +
.../services/text_processing/vocabulary.py | 37 +
api/src/services/tts_base.py | 56 +-
api/src/services/tts_cpu.py | 75 +-
api/src/services/tts_gpu.py | 101 +-
api/src/services/tts_service.py | 36 +-
api/src/structures/text_schemas.py | 9 +
api/tests/conftest.py | 83 +-
api/tests/test_tts_implementations.py | 144 +
api/tests/test_tts_service.py | 396 +--
docker-compose.cpu.yml | 22 +-
docker-compose.yml | 22 +-
examples/__init__.py | 0
.../assorted_checks/benchmarks/__init__.py | 0
.../benchmarks/benchmark_tts_rtf.py | 241 ++
.../benchmarks/depr_benchmark_tts.py | 165 ++
.../benchmarks/lib/__init__.py | 0
.../benchmarks/lib/shared_benchmark_utils.py | 111 +
.../benchmarks/lib/shared_plotting.py | 176 ++
.../benchmarks/lib/shared_utils.py | 174 ++
.../output_data/benchmark_results.json | 111 +
.../output_data}/benchmark_results_cpu.json | 0
.../output_data/benchmark_results_rtf.json | 300 ++
.../output_data}/benchmark_stats_cpu.txt | 0
.../output_data/benchmark_stats_rtf.txt | 9 +
.../cpu_benchmark_results_rtf.json | 2506 +++++++++++++++++
.../output_data/cpu_benchmark_stats_rtf.txt | 23 +
.../gpu_benchmark_results_rtf.json | 607 ++++
.../output_data/gpu_benchmark_stats_rtf.txt | 23 +
.../output_plots/cpu_processing_time_rtf.png | Bin 0 -> 239297 bytes
.../output_plots/cpu_realtime_factor_rtf.png | Bin 0 -> 217173 bytes
.../output_plots/cpu_system_usage_rtf.png | Bin 0 -> 459933 bytes
.../output_plots}/format_comparison.png | Bin
.../output_plots/gpu_processing_time_rtf.png | Bin 0 -> 237194 bytes
.../output_plots/gpu_realtime_factor_rtf.png | Bin 0 -> 206597 bytes
.../output_plots/gpu_system_usage_rtf.png | Bin 0 -> 459231 bytes
.../benchmarks/output_plots}/gpu_usage.png | Bin
.../benchmarks/the_time_machine_hg_wells.txt | 0
.../test_analyze_combined_voices.py | 4 +-
.../test_formats}/test_audio_formats.py | 0
.../test_openai}/test_openai_tts.py | 0
.../test_voices}/test_all_voices.py | 0
examples/benchmarks/analysis_comparison.png | Bin 772566 -> 0 bytes
examples/benchmarks/benchmark_results.json | 531 ----
examples/benchmarks/benchmark_stats.txt | 19 -
examples/benchmarks/benchmark_tts.py | 406 ---
examples/benchmarks/benchmark_tts_rtf.py | 314 ---
examples/benchmarks/processing_time.png | Bin 290136 -> 0 bytes
examples/benchmarks/processing_time_cpu.png | Bin 260375 -> 0 bytes
examples/benchmarks/realtime_factor.png | Bin 228393 -> 0 bytes
examples/benchmarks/realtime_factor_cpu.png | Bin 212467 -> 0 bytes
examples/benchmarks/system_usage.png | Bin 416036 -> 0 bytes
60 files changed, 5478 insertions(+), 1680 deletions(-)
create mode 100644 api/src/core/kokoro.py
create mode 100644 api/src/routers/text_processing.py
create mode 100644 api/src/services/text_processing/__init__.py
create mode 100644 api/src/services/text_processing/normalizer.py
create mode 100644 api/src/services/text_processing/phonemizer.py
create mode 100644 api/src/services/text_processing/vocabulary.py
create mode 100644 api/src/structures/text_schemas.py
create mode 100644 api/tests/test_tts_implementations.py
create mode 100644 examples/__init__.py
create mode 100644 examples/assorted_checks/benchmarks/__init__.py
create mode 100644 examples/assorted_checks/benchmarks/benchmark_tts_rtf.py
create mode 100644 examples/assorted_checks/benchmarks/depr_benchmark_tts.py
create mode 100644 examples/assorted_checks/benchmarks/lib/__init__.py
create mode 100644 examples/assorted_checks/benchmarks/lib/shared_benchmark_utils.py
create mode 100644 examples/assorted_checks/benchmarks/lib/shared_plotting.py
create mode 100644 examples/assorted_checks/benchmarks/lib/shared_utils.py
create mode 100644 examples/assorted_checks/benchmarks/output_data/benchmark_results.json
rename examples/{benchmarks => assorted_checks/benchmarks/output_data}/benchmark_results_cpu.json (100%)
create mode 100644 examples/assorted_checks/benchmarks/output_data/benchmark_results_rtf.json
rename examples/{benchmarks => assorted_checks/benchmarks/output_data}/benchmark_stats_cpu.txt (100%)
create mode 100644 examples/assorted_checks/benchmarks/output_data/benchmark_stats_rtf.txt
create mode 100644 examples/assorted_checks/benchmarks/output_data/cpu_benchmark_results_rtf.json
create mode 100644 examples/assorted_checks/benchmarks/output_data/cpu_benchmark_stats_rtf.txt
create mode 100644 examples/assorted_checks/benchmarks/output_data/gpu_benchmark_results_rtf.json
create mode 100644 examples/assorted_checks/benchmarks/output_data/gpu_benchmark_stats_rtf.txt
create mode 100644 examples/assorted_checks/benchmarks/output_plots/cpu_processing_time_rtf.png
create mode 100644 examples/assorted_checks/benchmarks/output_plots/cpu_realtime_factor_rtf.png
create mode 100644 examples/assorted_checks/benchmarks/output_plots/cpu_system_usage_rtf.png
rename examples/{benchmarks => assorted_checks/benchmarks/output_plots}/format_comparison.png (100%)
create mode 100644 examples/assorted_checks/benchmarks/output_plots/gpu_processing_time_rtf.png
create mode 100644 examples/assorted_checks/benchmarks/output_plots/gpu_realtime_factor_rtf.png
create mode 100644 examples/assorted_checks/benchmarks/output_plots/gpu_system_usage_rtf.png
rename examples/{benchmarks => assorted_checks/benchmarks/output_plots}/gpu_usage.png (100%)
rename examples/{ => assorted_checks}/benchmarks/the_time_machine_hg_wells.txt (100%)
rename examples/{ => assorted_checks/test_combinations}/test_analyze_combined_voices.py (99%)
rename examples/{ => assorted_checks/test_formats}/test_audio_formats.py (100%)
rename examples/{ => assorted_checks/test_openai}/test_openai_tts.py (100%)
rename examples/{ => assorted_checks/test_voices}/test_all_voices.py (100%)
delete mode 100644 examples/benchmarks/analysis_comparison.png
delete mode 100644 examples/benchmarks/benchmark_results.json
delete mode 100644 examples/benchmarks/benchmark_stats.txt
delete mode 100644 examples/benchmarks/benchmark_tts.py
delete mode 100644 examples/benchmarks/benchmark_tts_rtf.py
delete mode 100644 examples/benchmarks/processing_time.png
delete mode 100644 examples/benchmarks/processing_time_cpu.png
delete mode 100644 examples/benchmarks/realtime_factor.png
delete mode 100644 examples/benchmarks/realtime_factor_cpu.png
delete mode 100644 examples/benchmarks/system_usage.png
diff --git a/.gitignore b/.gitignore
index aebbfa7..f61cc2d 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,5 +1,6 @@
-output/
+output/*
+output_audio/*
ui/data/*
*.db
@@ -16,3 +17,10 @@ env/
.coverage
+examples/assorted_checks/benchmarks/output_audio/*
+examples/assorted_checks/test_combinations/output/*
+examples/assorted_checks/test_openai/output/*
+
+examples/assorted_checks/test_voices/output/*
+examples/assorted_checks/test_formats/output/*
+ui/RepoScreenshot.png
diff --git a/README.md b/README.md
index 9732bd6..da78651 100644
--- a/README.md
+++ b/README.md
@@ -3,8 +3,8 @@
# Kokoro TTS API
-[]()
-[]()
+[]()
+[]()
[](https://huggingface.co/hexgrad/Kokoro-82M/tree/c3b0d86e2a980e027ef71c28819ea02e351c2667) [](https://huggingface.co/spaces/Remsky/Kokoro-TTS-Zero)
Dockerized FastAPI wrapper for [Kokoro-82M](https://huggingface.co/hexgrad/Kokoro-82M) text-to-speech model
diff --git a/api/src/core/config.py b/api/src/core/config.py
index 52aea19..693ad82 100644
--- a/api/src/core/config.py
+++ b/api/src/core/config.py
@@ -14,7 +14,8 @@ class Settings(BaseSettings):
output_dir_size_limit_mb: float = 500.0 # Maximum size of output directory in MB
default_voice: str = "af"
model_dir: str = "/app/Kokoro-82M" # Base directory for model files
- model_path: str = "kokoro-v0_19.pth"
+ pytorch_model_path: str = "kokoro-v0_19.pth"
+ onnx_model_path: str = "kokoro-v0_19.onnx"
voices_dir: str = "voices"
sample_rate: int = 24000
diff --git a/api/src/core/kokoro.py b/api/src/core/kokoro.py
new file mode 100644
index 0000000..e3d6c39
--- /dev/null
+++ b/api/src/core/kokoro.py
@@ -0,0 +1,185 @@
+import re
+
+import torch
+import phonemizer
+
+
+def split_num(num):
+ num = num.group()
+ if "." in num:
+ return num
+ elif ":" in num:
+ h, m = [int(n) for n in num.split(":")]
+ if m == 0:
+ return f"{h} o'clock"
+ elif m < 10:
+ return f"{h} oh {m}"
+ return f"{h} {m}"
+ year = int(num[:4])
+ if year < 1100 or year % 1000 < 10:
+ return num
+ left, right = num[:2], int(num[2:4])
+ s = "s" if num.endswith("s") else ""
+ if 100 <= year % 1000 <= 999:
+ if right == 0:
+ return f"{left} hundred{s}"
+ elif right < 10:
+ return f"{left} oh {right}{s}"
+ return f"{left} {right}{s}"
+
+
+def flip_money(m):
+ m = m.group()
+ bill = "dollar" if m[0] == "$" else "pound"
+ if m[-1].isalpha():
+ return f"{m[1:]} {bill}s"
+ elif "." not in m:
+ s = "" if m[1:] == "1" else "s"
+ return f"{m[1:]} {bill}{s}"
+ b, c = m[1:].split(".")
+ s = "" if b == "1" else "s"
+ c = int(c.ljust(2, "0"))
+ coins = (
+ f"cent{'' if c == 1 else 's'}"
+ if m[0] == "$"
+ else ("penny" if c == 1 else "pence")
+ )
+ return f"{b} {bill}{s} and {c} {coins}"
+
+
+def point_num(num):
+ a, b = num.group().split(".")
+ return " point ".join([a, " ".join(b)])
+
+
+def normalize_text(text):
+ text = text.replace(chr(8216), "'").replace(chr(8217), "'")
+ text = text.replace("«", chr(8220)).replace("»", chr(8221))
+ text = text.replace(chr(8220), '"').replace(chr(8221), '"')
+ text = text.replace("(", "«").replace(")", "»")
+ for a, b in zip("、。!,:;?", ",.!,:;?"):
+ text = text.replace(a, b + " ")
+ text = re.sub(r"[^\S \n]", " ", text)
+ text = re.sub(r" +", " ", text)
+ text = re.sub(r"(?<=\n) +(?=\n)", "", text)
+ text = re.sub(r"\bD[Rr]\.(?= [A-Z])", "Doctor", text)
+ text = re.sub(r"\b(?:Mr\.|MR\.(?= [A-Z]))", "Mister", text)
+ text = re.sub(r"\b(?:Ms\.|MS\.(?= [A-Z]))", "Miss", text)
+ text = re.sub(r"\b(?:Mrs\.|MRS\.(?= [A-Z]))", "Mrs", text)
+ text = re.sub(r"\betc\.(?! [A-Z])", "etc", text)
+ text = re.sub(r"(?i)\b(y)eah?\b", r"\1e'a", text)
+ text = re.sub(
+ r"\d*\.\d+|\b\d{4}s?\b|(? 510:
+ tokens = tokens[:510]
+ print("Truncated to 510 tokens")
+ ref_s = voicepack[len(tokens)]
+ out = forward(model, tokens, ref_s, speed)
+ ps = "".join(next(k for k, v in VOCAB.items() if i == v) for i in tokens)
+ return out, ps
diff --git a/api/src/main.py b/api/src/main.py
index 1521a14..c2a567e 100644
--- a/api/src/main.py
+++ b/api/src/main.py
@@ -13,6 +13,7 @@ from .core.config import settings
from .services.tts_model import TTSModel
from .services.tts_service import TTSService
from .routers.openai_compatible import router as openai_router
+from .routers.text_processing import router as text_router
@asynccontextmanager
@@ -45,8 +46,9 @@ app.add_middleware(
allow_headers=["*"],
)
-# Include OpenAI compatible router
+# Include routers
app.include_router(openai_router, prefix="/v1")
+app.include_router(text_router)
# Health check endpoint
diff --git a/api/src/routers/text_processing.py b/api/src/routers/text_processing.py
new file mode 100644
index 0000000..9e1ce3a
--- /dev/null
+++ b/api/src/routers/text_processing.py
@@ -0,0 +1,30 @@
+from fastapi import APIRouter
+from ..structures.text_schemas import PhonemeRequest, PhonemeResponse
+from ..services.text_processing import phonemize, tokenize
+
+router = APIRouter(
+ prefix="/text",
+ tags=["text processing"]
+)
+
+@router.post("/phonemize", response_model=PhonemeResponse)
+async def phonemize_text(request: PhonemeRequest) -> PhonemeResponse:
+ """Convert text to phonemes and tokens: Rough attempt
+
+ Args:
+ request: Request containing text and language
+
+ Returns:
+ Phonemes and token IDs
+ """
+ # Get phonemes
+ phonemes = phonemize(request.text, request.language)
+
+ # Get tokens
+ tokens = tokenize(phonemes)
+ tokens = [0] + tokens + [0] # Add start/end tokens
+
+ return PhonemeResponse(
+ phonemes=phonemes,
+ tokens=tokens
+ )
diff --git a/api/src/services/text_processing/__init__.py b/api/src/services/text_processing/__init__.py
new file mode 100644
index 0000000..f945e18
--- /dev/null
+++ b/api/src/services/text_processing/__init__.py
@@ -0,0 +1,13 @@
+from .normalizer import normalize_text
+from .phonemizer import phonemize, PhonemizerBackend, EspeakBackend
+from .vocabulary import tokenize, decode_tokens, VOCAB
+
+__all__ = [
+ 'normalize_text',
+ 'phonemize',
+ 'tokenize',
+ 'decode_tokens',
+ 'VOCAB',
+ 'PhonemizerBackend',
+ 'EspeakBackend'
+]
diff --git a/api/src/services/text_processing/normalizer.py b/api/src/services/text_processing/normalizer.py
new file mode 100644
index 0000000..db5b7db
--- /dev/null
+++ b/api/src/services/text_processing/normalizer.py
@@ -0,0 +1,111 @@
+import re
+
+def split_num(num: re.Match) -> str:
+ """Handle number splitting for various formats"""
+ num = num.group()
+ if "." in num:
+ return num
+ elif ":" in num:
+ h, m = [int(n) for n in num.split(":")]
+ if m == 0:
+ return f"{h} o'clock"
+ elif m < 10:
+ return f"{h} oh {m}"
+ return f"{h} {m}"
+ year = int(num[:4])
+ if year < 1100 or year % 1000 < 10:
+ return num
+ left, right = num[:2], int(num[2:4])
+ s = "s" if num.endswith("s") else ""
+ if 100 <= year % 1000 <= 999:
+ if right == 0:
+ return f"{left} hundred{s}"
+ elif right < 10:
+ return f"{left} oh {right}{s}"
+ return f"{left} {right}{s}"
+
+def handle_money(m: re.Match) -> str:
+ """Convert money expressions to spoken form"""
+ m = m.group()
+ bill = "dollar" if m[0] == "$" else "pound"
+ if m[-1].isalpha():
+ return f"{m[1:]} {bill}s"
+ elif "." not in m:
+ s = "" if m[1:] == "1" else "s"
+ return f"{m[1:]} {bill}{s}"
+ b, c = m[1:].split(".")
+ s = "" if b == "1" else "s"
+ c = int(c.ljust(2, "0"))
+ coins = (
+ f"cent{'' if c == 1 else 's'}"
+ if m[0] == "$"
+ else ("penny" if c == 1 else "pence")
+ )
+ return f"{b} {bill}{s} and {c} {coins}"
+
+def handle_decimal(num: re.Match) -> str:
+ """Convert decimal numbers to spoken form"""
+ a, b = num.group().split(".")
+ return " point ".join([a, " ".join(b)])
+
+def normalize_text(text: str) -> str:
+ """Normalize text for TTS processing
+
+ Args:
+ text: Input text to normalize
+
+ Returns:
+ Normalized text
+ """
+ # Replace quotes and brackets
+ text = text.replace(chr(8216), "'").replace(chr(8217), "'")
+ text = text.replace("«", chr(8220)).replace("»", chr(8221))
+ text = text.replace(chr(8220), '"').replace(chr(8221), '"')
+ text = text.replace("(", "«").replace(")", "»")
+
+ # Handle CJK punctuation
+ for a, b in zip("、。!,:;?", ",.!,:;?"):
+ text = text.replace(a, b + " ")
+
+ # Clean up whitespace
+ text = re.sub(r"[^\S \n]", " ", text)
+ text = re.sub(r" +", " ", text)
+ text = re.sub(r"(?<=\n) +(?=\n)", "", text)
+
+ # Handle titles and abbreviations
+ text = re.sub(r"\bD[Rr]\.(?= [A-Z])", "Doctor", text)
+ text = re.sub(r"\b(?:Mr\.|MR\.(?= [A-Z]))", "Mister", text)
+ text = re.sub(r"\b(?:Ms\.|MS\.(?= [A-Z]))", "Miss", text)
+ text = re.sub(r"\b(?:Mrs\.|MRS\.(?= [A-Z]))", "Mrs", text)
+ text = re.sub(r"\betc\.(?! [A-Z])", "etc", text)
+
+ # Handle common words
+ text = re.sub(r"(?i)\b(y)eah?\b", r"\1e'a", text)
+
+ # Handle numbers and money
+ text = re.sub(
+ r"\d*\.\d+|\b\d{4}s?\b|(? str:
+ """Convert text to phonemes
+
+ Args:
+ text: Text to convert to phonemes
+
+ Returns:
+ Phonemized text
+ """
+ pass
+
+class EspeakBackend(PhonemizerBackend):
+ """Espeak-based phonemizer implementation"""
+
+ def __init__(self, language: str):
+ """Initialize espeak backend
+
+ Args:
+ language: Language code ('en-us' or 'en-gb')
+ """
+ self.backend = phonemizer.backend.EspeakBackend(
+ language=language,
+ preserve_punctuation=True,
+ with_stress=True
+ )
+ self.language = language
+
+ def phonemize(self, text: str) -> str:
+ """Convert text to phonemes using espeak
+
+ Args:
+ text: Text to convert to phonemes
+
+ Returns:
+ Phonemized text
+ """
+ # Phonemize text
+ ps = self.backend.phonemize([text])
+ ps = ps[0] if ps else ""
+
+ # Handle special cases
+ ps = ps.replace("kəkˈoːɹoʊ", "kˈoʊkəɹoʊ").replace("kəkˈɔːɹəʊ", "kˈəʊkəɹəʊ")
+ ps = ps.replace("ʲ", "j").replace("r", "ɹ").replace("x", "k").replace("ɬ", "l")
+ ps = re.sub(r"(?<=[a-zɹː])(?=hˈʌndɹɪd)", " ", ps)
+ ps = re.sub(r' z(?=[;:,.!?¡¿—…"«»"" ]|$)', "z", ps)
+
+ # Language-specific rules
+ if self.language == "en-us":
+ ps = re.sub(r"(?<=nˈaɪn)ti(?!ː)", "di", ps)
+
+ return ps.strip()
+
+def create_phonemizer(language: str = "a") -> PhonemizerBackend:
+ """Factory function to create phonemizer backend
+
+ Args:
+ language: Language code ('a' for US English, 'b' for British English)
+
+ Returns:
+ Phonemizer backend instance
+ """
+ # Map language codes to espeak language codes
+ lang_map = {
+ "a": "en-us",
+ "b": "en-gb"
+ }
+
+ if language not in lang_map:
+ raise ValueError(f"Unsupported language code: {language}")
+
+ return EspeakBackend(lang_map[language])
+
+def phonemize(text: str, language: str = "a", normalize: bool = True) -> str:
+ """Convert text to phonemes
+
+ Args:
+ text: Text to convert to phonemes
+ language: Language code ('a' for US English, 'b' for British English)
+ normalize: Whether to normalize text before phonemization
+
+ Returns:
+ Phonemized text
+ """
+ if normalize:
+ text = normalize_text(text)
+
+ phonemizer = create_phonemizer(language)
+ return phonemizer.phonemize(text)
diff --git a/api/src/services/text_processing/vocabulary.py b/api/src/services/text_processing/vocabulary.py
new file mode 100644
index 0000000..66af961
--- /dev/null
+++ b/api/src/services/text_processing/vocabulary.py
@@ -0,0 +1,37 @@
+def get_vocab():
+ """Get the vocabulary dictionary mapping characters to token IDs"""
+ _pad = "$"
+ _punctuation = ';:,.!?¡¿—…"«»"" '
+ _letters = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
+ _letters_ipa = "ɑɐɒæɓʙβɔɕçɗɖðʤəɘɚɛɜɝɞɟʄɡɠɢʛɦɧħɥʜɨɪʝɭɬɫɮʟɱɯɰŋɳɲɴøɵɸθœɶʘɹɺɾɻʀʁɽʂʃʈʧʉʊʋⱱʌɣɤʍχʎʏʑʐʒʔʡʕʢǀǁǂǃˈˌːˑʼʴʰʱʲʷˠˤ˞↓↑→↗↘'̩'ᵻ"
+
+ # Create vocabulary dictionary
+ symbols = [_pad] + list(_punctuation) + list(_letters) + list(_letters_ipa)
+ return {symbol: i for i, symbol in enumerate(symbols)}
+
+# Initialize vocabulary
+VOCAB = get_vocab()
+
+def tokenize(phonemes: str) -> list[int]:
+ """Convert phonemes string to token IDs
+
+ Args:
+ phonemes: String of phonemes to tokenize
+
+ Returns:
+ List of token IDs
+ """
+ return [i for i in map(VOCAB.get, phonemes) if i is not None]
+
+def decode_tokens(tokens: list[int]) -> str:
+ """Convert token IDs back to phonemes string
+
+ Args:
+ tokens: List of token IDs
+
+ Returns:
+ String of phonemes
+ """
+ # Create reverse mapping
+ id_to_symbol = {i: s for s, i in VOCAB.items()}
+ return "".join(id_to_symbol[t] for t in tokens)
diff --git a/api/src/services/tts_base.py b/api/src/services/tts_base.py
index 1ef913d..f502373 100644
--- a/api/src/services/tts_base.py
+++ b/api/src/services/tts_base.py
@@ -1,15 +1,13 @@
import os
import threading
from abc import ABC, abstractmethod
+from typing import List, Tuple
import torch
import numpy as np
from loguru import logger
-from kokoro import tokenize, phonemize
-from typing import Union, List
from ..core.config import settings
-
class TTSBaseModel(ABC):
_instance = None
_lock = threading.Lock()
@@ -28,16 +26,18 @@ class TTSBaseModel(ABC):
# Test CUDA device
test_tensor = torch.zeros(1).cuda()
logger.info("CUDA test successful")
+ model_path = os.path.join(settings.model_dir, settings.pytorch_model_path)
cls._device = "cuda"
except Exception as e:
logger.error(f"CUDA test failed: {e}")
cls._device = "cpu"
else:
cls._device = "cpu"
+ model_path = os.path.join(settings.model_dir, settings.onnx_model_path)
logger.info(f"Initializing model on {cls._device}")
# Initialize model
- if not cls.initialize(settings.model_dir, settings.model_path):
+ if not cls.initialize(settings.model_dir, model_path=model_path):
raise RuntimeError(f"Failed to initialize {cls._device.upper()} model")
# Setup voices directory
@@ -65,13 +65,9 @@ class TTSBaseModel(ABC):
voice_path = os.path.join(cls.VOICES_DIR, "af.pt")
dummy_voicepack = torch.load(voice_path, map_location=cls._device, weights_only=True)
- if cls._device == "cuda":
- cls.generate(dummy_text, dummy_voicepack, "a", 1.0)
- else:
- ps = phonemize(dummy_text, "a")
- tokens = tokenize(ps)
- tokens = [0] + tokens + [0]
- cls.generate(tokens, dummy_voicepack, 1.0)
+ # Process text and generate audio
+ phonemes, tokens = cls.process_text(dummy_text, "a")
+ cls.generate_from_tokens(tokens, dummy_voicepack, 1.0)
logger.info("Model warm-up complete")
except Exception as e:
@@ -89,13 +85,43 @@ class TTSBaseModel(ABC):
@classmethod
@abstractmethod
- def generate(cls, input_data: Union[str, List[int]], voicepack: torch.Tensor, *args) -> np.ndarray:
- """Generate audio from input
+ def process_text(cls, text: str, language: str) -> Tuple[str, List[int]]:
+ """Process text into phonemes and tokens
Args:
- input_data: Either text string (GPU) or tokenized input (CPU)
+ text: Input text
+ language: Language code
+
+ Returns:
+ tuple[str, list[int]]: Phonemes and token IDs
+ """
+ pass
+
+ @classmethod
+ @abstractmethod
+ def generate_from_text(cls, text: str, voicepack: torch.Tensor, language: str, speed: float) -> Tuple[np.ndarray, str]:
+ """Generate audio from text
+
+ Args:
+ text: Input text
voicepack: Voice tensor
- *args: Additional args (lang+speed for GPU, speed for CPU)
+ language: Language code
+ speed: Speed factor
+
+ Returns:
+ tuple[np.ndarray, str]: Generated audio samples and phonemes
+ """
+ pass
+
+ @classmethod
+ @abstractmethod
+ def generate_from_tokens(cls, tokens: List[int], voicepack: torch.Tensor, speed: float) -> np.ndarray:
+ """Generate audio from tokens
+
+ Args:
+ tokens: Token IDs
+ voicepack: Voice tensor
+ speed: Speed factor
Returns:
np.ndarray: Generated audio samples
diff --git a/api/src/services/tts_cpu.py b/api/src/services/tts_cpu.py
index 74c1bca..6743b83 100644
--- a/api/src/services/tts_cpu.py
+++ b/api/src/services/tts_cpu.py
@@ -5,6 +5,8 @@ from onnxruntime import InferenceSession, SessionOptions, GraphOptimizationLevel
from loguru import logger
from .tts_base import TTSBaseModel
+from .text_processing import phonemize, tokenize
+from ..core.config import settings
class TTSCPUModel(TTSBaseModel):
_instance = None
@@ -15,22 +17,12 @@ class TTSCPUModel(TTSBaseModel):
"""Initialize ONNX model for CPU inference"""
if cls._onnx_session is None:
# Try loading ONNX model
- # First try the specified path if provided
- if model_path and model_path.endswith('.onnx'):
- onnx_path = os.path.join(model_dir, model_path)
- if os.path.exists(onnx_path):
- logger.info(f"Loading specified ONNX model from {onnx_path}")
- else:
- onnx_path = None
+ onnx_path = os.path.join(model_dir, settings.onnx_model_path)
+ if os.path.exists(onnx_path):
+ logger.info(f"Loading ONNX model from {onnx_path}")
else:
- # Look for any .onnx file in the directory as fallback
- onnx_files = [f for f in os.listdir(model_dir) if f.endswith('.onnx')]
- if onnx_files:
- onnx_path = os.path.join(model_dir, onnx_files[0])
- logger.info(f"Found ONNX model: {onnx_path}")
- else:
- logger.error(f"No ONNX model found in {model_dir}")
- return None
+ logger.error(f"ONNX model not found at {onnx_path}")
+ return None
if not onnx_path:
return None
@@ -62,13 +54,53 @@ class TTSCPUModel(TTSBaseModel):
return cls._onnx_session
@classmethod
- def generate(cls, input_data: list[int], voicepack: torch.Tensor, *args) -> np.ndarray:
- """Generate audio using ONNX model
+ def process_text(cls, text: str, language: str) -> tuple[str, list[int]]:
+ """Process text into phonemes and tokens
Args:
- input_data: list of token IDs
+ text: Input text
+ language: Language code
+
+ Returns:
+ tuple[str, list[int]]: Phonemes and token IDs
+ """
+ phonemes = phonemize(text, language)
+ tokens = tokenize(phonemes)
+ tokens = [0] + tokens + [0] # Add start/end tokens
+ return phonemes, tokens
+
+ @classmethod
+ def generate_from_text(cls, text: str, voicepack: torch.Tensor, language: str, speed: float) -> tuple[np.ndarray, str]:
+ """Generate audio from text
+
+ Args:
+ text: Input text
voicepack: Voice tensor
- *args: (speed,) tuple
+ language: Language code
+ speed: Speed factor
+
+ Returns:
+ tuple[np.ndarray, str]: Generated audio samples and phonemes
+ """
+ if cls._onnx_session is None:
+ raise RuntimeError("ONNX model not initialized")
+
+ # Process text
+ phonemes, tokens = cls.process_text(text, language)
+
+ # Generate audio
+ audio = cls.generate_from_tokens(tokens, voicepack, speed)
+
+ return audio, phonemes
+
+ @classmethod
+ def generate_from_tokens(cls, tokens: list[int], voicepack: torch.Tensor, speed: float) -> np.ndarray:
+ """Generate audio from tokens
+
+ Args:
+ tokens: Token IDs
+ voicepack: Voice tensor
+ speed: Speed factor
Returns:
np.ndarray: Generated audio samples
@@ -76,10 +108,9 @@ class TTSCPUModel(TTSBaseModel):
if cls._onnx_session is None:
raise RuntimeError("ONNX model not initialized")
- speed = args[0]
# Pre-allocate and prepare inputs
- tokens_input = np.array([input_data], dtype=np.int64)
- style_input = voicepack[len(input_data)-2].numpy() # Already has correct dimensions
+ tokens_input = np.array([tokens], dtype=np.int64)
+ style_input = voicepack[len(tokens)-2].numpy() # Already has correct dimensions
speed_input = np.full(1, speed, dtype=np.float32) # More efficient than ones * speed
# Run inference with optimized inputs
diff --git a/api/src/services/tts_gpu.py b/api/src/services/tts_gpu.py
index 5da5563..300d141 100644
--- a/api/src/services/tts_gpu.py
+++ b/api/src/services/tts_gpu.py
@@ -3,9 +3,47 @@ import numpy as np
import torch
from loguru import logger
from models import build_model
-from kokoro import generate
+from .text_processing import phonemize, tokenize
from .tts_base import TTSBaseModel
+from ..core.config import settings
+
+@torch.no_grad()
+def forward(model, tokens, ref_s, speed):
+ """Forward pass through the model"""
+ device = ref_s.device
+ tokens = torch.LongTensor([[0, *tokens, 0]]).to(device)
+ input_lengths = torch.LongTensor([tokens.shape[-1]]).to(device)
+ text_mask = length_to_mask(input_lengths).to(device)
+ bert_dur = model.bert(tokens, attention_mask=(~text_mask).int())
+ d_en = model.bert_encoder(bert_dur).transpose(-1, -2)
+ s = ref_s[:, 128:]
+ d = model.predictor.text_encoder(d_en, s, input_lengths, text_mask)
+ x, _ = model.predictor.lstm(d)
+ duration = model.predictor.duration_proj(x)
+ duration = torch.sigmoid(duration).sum(axis=-1) / speed
+ pred_dur = torch.round(duration).clamp(min=1).long()
+ pred_aln_trg = torch.zeros(input_lengths, pred_dur.sum().item())
+ c_frame = 0
+ for i in range(pred_aln_trg.size(0)):
+ pred_aln_trg[i, c_frame : c_frame + pred_dur[0, i].item()] = 1
+ c_frame += pred_dur[0, i].item()
+ en = d.transpose(-1, -2) @ pred_aln_trg.unsqueeze(0).to(device)
+ F0_pred, N_pred = model.predictor.F0Ntrain(en, s)
+ t_en = model.text_encoder(tokens, input_lengths, text_mask)
+ asr = t_en @ pred_aln_trg.unsqueeze(0).to(device)
+ return model.decoder(asr, F0_pred, N_pred, ref_s[:, :128]).squeeze().cpu().numpy()
+
+def length_to_mask(lengths):
+ """Create attention mask from lengths"""
+ mask = (
+ torch.arange(lengths.max())
+ .unsqueeze(0)
+ .expand(lengths.shape[0], -1)
+ .type_as(lengths)
+ )
+ mask = torch.gt(mask + 1, lengths.unsqueeze(1))
+ return mask
class TTSGPUModel(TTSBaseModel):
_instance = None
@@ -17,7 +55,7 @@ class TTSGPUModel(TTSBaseModel):
if cls._instance is None and torch.cuda.is_available():
try:
logger.info("Initializing GPU model")
- model_path = os.path.join(model_dir, model_path)
+ model_path = os.path.join(model_dir, settings.pytorch_model_path)
model = build_model(model_path, cls._device)
cls._instance = model
return cls._instance
@@ -27,13 +65,52 @@ class TTSGPUModel(TTSBaseModel):
return cls._instance
@classmethod
- def generate(cls, input_data: str, voicepack: torch.Tensor, *args) -> np.ndarray:
- """Generate audio using PyTorch model on GPU
+ def process_text(cls, text: str, language: str) -> tuple[str, list[int]]:
+ """Process text into phonemes and tokens
Args:
- input_data: Text string to generate audio from
+ text: Input text
+ language: Language code
+
+ Returns:
+ tuple[str, list[int]]: Phonemes and token IDs
+ """
+ phonemes = phonemize(text, language)
+ tokens = tokenize(phonemes)
+ return phonemes, tokens
+
+ @classmethod
+ def generate_from_text(cls, text: str, voicepack: torch.Tensor, language: str, speed: float) -> tuple[np.ndarray, str]:
+ """Generate audio from text
+
+ Args:
+ text: Input text
voicepack: Voice tensor
- *args: (lang, speed) tuple
+ language: Language code
+ speed: Speed factor
+
+ Returns:
+ tuple[np.ndarray, str]: Generated audio samples and phonemes
+ """
+ if cls._instance is None:
+ raise RuntimeError("GPU model not initialized")
+
+ # Process text
+ phonemes, tokens = cls.process_text(text, language)
+
+ # Generate audio
+ audio = cls.generate_from_tokens(tokens, voicepack, speed)
+
+ return audio, phonemes
+
+ @classmethod
+ def generate_from_tokens(cls, tokens: list[int], voicepack: torch.Tensor, speed: float) -> np.ndarray:
+ """Generate audio from tokens
+
+ Args:
+ tokens: Token IDs
+ voicepack: Voice tensor
+ speed: Speed factor
Returns:
np.ndarray: Generated audio samples
@@ -41,12 +118,10 @@ class TTSGPUModel(TTSBaseModel):
if cls._instance is None:
raise RuntimeError("GPU model not initialized")
- lang, speed = args
- result = generate(cls._instance, input_data, voicepack, lang=lang, speed=speed)
- # kokoro.generate returns (audio, metadata, info), we only want audio
- audio = result[0]
+ # Get reference style
+ ref_s = voicepack[len(tokens)]
- # Convert to numpy array if needed
- if isinstance(audio, torch.Tensor):
- audio = audio.cpu().numpy()
+ # Generate audio
+ audio = forward(cls._instance, tokens, ref_s, speed)
+
return audio
diff --git a/api/src/services/tts_service.py b/api/src/services/tts_service.py
index cddbc47..6d763fe 100644
--- a/api/src/services/tts_service.py
+++ b/api/src/services/tts_service.py
@@ -7,7 +7,7 @@ from typing import List, Tuple, Optional
import numpy as np
import torch
import scipy.io.wavfile as wavfile
-from kokoro import tokenize, phonemize, normalize_text
+from .text_processing import normalize_text
from loguru import logger
from ..core.config import settings
@@ -62,21 +62,10 @@ class TTSService:
# Process all chunks
for i, chunk in enumerate(chunks):
try:
- # Process chunk
- if TTSModel.get_device() == "cuda":
- # GPU takes (text, voicepack, lang, speed)
- try:
- chunk_audio = TTSModel.generate(chunk, voicepack, voice[0], speed)
- except RuntimeError as e:
- logger.error(f"Failed to generate audio: {str(e)}")
- chunk_audio = None
- else:
- # CPU takes (tokens, voicepack, speed)
- ps = phonemize(chunk, voice[0])
- tokens = tokenize(ps)
- tokens = [0] + list(tokens) + [0] # Add padding
- chunk_audio = TTSModel.generate(tokens, voicepack, speed)
-
+ # Process text and generate audio
+ phonemes, tokens = TTSModel.process_text(chunk, voice[0])
+ chunk_audio = TTSModel.generate_from_tokens(tokens, voicepack, speed)
+
if chunk_audio is not None:
audio_chunks.append(chunk_audio)
else:
@@ -98,19 +87,8 @@ class TTSService:
)
else:
# Process single chunk
- if TTSModel.get_device() == "cuda":
- # GPU takes (text, voicepack, lang, speed)
- try:
- audio = TTSModel.generate(text, voicepack, voice[0], speed)
- except RuntimeError as e:
- logger.error(f"Failed to generate audio: {str(e)}")
- raise ValueError("No audio chunks were generated successfully")
- else:
- # CPU takes (tokens, voicepack, speed)
- ps = phonemize(text, voice[0])
- tokens = tokenize(ps)
- tokens = [0] + list(tokens) + [0] # Add padding
- audio = TTSModel.generate(tokens, voicepack, speed)
+ phonemes, tokens = TTSModel.process_text(text, voice[0])
+ audio = TTSModel.generate_from_tokens(tokens, voicepack, speed)
processing_time = time.time() - start_time
return audio, processing_time
diff --git a/api/src/structures/text_schemas.py b/api/src/structures/text_schemas.py
new file mode 100644
index 0000000..5ae1b08
--- /dev/null
+++ b/api/src/structures/text_schemas.py
@@ -0,0 +1,9 @@
+from pydantic import BaseModel
+
+class PhonemeRequest(BaseModel):
+ text: str
+ language: str = "a" # Default to American English
+
+class PhonemeResponse(BaseModel):
+ phonemes: str
+ tokens: list[int]
diff --git a/api/tests/conftest.py b/api/tests/conftest.py
index 5803170..fba270b 100644
--- a/api/tests/conftest.py
+++ b/api/tests/conftest.py
@@ -21,8 +21,73 @@ def cleanup():
cleanup_mock_dirs()
-# Mock torch and other ML modules before they're imported
-sys.modules["torch"] = Mock()
+# Create mock torch module
+mock_torch = Mock()
+mock_torch.cuda = Mock()
+mock_torch.cuda.is_available = Mock(return_value=False)
+
+# Create a mock tensor class that supports basic operations
+class MockTensor:
+ def __init__(self, data):
+ self.data = data
+ if isinstance(data, (list, tuple)):
+ self.shape = [len(data)]
+ elif isinstance(data, MockTensor):
+ self.shape = data.shape
+ else:
+ self.shape = getattr(data, 'shape', [1])
+
+ def __getitem__(self, idx):
+ if isinstance(self.data, (list, tuple)):
+ if isinstance(idx, slice):
+ return MockTensor(self.data[idx])
+ return self.data[idx]
+ return self
+
+ def max(self):
+ if isinstance(self.data, (list, tuple)):
+ max_val = max(self.data)
+ return MockTensor(max_val)
+ return 5 # Default for testing
+
+ def item(self):
+ if isinstance(self.data, (list, tuple)):
+ return max(self.data)
+ if isinstance(self.data, (int, float)):
+ return self.data
+ return 5 # Default for testing
+
+ def cuda(self):
+ """Support cuda conversion"""
+ return self
+
+ def any(self):
+ if isinstance(self.data, (list, tuple)):
+ return any(self.data)
+ return False
+
+ def all(self):
+ if isinstance(self.data, (list, tuple)):
+ return all(self.data)
+ return True
+
+ def unsqueeze(self, dim):
+ return self
+
+ def expand(self, *args):
+ return self
+
+ def type_as(self, other):
+ return self
+
+# Add tensor operations to mock torch
+mock_torch.tensor = lambda x: MockTensor(x)
+mock_torch.zeros = lambda *args: MockTensor([0] * (args[0] if isinstance(args[0], int) else args[0][0]))
+mock_torch.arange = lambda x: MockTensor(list(range(x)))
+mock_torch.gt = lambda x, y: MockTensor([False] * x.shape[0])
+
+# Mock modules before they're imported
+sys.modules["torch"] = mock_torch
sys.modules["transformers"] = Mock()
sys.modules["phonemizer"] = Mock()
sys.modules["models"] = Mock()
@@ -31,14 +96,22 @@ sys.modules["kokoro"] = Mock()
sys.modules["kokoro.generate"] = Mock()
sys.modules["kokoro.phonemize"] = Mock()
sys.modules["kokoro.tokenize"] = Mock()
+sys.modules["onnxruntime"] = Mock()
@pytest.fixture(autouse=True)
def mock_tts_model():
- """Mock TTSModel to avoid loading real models during tests"""
- with patch("api.src.services.tts_model.TTSModel") as mock:
+ """Mock TTSModel and TTS model initialization"""
+ with patch("api.src.services.tts_model.TTSModel") as mock_tts_model, \
+ patch("api.src.services.tts_base.TTSBaseModel") as mock_base_model:
+
+ # Mock TTSModel
model_instance = Mock()
model_instance.get_instance.return_value = model_instance
model_instance.get_voicepack.return_value = None
- mock.get_instance.return_value = model_instance
+ mock_tts_model.get_instance.return_value = model_instance
+
+ # Mock TTS model initialization
+ mock_base_model.setup.return_value = 1 # Return dummy voice count
+
yield model_instance
diff --git a/api/tests/test_tts_implementations.py b/api/tests/test_tts_implementations.py
new file mode 100644
index 0000000..3f10c17
--- /dev/null
+++ b/api/tests/test_tts_implementations.py
@@ -0,0 +1,144 @@
+"""Tests for TTS model implementations"""
+import os
+import torch
+import pytest
+import numpy as np
+from unittest.mock import patch, MagicMock
+
+from api.src.services.tts_base import TTSBaseModel
+from api.src.services.tts_cpu import TTSCPUModel
+from api.src.services.tts_gpu import TTSGPUModel, length_to_mask
+
+# Base Model Tests
+def test_get_device_error():
+ """Test get_device() raises error when not initialized"""
+ TTSBaseModel._device = None
+ with pytest.raises(RuntimeError, match="Model not initialized"):
+ TTSBaseModel.get_device()
+
+@patch('torch.cuda.is_available')
+@patch('os.path.exists')
+@patch('os.path.join')
+@patch('os.listdir')
+@patch('torch.load')
+@patch('torch.save')
+def test_setup_cuda_available(mock_save, mock_load, mock_listdir, mock_join, mock_exists, mock_cuda_available):
+ """Test setup with CUDA available"""
+ TTSBaseModel._device = None
+ mock_cuda_available.return_value = True
+ mock_exists.return_value = True
+ mock_load.return_value = torch.zeros(1)
+ mock_listdir.return_value = ["voice1.pt", "voice2.pt"]
+ mock_join.return_value = "/mocked/path"
+
+ # Mock the abstract methods
+ TTSBaseModel.initialize = MagicMock(return_value=True)
+ TTSBaseModel.process_text = MagicMock(return_value=("dummy", [1,2,3]))
+ TTSBaseModel.generate_from_tokens = MagicMock(return_value=np.zeros(1000))
+
+ voice_count = TTSBaseModel.setup()
+ assert TTSBaseModel._device == "cuda"
+ assert voice_count == 2
+
+@patch('torch.cuda.is_available')
+@patch('os.path.exists')
+@patch('os.path.join')
+@patch('os.listdir')
+@patch('torch.load')
+@patch('torch.save')
+def test_setup_cuda_unavailable(mock_save, mock_load, mock_listdir, mock_join, mock_exists, mock_cuda_available):
+ """Test setup with CUDA unavailable"""
+ TTSBaseModel._device = None
+ mock_cuda_available.return_value = False
+ mock_exists.return_value = True
+ mock_load.return_value = torch.zeros(1)
+ mock_listdir.return_value = ["voice1.pt", "voice2.pt"]
+ mock_join.return_value = "/mocked/path"
+
+ # Mock the abstract methods
+ TTSBaseModel.initialize = MagicMock(return_value=True)
+ TTSBaseModel.process_text = MagicMock(return_value=("dummy", [1,2,3]))
+ TTSBaseModel.generate_from_tokens = MagicMock(return_value=np.zeros(1000))
+
+ voice_count = TTSBaseModel.setup()
+ assert TTSBaseModel._device == "cpu"
+ assert voice_count == 2
+
+# CPU Model Tests
+def test_cpu_initialize_missing_model():
+ """Test CPU initialize with missing model"""
+ with patch('os.path.exists', return_value=False):
+ result = TTSCPUModel.initialize("dummy_dir")
+ assert result is None
+
+def test_cpu_generate_uninitialized():
+ """Test CPU generate methods with uninitialized model"""
+ TTSCPUModel._onnx_session = None
+
+ with pytest.raises(RuntimeError, match="ONNX model not initialized"):
+ TTSCPUModel.generate_from_text("test", torch.zeros(1), "en", 1.0)
+
+ with pytest.raises(RuntimeError, match="ONNX model not initialized"):
+ TTSCPUModel.generate_from_tokens([1,2,3], torch.zeros(1), 1.0)
+
+def test_cpu_process_text():
+ """Test CPU process_text functionality"""
+ with patch('api.src.services.tts_cpu.phonemize') as mock_phonemize, \
+ patch('api.src.services.tts_cpu.tokenize') as mock_tokenize:
+
+ mock_phonemize.return_value = "test phonemes"
+ mock_tokenize.return_value = [1, 2, 3]
+
+ phonemes, tokens = TTSCPUModel.process_text("test", "en")
+ assert phonemes == "test phonemes"
+ assert tokens == [0, 1, 2, 3, 0] # Should add start/end tokens
+
+# GPU Model Tests
+@patch('torch.cuda.is_available')
+def test_gpu_initialize_cuda_unavailable(mock_cuda_available):
+ """Test GPU initialize with CUDA unavailable"""
+ mock_cuda_available.return_value = False
+ TTSGPUModel._instance = None
+
+ result = TTSGPUModel.initialize("dummy_dir", "dummy_path")
+ assert result is None
+
+@patch('api.src.services.tts_gpu.length_to_mask')
+def test_gpu_length_to_mask(mock_length_to_mask):
+ """Test length_to_mask function"""
+ # Setup mock return value
+ expected_mask = torch.tensor([
+ [False, False, False, True, True],
+ [False, False, False, False, False]
+ ])
+ mock_length_to_mask.return_value = expected_mask
+
+ # Call function with test input
+ lengths = torch.tensor([3, 5])
+ mask = mock_length_to_mask(lengths)
+
+ # Verify mock was called with correct input
+ mock_length_to_mask.assert_called_once()
+ assert torch.equal(mask, expected_mask)
+
+def test_gpu_generate_uninitialized():
+ """Test GPU generate methods with uninitialized model"""
+ TTSGPUModel._instance = None
+
+ with pytest.raises(RuntimeError, match="GPU model not initialized"):
+ TTSGPUModel.generate_from_text("test", torch.zeros(1), "en", 1.0)
+
+ with pytest.raises(RuntimeError, match="GPU model not initialized"):
+ TTSGPUModel.generate_from_tokens([1,2,3], torch.zeros(1), 1.0)
+
+def test_gpu_process_text():
+ """Test GPU process_text functionality"""
+ with patch('api.src.services.tts_gpu.phonemize') as mock_phonemize, \
+ patch('api.src.services.tts_gpu.tokenize') as mock_tokenize:
+
+ mock_phonemize.return_value = "test phonemes"
+ mock_tokenize.return_value = [1, 2, 3]
+
+ phonemes, tokens = TTSGPUModel.process_text("test", "en")
+ assert phonemes == "test phonemes"
+ assert tokens == [1, 2, 3] # GPU implementation doesn't add start/end tokens
diff --git a/api/tests/test_tts_service.py b/api/tests/test_tts_service.py
index 1286e91..4e63ff1 100644
--- a/api/tests/test_tts_service.py
+++ b/api/tests/test_tts_service.py
@@ -6,10 +6,13 @@ from unittest.mock import MagicMock, call, patch
import numpy as np
import torch
import pytest
+from onnxruntime import InferenceSession
from api.src.core.config import settings
from api.src.services.tts_model import TTSModel
from api.src.services.tts_service import TTSService
+from api.src.services.tts_cpu import TTSCPUModel
+from api.src.services.tts_gpu import TTSGPUModel
@pytest.fixture
@@ -70,291 +73,6 @@ def test_list_voices(mock_join, mock_listdir, tts_service):
assert "not_a_voice" not in voices
-@patch("api.src.services.tts_model.TTSModel.get_instance")
-@patch("api.src.services.tts_model.TTSModel.get_voicepack")
-@patch("kokoro.normalize_text")
-@patch("kokoro.phonemize")
-@patch("kokoro.tokenize")
-@patch("kokoro.generate")
-def test_generate_audio_empty_text(
- mock_generate,
- mock_tokenize,
- mock_phonemize,
- mock_normalize,
- mock_voicepack,
- mock_instance,
- tts_service,
-):
- """Test generating audio with empty text"""
- mock_normalize.return_value = ""
- mock_instance.return_value = (MagicMock(), "cpu")
-
- with pytest.raises(ValueError, match="Text is empty after preprocessing"):
- tts_service._generate_audio("", "af", 1.0)
-
-
-@patch("api.src.services.tts_model.TTSModel.get_instance")
-@patch("os.path.exists")
-@patch("kokoro.normalize_text")
-@patch("kokoro.phonemize")
-@patch("kokoro.tokenize")
-@patch("kokoro.generate")
-@patch("torch.load")
-def test_generate_audio_no_chunks(
- mock_torch_load,
- mock_generate,
- mock_tokenize,
- mock_phonemize,
- mock_normalize,
- mock_exists,
- mock_instance,
- tts_service,
-):
- """Test generating audio with no successful chunks"""
- mock_normalize.return_value = "Test text"
- mock_phonemize.return_value = "Test text"
- mock_tokenize.return_value = [1, 2] # Return integers instead of strings
- mock_generate.return_value = (None, None)
- mock_instance.return_value = (MagicMock(), "cpu")
- mock_exists.return_value = True
- mock_torch_load.return_value = MagicMock()
-
- with pytest.raises(ValueError, match="No audio chunks were generated successfully"):
- tts_service._generate_audio("Test text", "af", 1.0)
-
-
-@patch("torch.load")
-@patch("torch.save")
-@patch("torch.stack")
-@patch("torch.mean")
-@patch("os.path.exists")
-def test_combine_voices(
- mock_exists, mock_mean, mock_stack, mock_save, mock_load, tts_service
-):
- """Test combining multiple voices"""
- # Setup mocks
- mock_exists.return_value = True
- mock_load.return_value = torch.tensor([1.0, 2.0])
- mock_stack.return_value = torch.tensor([[1.0, 2.0], [3.0, 4.0]])
- mock_mean.return_value = torch.tensor([2.0, 3.0])
-
- # Test combining two voices
- result = tts_service.combine_voices(["voice1", "voice2"])
-
- assert result == "voice1_voice2"
- mock_stack.assert_called_once()
- mock_mean.assert_called_once()
- mock_save.assert_called_once()
-
-
-def test_combine_voices_invalid_input(tts_service):
- """Test combining voices with invalid input"""
- # Test with empty list
- with pytest.raises(ValueError, match="At least 2 voices are required"):
- tts_service.combine_voices([])
-
- # Test with single voice
- with pytest.raises(ValueError, match="At least 2 voices are required"):
- tts_service.combine_voices(["voice1"])
-
-
-
-@patch("api.src.services.tts_model.TTSModel.get_instance")
-@patch("api.src.services.tts_model.TTSModel.get_device")
-@patch("api.src.services.tts_model.TTSModel.generate")
-@patch("os.path.exists")
-@patch("kokoro.normalize_text")
-@patch("kokoro.phonemize")
-@patch("kokoro.tokenize")
-@patch("torch.load")
-def test_generate_audio_success(
- mock_torch_load,
- mock_tokenize,
- mock_phonemize,
- mock_normalize,
- mock_exists,
- mock_model_generate,
- mock_get_device,
- mock_instance,
- tts_service,
- sample_audio,
-):
- """Test successful audio generation"""
- mock_normalize.return_value = "Test text"
- mock_phonemize.return_value = "Test text"
- mock_tokenize.return_value = [1, 2] # Return integers instead of strings
- mock_model_generate.return_value = sample_audio
- mock_instance.return_value = (MagicMock(), "cpu")
- mock_get_device.return_value = "cpu"
- mock_exists.return_value = True
- mock_torch_load.return_value = MagicMock()
-
- # Initialize model
- TTSModel._instance = None
- TTSModel._device = "cpu"
-
- audio, processing_time = tts_service._generate_audio("Test text", "af", 1.0)
- assert isinstance(audio, np.ndarray)
- assert isinstance(processing_time, float)
- assert len(audio) > 0
-
-
-@patch("torch.cuda.is_available")
-@patch("api.src.services.tts_gpu.TTSGPUModel.initialize")
-@patch("os.makedirs")
-@patch("os.path.exists")
-@patch("os.listdir")
-@patch("torch.load")
-@patch("torch.save")
-@patch("api.src.core.config.settings")
-@patch("torch.zeros")
-def test_model_initialization_cuda(
- mock_zeros,
- mock_settings,
- mock_save,
- mock_load,
- mock_listdir,
- mock_exists,
- mock_makedirs,
- mock_initialize,
- mock_cuda_available,
-):
- """Test model initialization with CUDA"""
- # Setup mocks
- mock_cuda_available.return_value = True
- mock_initialize.return_value = True
- mock_exists.return_value = True
- mock_listdir.return_value = ["voice1.pt", "voice2.pt"]
- mock_load.return_value = torch.zeros(1)
- mock_settings.model_dir = "test_dir"
- mock_settings.model_path = "test_path"
- mock_settings.voices_dir = "voices"
- mock_zeros.return_value = torch.zeros(1)
-
- # Reset singleton and device
- TTSModel._instance = None
- TTSModel._device = None
-
- # Mock settings to prevent actual file operations
- with patch.object(settings, 'model_dir', 'test_dir'), \
- patch.object(settings, 'model_path', 'test_path'):
- voice_count = TTSModel.setup()
-
- assert TTSModel.get_device() == "cuda"
- assert voice_count == 2
- mock_initialize.assert_called_once_with("test_dir", "test_path")
-
-
-@patch("torch.cuda.is_available")
-@patch("api.src.services.tts_base.TTSBaseModel.initialize")
-@patch("os.makedirs")
-@patch("os.path.exists")
-@patch("os.listdir")
-@patch("torch.load")
-@patch("torch.save")
-@patch("api.src.core.config.settings")
-@patch("torch.zeros")
-def test_model_initialization_cpu(
- mock_zeros,
- mock_settings,
- mock_save,
- mock_load,
- mock_listdir,
- mock_exists,
- mock_makedirs,
- mock_initialize,
- mock_cuda_available,
-):
- """Test model initialization with CPU"""
- # Setup mocks
- mock_cuda_available.return_value = False
- mock_initialize.return_value = False # This will trigger the RuntimeError
- mock_exists.return_value = True
- mock_listdir.return_value = ["voice1.pt", "voice2.pt", "voice3.pt"]
- mock_load.return_value = torch.zeros(1)
- mock_settings.model_dir = "test_dir"
- mock_settings.model_path = "test_path"
- mock_settings.voices_dir = "voices"
- mock_zeros.return_value = torch.zeros(1)
-
- # Reset singleton and device
- TTSModel._instance = None
- TTSModel._device = None
-
- # Mock settings to prevent actual file operations
- with patch.object(settings, 'model_dir', 'test_dir'), \
- patch.object(settings, 'model_path', 'test_path'), \
- pytest.raises(RuntimeError, match="Failed to initialize CPU model"):
- TTSModel.setup()
-
- mock_initialize.assert_called_once_with("test_dir", "test_path")
-
-
-@patch("api.src.services.tts_service.TTSService._get_voice_path")
-@patch("api.src.services.tts_model.TTSModel.get_instance")
-def test_voicepack_loading_error(mock_get_instance, mock_get_voice_path):
- """Test voicepack loading error handling"""
- mock_get_voice_path.return_value = None
- mock_get_instance.return_value = (MagicMock(), "cpu")
-
- TTSModel._voicepacks = {} # Reset voicepacks
-
- service = TTSService()
- with pytest.raises(ValueError, match="Voice not found: nonexistent_voice"):
- service._generate_audio("test", "nonexistent_voice", 1.0)
-
-
-@patch("api.src.services.tts_model.TTSModel")
-def test_save_audio(mock_tts_model, tts_service, sample_audio, tmp_path):
- """Test saving audio to file"""
- output_dir = os.path.join(tmp_path, "test_output")
- os.makedirs(output_dir, exist_ok=True)
- output_path = os.path.join(output_dir, "audio.wav")
-
- tts_service._save_audio(sample_audio, output_path)
-
- assert os.path.exists(output_path)
- assert os.path.getsize(output_path) > 0
-
-
-@patch("api.src.services.tts_model.TTSModel.get_instance")
-@patch("api.src.services.tts_model.TTSModel.get_device")
-@patch("api.src.services.tts_model.TTSModel.generate")
-@patch("os.path.exists")
-@patch("kokoro.normalize_text")
-@patch("kokoro.phonemize")
-@patch("kokoro.tokenize")
-@patch("torch.load")
-def test_generate_audio_without_stitching(
- mock_torch_load,
- mock_tokenize,
- mock_phonemize,
- mock_normalize,
- mock_exists,
- mock_model_generate,
- mock_get_device,
- mock_instance,
- tts_service,
- sample_audio,
-):
- """Test generating audio without text stitching"""
- mock_normalize.return_value = "Test text"
- mock_phonemize.return_value = "Test text"
- mock_tokenize.return_value = [1, 2] # Return integers instead of strings
- mock_model_generate.return_value = sample_audio
- mock_instance.return_value = (MagicMock(), "cpu")
- mock_get_device.return_value = "cpu"
- mock_exists.return_value = True
- mock_torch_load.return_value = MagicMock()
-
- audio, processing_time = tts_service._generate_audio(
- "Test text", "af", 1.0, stitch_long_output=False
- )
- assert isinstance(audio, np.ndarray)
- assert len(audio) > 0
- mock_model_generate.assert_called_once()
-
-
@patch("os.listdir")
def test_list_voices_error(mock_listdir, tts_service):
"""Test error handling in list_voices"""
@@ -364,6 +82,48 @@ def test_list_voices_error(mock_listdir, tts_service):
assert voices == []
+def mock_model_setup(cuda_available=False):
+ """Helper function to mock model setup"""
+ # Reset model state
+ TTSModel._instance = None
+ TTSModel._device = None
+ TTSModel._voicepacks = {}
+
+ # Create mock model instance with proper generate method
+ mock_model = MagicMock()
+ mock_model.generate.return_value = np.zeros(24000, dtype=np.float32)
+ TTSModel._instance = mock_model
+
+ # Set device based on CUDA availability
+ TTSModel._device = "cuda" if cuda_available else "cpu"
+
+ return 3 # Return voice count (including af.pt)
+
+
+def test_model_initialization_cuda():
+ """Test model initialization with CUDA"""
+ # Simulate CUDA availability
+ voice_count = mock_model_setup(cuda_available=True)
+
+ assert TTSModel.get_device() == "cuda"
+ assert voice_count == 3 # voice1.pt, voice2.pt, af.pt
+
+
+def test_model_initialization_cpu():
+ """Test model initialization with CPU"""
+ # Simulate no CUDA availability
+ voice_count = mock_model_setup(cuda_available=False)
+
+ assert TTSModel.get_device() == "cpu"
+ assert voice_count == 3 # voice1.pt, voice2.pt, af.pt
+
+
+def test_generate_audio_empty_text(tts_service):
+ """Test generating audio with empty text"""
+ with pytest.raises(ValueError, match="Text is empty after preprocessing"):
+ tts_service._generate_audio("", "af", 1.0)
+
+
@patch("api.src.services.tts_model.TTSModel.get_instance")
@patch("api.src.services.tts_model.TTSModel.get_device")
@patch("os.path.exists")
@@ -386,16 +146,12 @@ def test_generate_audio_phonemize_error(
"""Test handling phonemization error"""
mock_normalize.return_value = "Test text"
mock_phonemize.side_effect = Exception("Phonemization failed")
- mock_instance.return_value = (MagicMock(), "cpu")
+ mock_instance.return_value = (mock_generate, "cpu") # Use the same mock for consistency
mock_get_device.return_value = "cpu"
mock_exists.return_value = True
- mock_torch_load.return_value = MagicMock()
+ mock_torch_load.return_value = torch.zeros((10, 24000))
mock_generate.return_value = (None, None)
- # Initialize model
- TTSModel._instance = None
- TTSModel._device = "cpu"
-
with pytest.raises(ValueError, match="No audio chunks were generated successfully"):
tts_service._generate_audio("Test text", "af", 1.0)
@@ -424,14 +180,60 @@ def test_generate_audio_error(
mock_phonemize.return_value = "Test text"
mock_tokenize.return_value = [1, 2] # Return integers instead of strings
mock_generate.side_effect = Exception("Generation failed")
- mock_instance.return_value = (MagicMock(), "cpu")
+ mock_instance.return_value = (mock_generate, "cpu") # Use the same mock for consistency
mock_get_device.return_value = "cpu"
mock_exists.return_value = True
- mock_torch_load.return_value = MagicMock()
-
- # Initialize model
- TTSModel._instance = None
- TTSModel._device = "cpu"
+ mock_torch_load.return_value = torch.zeros((10, 24000))
with pytest.raises(ValueError, match="No audio chunks were generated successfully"):
tts_service._generate_audio("Test text", "af", 1.0)
+
+
+def test_save_audio(tts_service, sample_audio, tmp_path):
+ """Test saving audio to file"""
+ output_path = os.path.join(tmp_path, "test_output.wav")
+ tts_service._save_audio(sample_audio, output_path)
+ assert os.path.exists(output_path)
+ assert os.path.getsize(output_path) > 0
+
+
+def test_combine_voices(tts_service):
+ """Test combining multiple voices"""
+ # Setup mocks for torch operations
+ with patch('torch.load', return_value=torch.tensor([1.0, 2.0])), \
+ patch('torch.stack', return_value=torch.tensor([[1.0, 2.0], [3.0, 4.0]])), \
+ patch('torch.mean', return_value=torch.tensor([2.0, 3.0])), \
+ patch('torch.save'), \
+ patch('os.path.exists', return_value=True):
+
+ # Test combining two voices
+ result = tts_service.combine_voices(["voice1", "voice2"])
+
+ assert result == "voice1_voice2"
+
+
+def test_combine_voices_invalid_input(tts_service):
+ """Test combining voices with invalid input"""
+ # Test with empty list
+ with pytest.raises(ValueError, match="At least 2 voices are required"):
+ tts_service.combine_voices([])
+
+ # Test with single voice
+ with pytest.raises(ValueError, match="At least 2 voices are required"):
+ tts_service.combine_voices(["voice1"])
+
+
+@patch("api.src.services.tts_service.TTSService._get_voice_path")
+@patch("api.src.services.tts_model.TTSModel.get_instance")
+def test_voicepack_loading_error(mock_get_instance, mock_get_voice_path):
+ """Test voicepack loading error handling"""
+ mock_get_voice_path.return_value = None
+ mock_instance = MagicMock()
+ mock_instance.generate.return_value = np.zeros(24000, dtype=np.float32)
+ mock_get_instance.return_value = (mock_instance, "cpu")
+
+ TTSModel._voicepacks = {} # Reset voicepacks
+
+ service = TTSService()
+ with pytest.raises(ValueError, match="Voice not found: nonexistent_voice"):
+ service._generate_audio("test", "nonexistent_voice", 1.0)
diff --git a/docker-compose.cpu.yml b/docker-compose.cpu.yml
index faea2fe..40ca8f8 100644
--- a/docker-compose.cpu.yml
+++ b/docker-compose.cpu.yml
@@ -40,14 +40,14 @@ services:
model-fetcher:
condition: service_healthy
- # Gradio UI service [Comment out everything below if you don't need it]
- gradio-ui:
- build:
- context: ./ui
- ports:
- - "7860:7860"
- volumes:
- - ./ui/data:/app/ui/data
- - ./ui/app.py:/app/app.py # Mount app.py for hot reload
- environment:
- - GRADIO_WATCH=True # Enable hot reloading
+ # # Gradio UI service [Comment out everything below if you don't need it]
+ # gradio-ui:
+ # build:
+ # context: ./ui
+ # ports:
+ # - "7860:7860"
+ # volumes:
+ # - ./ui/data:/app/ui/data
+ # - ./ui/app.py:/app/app.py # Mount app.py for hot reload
+ # environment:
+ # - GRADIO_WATCH=True # Enable hot reloading
diff --git a/docker-compose.yml b/docker-compose.yml
index 2e7a86f..7308745 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -46,14 +46,14 @@ services:
model-fetcher:
condition: service_healthy
- # Gradio UI service [Comment out everything below if you don't need it]
- gradio-ui:
- build:
- context: ./ui
- ports:
- - "7860:7860"
- volumes:
- - ./ui/data:/app/ui/data
- - ./ui/app.py:/app/app.py # Mount app.py for hot reload
- environment:
- - GRADIO_WATCH=True # Enable hot reloading
+ # # Gradio UI service [Comment out everything below if you don't need it]
+ # gradio-ui:
+ # build:
+ # context: ./ui
+ # ports:
+ # - "7860:7860"
+ # volumes:
+ # - ./ui/data:/app/ui/data
+ # - ./ui/app.py:/app/app.py # Mount app.py for hot reload
+ # environment:
+ # - GRADIO_WATCH=True # Enable hot reloading
diff --git a/examples/__init__.py b/examples/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/examples/assorted_checks/benchmarks/__init__.py b/examples/assorted_checks/benchmarks/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/examples/assorted_checks/benchmarks/benchmark_tts_rtf.py b/examples/assorted_checks/benchmarks/benchmark_tts_rtf.py
new file mode 100644
index 0000000..ad206bf
--- /dev/null
+++ b/examples/assorted_checks/benchmarks/benchmark_tts_rtf.py
@@ -0,0 +1,241 @@
+#!/usr/bin/env python3
+import os
+import json
+import time
+import threading
+import queue
+import pandas as pd
+import sys
+from datetime import datetime
+
+from lib.shared_plotting import plot_system_metrics, plot_correlation
+from lib.shared_utils import (
+ get_system_metrics, save_json_results, write_benchmark_stats,
+ real_time_factor
+)
+from lib.shared_benchmark_utils import (
+ get_text_for_tokens, make_tts_request, generate_token_sizes, enc
+)
+
+class SystemMonitor:
+ def __init__(self, interval=1.0):
+ self.interval = interval
+ self.metrics_queue = queue.Queue()
+ self.stop_event = threading.Event()
+ self.metrics_timeline = []
+ self.start_time = None
+
+ def _monitor_loop(self):
+ """Background thread function to collect system metrics."""
+ while not self.stop_event.is_set():
+ metrics = get_system_metrics()
+ metrics["relative_time"] = time.time() - self.start_time
+ self.metrics_queue.put(metrics)
+ time.sleep(self.interval)
+
+ def start(self):
+ """Start the monitoring thread."""
+ self.start_time = time.time()
+ self.monitor_thread = threading.Thread(target=self._monitor_loop)
+ self.monitor_thread.daemon = True
+ self.monitor_thread.start()
+
+ def stop(self):
+ """Stop the monitoring thread and collect final metrics."""
+ self.stop_event.set()
+ if hasattr(self, 'monitor_thread'):
+ self.monitor_thread.join(timeout=2)
+
+ # Collect all metrics from queue
+ while True:
+ try:
+ metrics = self.metrics_queue.get_nowait()
+ self.metrics_timeline.append(metrics)
+ except queue.Empty:
+ break
+
+ return self.metrics_timeline
+
+def main():
+ # Initialize system monitor
+ monitor = SystemMonitor(interval=1.0) # 1 second interval
+ # Set prefix for output files (e.g. "gpu", "cpu", "onnx", etc.)
+ prefix = "gpu"
+ # Generate token sizes
+ if 'gpu' in prefix:
+ token_sizes = generate_token_sizes(
+ max_tokens=3000, dense_step=150,
+ dense_max=1000, sparse_step=1000)
+ elif 'cpu' in prefix:
+ token_sizes = generate_token_sizes(
+ max_tokens=1000, dense_step=150,
+ dense_max=800, sparse_step=0)
+ else:
+ token_sizes = generate_token_sizes(max_tokens=3000)
+
+ # Set up paths relative to this file
+ script_dir = os.path.dirname(os.path.abspath(__file__))
+ output_dir = os.path.join(script_dir, "output_audio")
+ output_data_dir = os.path.join(script_dir, "output_data")
+ output_plots_dir = os.path.join(script_dir, "output_plots")
+
+ # Create output directories
+ os.makedirs(output_dir, exist_ok=True)
+ os.makedirs(output_data_dir, exist_ok=True)
+ os.makedirs(output_plots_dir, exist_ok=True)
+
+ # Function to prefix filenames
+ def prefix_path(path: str, filename: str) -> str:
+ if prefix:
+            filename = f"{prefix}_{filename}"
+ return os.path.join(path, filename)
+
+ with open(os.path.join(script_dir, "the_time_machine_hg_wells.txt"), "r", encoding="utf-8") as f:
+ text = f.read()
+
+ total_tokens = len(enc.encode(text))
+ print(f"Total tokens in file: {total_tokens}")
+
+ print(f"Testing sizes: {token_sizes}")
+
+ results = []
+ test_start_time = time.time()
+
+ # Start system monitoring
+ monitor.start()
+
+ for num_tokens in token_sizes:
+ chunk = get_text_for_tokens(text, num_tokens)
+ actual_tokens = len(enc.encode(chunk))
+
+ print(f"\nProcessing chunk with {actual_tokens} tokens:")
+ print(f"Text preview: {chunk[:100]}...")
+
+ processing_time, audio_length = make_tts_request(
+ chunk,
+ output_dir=output_dir,
+ prefix=prefix
+ )
+ if processing_time is None or audio_length is None:
+ print("Breaking loop due to error")
+ break
+
+ # Calculate RTF using the correct formula
+ rtf = real_time_factor(processing_time, audio_length)
+
+ results.append({
+ "tokens": actual_tokens,
+ "processing_time": processing_time,
+ "output_length": audio_length,
+ "rtf": rtf,
+ "elapsed_time": round(time.time() - test_start_time, 2),
+ })
+
+ df = pd.DataFrame(results)
+ if df.empty:
+ print("No data to plot")
+ return
+
+ df["tokens_per_second"] = df["tokens"] / df["processing_time"]
+
+ # Write benchmark stats
+ stats = [
+ {
+ "title": "Benchmark Statistics (with correct RTF)",
+ "stats": {
+ "Total tokens processed": df['tokens'].sum(),
+ "Total audio generated (s)": df['output_length'].sum(),
+ "Total test duration (s)": df['elapsed_time'].max(),
+ "Average processing rate (tokens/s)": df['tokens_per_second'].mean(),
+ "Average RTF": df['rtf'].mean(),
+ "Average Real Time Speed": 1/df['rtf'].mean()
+ }
+ },
+ {
+ "title": "Per-chunk Stats",
+ "stats": {
+ "Average chunk size (tokens)": df['tokens'].mean(),
+ "Min chunk size (tokens)": df['tokens'].min(),
+ "Max chunk size (tokens)": df['tokens'].max(),
+ "Average processing time (s)": df['processing_time'].mean(),
+ "Average output length (s)": df['output_length'].mean()
+ }
+ },
+ {
+ "title": "Performance Ranges",
+ "stats": {
+ "Processing rate range (tokens/s)": f"{df['tokens_per_second'].min():.2f} - {df['tokens_per_second'].max():.2f}",
+ "RTF range": f"{df['rtf'].min():.2f}x - {df['rtf'].max():.2f}x",
+ "Real Time Speed range": f"{1/df['rtf'].max():.2f}x - {1/df['rtf'].min():.2f}x"
+ }
+ }
+ ]
+ write_benchmark_stats(stats, prefix_path(output_data_dir, "benchmark_stats_rtf.txt"))
+
+ # Plot Processing Time vs Token Count
+ plot_correlation(
+ df, "tokens", "processing_time",
+ "Processing Time vs Input Size",
+ "Number of Input Tokens",
+ "Processing Time (seconds)",
+ prefix_path(output_plots_dir, "processing_time_rtf.png")
+ )
+
+ # Plot RTF vs Token Count
+ plot_correlation(
+ df, "tokens", "rtf",
+ "Real-Time Factor vs Input Size",
+ "Number of Input Tokens",
+ "Real-Time Factor (processing time / audio length)",
+ prefix_path(output_plots_dir, "realtime_factor_rtf.png")
+ )
+
+ # Stop monitoring and get final metrics
+ final_metrics = monitor.stop()
+
+ # Convert metrics timeline to DataFrame for stats
+ metrics_df = pd.DataFrame(final_metrics)
+
+ # Add system usage stats
+ if not metrics_df.empty:
+ stats.append({
+ "title": "System Usage Statistics",
+ "stats": {
+ "Peak CPU Usage (%)": metrics_df['cpu_percent'].max(),
+ "Avg CPU Usage (%)": metrics_df['cpu_percent'].mean(),
+ "Peak RAM Usage (%)": metrics_df['ram_percent'].max(),
+ "Avg RAM Usage (%)": metrics_df['ram_percent'].mean(),
+ "Peak RAM Used (GB)": metrics_df['ram_used_gb'].max(),
+ "Avg RAM Used (GB)": metrics_df['ram_used_gb'].mean(),
+ }
+ })
+ if 'gpu_memory_used' in metrics_df:
+ stats[-1]["stats"].update({
+ "Peak GPU Memory (MB)": metrics_df['gpu_memory_used'].max(),
+ "Avg GPU Memory (MB)": metrics_df['gpu_memory_used'].mean(),
+ })
+
+ # Plot system metrics
+ plot_system_metrics(final_metrics, prefix_path(output_plots_dir, "system_usage_rtf.png"))
+
+ # Save final results
+ save_json_results(
+ {
+ "results": results,
+ "system_metrics": final_metrics,
+ "test_duration": time.time() - test_start_time
+ },
+ prefix_path(output_data_dir, "benchmark_results_rtf.json")
+ )
+
+ print("\nResults saved to:")
+ print(f"- {prefix_path(output_data_dir, 'benchmark_results_rtf.json')}")
+ print(f"- {prefix_path(output_data_dir, 'benchmark_stats_rtf.txt')}")
+ print(f"- {prefix_path(output_plots_dir, 'processing_time_rtf.png')}")
+ print(f"- {prefix_path(output_plots_dir, 'realtime_factor_rtf.png')}")
+ print(f"- {prefix_path(output_plots_dir, 'system_usage_rtf.png')}")
+ print(f"\nAudio files saved in {output_dir} with prefix: {prefix or '(none)'}")
+
+
+if __name__ == "__main__":
+ main()
diff --git a/examples/assorted_checks/benchmarks/depr_benchmark_tts.py b/examples/assorted_checks/benchmarks/depr_benchmark_tts.py
new file mode 100644
index 0000000..989c177
--- /dev/null
+++ b/examples/assorted_checks/benchmarks/depr_benchmark_tts.py
@@ -0,0 +1,165 @@
+import os
+import json
+import time
+import pandas as pd
+from examples.assorted_checks.lib.shared_plotting import plot_system_metrics, plot_correlation
+from examples.assorted_checks.lib.shared_utils import (
+ get_system_metrics, save_json_results, write_benchmark_stats
+)
+from examples.assorted_checks.lib.shared_benchmark_utils import (
+ get_text_for_tokens, make_tts_request, generate_token_sizes, enc
+)
+
+
+def main():
+ # Get optional prefix from first command line argument
+ import sys
+ prefix = sys.argv[1] if len(sys.argv) > 1 else ""
+
+ # Set up paths relative to this file
+ script_dir = os.path.dirname(os.path.abspath(__file__))
+ output_dir = os.path.join(script_dir, "output_audio")
+ output_data_dir = os.path.join(script_dir, "output_data")
+ output_plots_dir = os.path.join(script_dir, "output_plots")
+
+ # Create output directories
+ os.makedirs(output_dir, exist_ok=True)
+ os.makedirs(output_data_dir, exist_ok=True)
+ os.makedirs(output_plots_dir, exist_ok=True)
+
+    # Function to prefix filenames (uses optional CLI prefix from outer scope)
+    def prefix_path(path: str, filename: str) -> str:
+        if prefix:
+            filename = f"{prefix}_{filename}"
+        return os.path.join(path, filename)
+
+ # Read input text
+ with open(
+ os.path.join(script_dir, "the_time_machine_hg_wells.txt"), "r", encoding="utf-8"
+ ) as f:
+ text = f.read()
+
+ # Get total tokens in file
+ total_tokens = len(enc.encode(text))
+ print(f"Total tokens in file: {total_tokens}")
+
+
+ token_sizes = generate_token_sizes(total_tokens)
+
+ print(f"Testing sizes: {token_sizes}")
+
+ # Process chunks
+ results = []
+ system_metrics = []
+ test_start_time = time.time()
+
+ for num_tokens in token_sizes:
+ # Get text slice with exact token count
+ chunk = get_text_for_tokens(text, num_tokens)
+ actual_tokens = len(enc.encode(chunk))
+
+ print(f"\nProcessing chunk with {actual_tokens} tokens:")
+ print(f"Text preview: {chunk[:100]}...")
+
+ # Collect system metrics before processing
+ system_metrics.append(get_system_metrics())
+
+ processing_time, audio_length = make_tts_request(chunk)
+ if processing_time is None or audio_length is None:
+ print("Breaking loop due to error")
+ break
+
+ # Collect system metrics after processing
+ system_metrics.append(get_system_metrics())
+
+ results.append(
+ {
+ "tokens": actual_tokens,
+ "processing_time": processing_time,
+ "output_length": audio_length,
+ "realtime_factor": audio_length / processing_time,
+ "elapsed_time": time.time() - test_start_time,
+ }
+ )
+
+ # Save intermediate results
+ save_json_results(
+ {"results": results, "system_metrics": system_metrics},
+ prefix_path(output_data_dir, "benchmark_results.json")
+ )
+
+ # Create DataFrame and calculate stats
+ df = pd.DataFrame(results)
+ if df.empty:
+ print("No data to plot")
+ return
+
+ # Calculate useful metrics
+ df["tokens_per_second"] = df["tokens"] / df["processing_time"]
+
+ # Write benchmark stats
+ stats = [
+ {
+ "title": "Benchmark Statistics",
+ "stats": {
+ "Total tokens processed": df['tokens'].sum(),
+ "Total audio generated (s)": df['output_length'].sum(),
+ "Total test duration (s)": df['elapsed_time'].max(),
+ "Average processing rate (tokens/s)": df['tokens_per_second'].mean(),
+ "Average realtime factor": df['realtime_factor'].mean()
+ }
+ },
+ {
+ "title": "Per-chunk Stats",
+ "stats": {
+ "Average chunk size (tokens)": df['tokens'].mean(),
+ "Min chunk size (tokens)": df['tokens'].min(),
+ "Max chunk size (tokens)": df['tokens'].max(),
+ "Average processing time (s)": df['processing_time'].mean(),
+ "Average output length (s)": df['output_length'].mean()
+ }
+ },
+ {
+ "title": "Performance Ranges",
+ "stats": {
+ "Processing rate range (tokens/s)": f"{df['tokens_per_second'].min():.2f} - {df['tokens_per_second'].max():.2f}",
+ "Realtime factor range": f"{df['realtime_factor'].min():.2f}x - {df['realtime_factor'].max():.2f}x"
+ }
+ }
+ ]
+ write_benchmark_stats(stats, prefix_path(output_data_dir, "benchmark_stats.txt"))
+
+ # Plot Processing Time vs Token Count
+ plot_correlation(
+ df, "tokens", "processing_time",
+ "Processing Time vs Input Size",
+ "Number of Input Tokens",
+ "Processing Time (seconds)",
+ prefix_path(output_plots_dir, "processing_time.png")
+ )
+
+ # Plot Realtime Factor vs Token Count
+ plot_correlation(
+ df, "tokens", "realtime_factor",
+ "Realtime Factor vs Input Size",
+ "Number of Input Tokens",
+ "Realtime Factor (output length / processing time)",
+ prefix_path(output_plots_dir, "realtime_factor.png")
+ )
+
+ # Plot system metrics
+ plot_system_metrics(system_metrics, prefix_path(output_plots_dir, "system_usage.png"))
+
+ print("\nResults saved to:")
+ print(f"- {prefix_path(output_data_dir, 'benchmark_results.json')}")
+ print(f"- {prefix_path(output_data_dir, 'benchmark_stats.txt')}")
+ print(f"- {prefix_path(output_plots_dir, 'processing_time.png')}")
+ print(f"- {prefix_path(output_plots_dir, 'realtime_factor.png')}")
+ print(f"- {prefix_path(output_plots_dir, 'system_usage.png')}")
+ if any("gpu_memory_used" in m for m in system_metrics):
+ print(f"- {prefix_path(output_plots_dir, 'gpu_usage.png')}")
+ print(f"\nAudio files saved in {output_dir} with prefix: {prefix or '(none)'}")
+
+
+if __name__ == "__main__":
+ main()
diff --git a/examples/assorted_checks/benchmarks/lib/__init__.py b/examples/assorted_checks/benchmarks/lib/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/examples/assorted_checks/benchmarks/lib/shared_benchmark_utils.py b/examples/assorted_checks/benchmarks/lib/shared_benchmark_utils.py
new file mode 100644
index 0000000..c2fd1c4
--- /dev/null
+++ b/examples/assorted_checks/benchmarks/lib/shared_benchmark_utils.py
@@ -0,0 +1,111 @@
+"""Shared utilities specific to TTS benchmarking."""
+import time
+from typing import List, Optional, Tuple
+
+import requests
+import tiktoken
+
+from .shared_utils import get_audio_length, save_audio_file
+
+# Global tokenizer instance
+enc = tiktoken.get_encoding("cl100k_base")
+
+
+def get_text_for_tokens(text: str, num_tokens: int) -> str:
+ """Get a slice of text that contains exactly num_tokens tokens.
+
+ Args:
+ text: Input text to slice
+ num_tokens: Desired number of tokens
+
+ Returns:
+ str: Text slice containing exactly num_tokens tokens
+ """
+ tokens = enc.encode(text)
+ if num_tokens > len(tokens):
+ return text
+ return enc.decode(tokens[:num_tokens])
+
+
+def make_tts_request(
+    text: str,
+    output_dir: Optional[str] = None,
+    timeout: int = 1800,
+    prefix: str = ""
+) -> Tuple[Optional[float], Optional[float]]:
+    """Make TTS request using OpenAI-compatible endpoint.
+
+    Args:
+        text: Input text to convert to speech
+        output_dir: Directory to save audio files. If None, audio won't be saved.
+        timeout: Request timeout in seconds
+        prefix: Optional prefix for output filenames
+
+    Returns:
+        tuple: (processing_time, audio_length) in seconds, or (None, None) on error
+    """
+    try:
+        start_time = time.time()
+        response = requests.post(
+            "http://localhost:8880/v1/audio/speech",
+            json={
+                "model": "kokoro",
+                "input": text,
+                "voice": "af",
+                "response_format": "wav",
+            },
+            timeout=timeout,
+        )
+        response.raise_for_status()
+
+        processing_time = round(time.time() - start_time, 2)
+        # Calculate audio length from response content
+        audio_length = get_audio_length(response.content)
+
+        # Save the audio file if output_dir is provided
+        if output_dir:
+            token_count = len(enc.encode(text))
+            output_file = save_audio_file(
+                response.content,
+                f"chunk_{token_count}_tokens",
+                output_dir
+            )
+            print(f"Saved audio to {output_file}")
+
+        return processing_time, audio_length
+
+    except requests.exceptions.RequestException as e:
+        print(f"Error making request for text: {text[:50]}... Error: {str(e)}")
+        return None, None
+    except Exception as e:
+        print(f"Error processing text: {text[:50]}... Error: {str(e)}")
+        return None, None
+
+
+def generate_token_sizes(
+    max_tokens: int,
+    dense_step: int = 100,
+    dense_max: int = 1000,
+    sparse_step: int = 1000
+) -> List[int]:
+    """Generate token size ranges with dense sampling at start.
+
+    Args:
+        max_tokens: Maximum number of tokens to generate sizes up to
+        dense_step: Step size for dense sampling range
+        dense_max: Maximum value for dense sampling
+        sparse_step: Step size for sparse sampling range
+
+    Returns:
+        list: Sorted list of token sizes
+    """
+    # Dense sampling at start
+    dense_range = list(range(dense_step, dense_max + 1, dense_step))
+
+    if max_tokens <= dense_max or sparse_step < dense_max:
+        return sorted(s for s in dense_range if s <= max_tokens)  # never exceed max_tokens
+    # Sparse sampling for larger sizes
+    sparse_range = list(range(dense_max + sparse_step, max_tokens + 1, sparse_step))
+
+    # Combine and deduplicate
+    return sorted(list(set(dense_range + sparse_range)))
diff --git a/examples/assorted_checks/benchmarks/lib/shared_plotting.py b/examples/assorted_checks/benchmarks/lib/shared_plotting.py
new file mode 100644
index 0000000..b1679df
--- /dev/null
+++ b/examples/assorted_checks/benchmarks/lib/shared_plotting.py
@@ -0,0 +1,176 @@
+"""Shared plotting utilities for benchmarks and tests."""
+import pandas as pd
+import seaborn as sns
+import matplotlib.pyplot as plt
+
+# Common style configurations
+STYLE_CONFIG = {
+ "background_color": "#1a1a2e",
+ "primary_color": "#ff2a6d",
+ "secondary_color": "#05d9e8",
+ "grid_color": "#ffffff",
+ "text_color": "#ffffff",
+ "font_sizes": {
+ "title": 16,
+ "label": 14,
+ "tick": 12,
+ "text": 10
+ }
+}
+
+def setup_plot(fig, ax, title, xlabel=None, ylabel=None):
+ """Configure plot styling with consistent theme.
+
+ Args:
+ fig: matplotlib figure object
+ ax: matplotlib axis object
+ title: str, plot title
+ xlabel: str, optional x-axis label
+ ylabel: str, optional y-axis label
+
+ Returns:
+ tuple: (fig, ax) with applied styling
+ """
+ # Grid styling
+ ax.grid(True, linestyle="--", alpha=0.3, color=STYLE_CONFIG["grid_color"])
+
+ # Title and labels
+ ax.set_title(title, pad=20,
+ fontsize=STYLE_CONFIG["font_sizes"]["title"],
+ fontweight="bold",
+ color=STYLE_CONFIG["text_color"])
+
+ if xlabel:
+ ax.set_xlabel(xlabel,
+ fontsize=STYLE_CONFIG["font_sizes"]["label"],
+ fontweight="medium",
+ color=STYLE_CONFIG["text_color"])
+ if ylabel:
+ ax.set_ylabel(ylabel,
+ fontsize=STYLE_CONFIG["font_sizes"]["label"],
+ fontweight="medium",
+ color=STYLE_CONFIG["text_color"])
+
+ # Tick styling
+ ax.tick_params(labelsize=STYLE_CONFIG["font_sizes"]["tick"],
+ colors=STYLE_CONFIG["text_color"])
+
+ # Spine styling
+ for spine in ax.spines.values():
+ spine.set_color(STYLE_CONFIG["text_color"])
+ spine.set_alpha(0.3)
+ spine.set_linewidth(0.5)
+
+ # Background colors
+ ax.set_facecolor(STYLE_CONFIG["background_color"])
+ fig.patch.set_facecolor(STYLE_CONFIG["background_color"])
+
+ return fig, ax
+
+def plot_system_metrics(metrics_data, output_path):
+    """Create plots for system metrics over time.
+
+    Args:
+        metrics_data: list of dicts containing system metrics
+        output_path: str, path to save the output plot
+    """
+    df = pd.DataFrame(metrics_data)
+    df["timestamp"] = pd.to_datetime(df["timestamp"])
+    elapsed_time = (df["timestamp"] - df["timestamp"].iloc[0]).dt.total_seconds()
+
+    # Get baseline values
+    baseline_cpu = df["cpu_percent"].iloc[0]
+    baseline_ram = df["ram_used_gb"].iloc[0]
+    baseline_gpu = df["gpu_memory_used"].iloc[0] / 1024 if "gpu_memory_used" in df.columns else None
+
+    # Convert GPU memory to GB if present
+    if "gpu_memory_used" in df.columns:
+        df["gpu_memory_gb"] = df["gpu_memory_used"] / 1024
+
+    plt.style.use("dark_background")
+
+    # Create subplots based on available metrics
+    has_gpu = "gpu_memory_used" in df.columns
+    num_plots = 3 if has_gpu else 2
+    fig, axes = plt.subplots(num_plots, 1, figsize=(15, 5 * num_plots))
+    fig.patch.set_facecolor(STYLE_CONFIG["background_color"])
+
+    # Smoothing window; clamp to >= 1 so rolling() never gets window=0 on short runs
+    window = max(1, min(5, len(df) // 2))
+
+    # Plot CPU Usage
+    smoothed_cpu = df["cpu_percent"].rolling(window=window, center=True).mean()
+    sns.lineplot(x=elapsed_time, y=smoothed_cpu, ax=axes[0],
+                 color=STYLE_CONFIG["primary_color"], linewidth=2)
+    axes[0].axhline(y=baseline_cpu, color=STYLE_CONFIG["secondary_color"],
+                    linestyle="--", alpha=0.5, label="Baseline")
+    setup_plot(fig, axes[0], "CPU Usage Over Time",
+               xlabel="Time (seconds)", ylabel="CPU Usage (%)")
+    axes[0].set_ylim(0, max(df["cpu_percent"]) * 1.1)
+    axes[0].legend()
+
+    # Plot RAM Usage
+    smoothed_ram = df["ram_used_gb"].rolling(window=window, center=True).mean()
+    sns.lineplot(x=elapsed_time, y=smoothed_ram, ax=axes[1],
+                 color=STYLE_CONFIG["secondary_color"], linewidth=2)
+    axes[1].axhline(y=baseline_ram, color=STYLE_CONFIG["primary_color"],
+                    linestyle="--", alpha=0.5, label="Baseline")
+    setup_plot(fig, axes[1], "RAM Usage Over Time",
+               xlabel="Time (seconds)", ylabel="RAM Usage (GB)")
+    axes[1].set_ylim(0, max(df["ram_used_gb"]) * 1.1)
+    axes[1].legend()
+
+    # Plot GPU Memory if available
+    if has_gpu:
+        smoothed_gpu = df["gpu_memory_gb"].rolling(window=window, center=True).mean()
+        sns.lineplot(x=elapsed_time, y=smoothed_gpu, ax=axes[2],
+                     color=STYLE_CONFIG["primary_color"], linewidth=2)
+        axes[2].axhline(y=baseline_gpu, color=STYLE_CONFIG["secondary_color"],
+                        linestyle="--", alpha=0.5, label="Baseline")
+        setup_plot(fig, axes[2], "GPU Memory Usage Over Time",
+                   xlabel="Time (seconds)", ylabel="GPU Memory (GB)")
+        axes[2].set_ylim(0, max(df["gpu_memory_gb"]) * 1.1)
+        axes[2].legend()
+
+    plt.tight_layout()
+    plt.savefig(output_path, dpi=300, bbox_inches="tight")
+    plt.close()
+
+def plot_correlation(df, x, y, title, xlabel, ylabel, output_path):
+ """Create correlation plot with regression line and correlation coefficient.
+
+ Args:
+ df: pandas DataFrame containing the data
+ x: str, column name for x-axis
+ y: str, column name for y-axis
+ title: str, plot title
+ xlabel: str, x-axis label
+ ylabel: str, y-axis label
+ output_path: str, path to save the output plot
+ """
+ plt.style.use("dark_background")
+
+ fig, ax = plt.subplots(figsize=(12, 8))
+
+ # Scatter plot
+ sns.scatterplot(data=df, x=x, y=y, s=100, alpha=0.6,
+ color=STYLE_CONFIG["primary_color"])
+
+ # Regression line
+ sns.regplot(data=df, x=x, y=y, scatter=False,
+ color=STYLE_CONFIG["secondary_color"],
+ line_kws={"linewidth": 2})
+
+ # Add correlation coefficient
+ corr = df[x].corr(df[y])
+ plt.text(0.05, 0.95, f"Correlation: {corr:.2f}",
+ transform=ax.transAxes,
+ fontsize=STYLE_CONFIG["font_sizes"]["text"],
+ color=STYLE_CONFIG["text_color"],
+ bbox=dict(facecolor=STYLE_CONFIG["background_color"],
+ edgecolor=STYLE_CONFIG["text_color"],
+ alpha=0.7))
+
+ setup_plot(fig, ax, title, xlabel=xlabel, ylabel=ylabel)
+ plt.savefig(output_path, dpi=300, bbox_inches="tight")
+ plt.close()
diff --git a/examples/assorted_checks/benchmarks/lib/shared_utils.py b/examples/assorted_checks/benchmarks/lib/shared_utils.py
new file mode 100644
index 0000000..a9c872e
--- /dev/null
+++ b/examples/assorted_checks/benchmarks/lib/shared_utils.py
@@ -0,0 +1,174 @@
+"""Shared utilities for benchmarks and tests."""
+import os
+import json
+import subprocess
+from datetime import datetime
+from typing import Any, Dict, List, Optional, Union
+
+import psutil
+import scipy.io.wavfile as wavfile
+
+# Check for torch availability once at module level
+TORCH_AVAILABLE = False
+try:
+ import torch
+ TORCH_AVAILABLE = torch.cuda.is_available()
+except ImportError:
+ pass
+
+
+def get_audio_length(audio_data: bytes, temp_dir: str = None) -> float:
+ """Get audio length in seconds from bytes data.
+
+ Args:
+ audio_data: Raw audio bytes
+ temp_dir: Directory for temporary file. If None, uses system temp directory.
+
+ Returns:
+ float: Audio length in seconds
+ """
+ if temp_dir is None:
+ import tempfile
+ temp_dir = tempfile.gettempdir()
+
+ temp_path = os.path.join(temp_dir, "temp.wav")
+ os.makedirs(temp_dir, exist_ok=True)
+
+ with open(temp_path, "wb") as f:
+ f.write(audio_data)
+
+ try:
+ rate, data = wavfile.read(temp_path)
+ return len(data) / rate
+ finally:
+ if os.path.exists(temp_path):
+ os.remove(temp_path)
+
+
+def get_gpu_memory(average: bool = True) -> Optional[Union[float, List[float]]]:
+ """Get GPU memory usage using PyTorch if available, falling back to nvidia-smi.
+
+ Args:
+ average: If True and multiple GPUs present, returns average memory usage.
+ If False, returns list of memory usage per GPU.
+
+ Returns:
+ float or List[float] or None: GPU memory usage in MB. Returns None if no GPU available.
+ If average=False and multiple GPUs present, returns list of values.
+ """
+ if TORCH_AVAILABLE:
+ n_gpus = torch.cuda.device_count()
+ memory_used = []
+ for i in range(n_gpus):
+ memory_used.append(torch.cuda.memory_allocated(i) / 1024**2) # Convert to MB
+
+ if average and len(memory_used) > 0:
+ return sum(memory_used) / len(memory_used)
+ return memory_used if len(memory_used) > 1 else memory_used[0]
+
+ # Fall back to nvidia-smi
+ try:
+ result = subprocess.check_output(
+ ["nvidia-smi", "--query-gpu=memory.used", "--format=csv,nounits,noheader"]
+ )
+ memory_values = [float(x.strip()) for x in result.decode("utf-8").split("\n") if x.strip()]
+
+ if average and len(memory_values) > 0:
+ return sum(memory_values) / len(memory_values)
+ return memory_values if len(memory_values) > 1 else memory_values[0]
+ except (subprocess.CalledProcessError, FileNotFoundError):
+ return None
+
+
+def get_system_metrics() -> Dict[str, Union[str, float]]:
+ """Get current system metrics including CPU, RAM, and GPU if available.
+
+ Returns:
+ dict: System metrics including timestamp, CPU%, RAM%, RAM GB, and GPU MB if available
+ """
+ # Get per-CPU percentages and calculate average
+ cpu_percentages = psutil.cpu_percent(percpu=True)
+ avg_cpu = sum(cpu_percentages) / len(cpu_percentages)
+
+ metrics = {
+ "timestamp": datetime.now().isoformat(),
+ "cpu_percent": round(avg_cpu, 2),
+ "ram_percent": psutil.virtual_memory().percent,
+ "ram_used_gb": psutil.virtual_memory().used / (1024**3),
+ }
+
+ gpu_mem = get_gpu_memory(average=True) # Use average for system metrics
+ if gpu_mem is not None:
+ metrics["gpu_memory_used"] = round(gpu_mem, 2)
+
+ return metrics
+
+
+def save_audio_file(audio_data: bytes, identifier: str, output_dir: str) -> str:
+ """Save audio data to a file with proper naming and directory creation.
+
+ Args:
+ audio_data: Raw audio bytes
+ identifier: String to identify this audio file (e.g. token count, test name)
+ output_dir: Directory to save the file
+
+ Returns:
+ str: Path to the saved audio file
+ """
+ os.makedirs(output_dir, exist_ok=True)
+ output_file = os.path.join(output_dir, f"{identifier}.wav")
+
+ with open(output_file, "wb") as f:
+ f.write(audio_data)
+
+ return output_file
+
+
+def write_benchmark_stats(stats: List[Dict[str, Any]], output_file: str) -> None:
+ """Write benchmark statistics to a file in a clean, organized format.
+
+ Args:
+ stats: List of dictionaries containing stat name/value pairs
+ output_file: Path to output file
+ """
+ os.makedirs(os.path.dirname(output_file), exist_ok=True)
+
+ with open(output_file, "w") as f:
+ for section in stats:
+ # Write section header
+ f.write(f"=== {section['title']} ===\n\n")
+
+ # Write stats
+ for label, value in section['stats'].items():
+ if isinstance(value, float):
+ f.write(f"{label}: {value:.2f}\n")
+ else:
+ f.write(f"{label}: {value}\n")
+ f.write("\n")
+
+
+def save_json_results(results: Dict[str, Any], output_file: str) -> None:
+ """Save benchmark results to a JSON file with proper formatting.
+
+ Args:
+ results: Dictionary of results to save
+ output_file: Path to output file
+ """
+ os.makedirs(os.path.dirname(output_file), exist_ok=True)
+ with open(output_file, "w") as f:
+ json.dump(results, f, indent=2)
+
+
+def real_time_factor(processing_time: float, audio_length: float, decimals: int = 2) -> float:
+ """Calculate Real-Time Factor (RTF) as processing-time / length-of-audio.
+
+ Args:
+ processing_time: Time taken to process/generate audio
+ audio_length: Length of the generated audio
+ decimals: Number of decimal places to round to
+
+ Returns:
+ float: RTF value
+ """
+ rtf = processing_time / audio_length
+ return round(rtf, decimals)
diff --git a/examples/assorted_checks/benchmarks/output_data/benchmark_results.json b/examples/assorted_checks/benchmarks/output_data/benchmark_results.json
new file mode 100644
index 0000000..5c60933
--- /dev/null
+++ b/examples/assorted_checks/benchmarks/output_data/benchmark_results.json
@@ -0,0 +1,111 @@
+{
+ "results": [
+ {
+ "tokens": 100,
+ "processing_time": 18.833295583724976,
+ "output_length": 31.15,
+ "realtime_factor": 1.6539856161403135,
+ "elapsed_time": 19.024322748184204
+ },
+ {
+ "tokens": 200,
+ "processing_time": 38.95506024360657,
+ "output_length": 62.6,
+ "realtime_factor": 1.6069799304257042,
+ "elapsed_time": 58.21527123451233
+ },
+ {
+ "tokens": 300,
+ "processing_time": 49.74252939224243,
+ "output_length": 96.325,
+ "realtime_factor": 1.9364716908630366,
+ "elapsed_time": 108.19673728942871
+ },
+ {
+ "tokens": 400,
+ "processing_time": 61.349056243896484,
+ "output_length": 128.575,
+ "realtime_factor": 2.095794261102292,
+ "elapsed_time": 169.733656167984
+ },
+ {
+ "tokens": 500,
+ "processing_time": 82.86568236351013,
+ "output_length": 158.575,
+ "realtime_factor": 1.9136389815071193,
+ "elapsed_time": 252.7968451976776
+ }
+ ],
+ "system_metrics": [
+ {
+ "timestamp": "2025-01-03T00:13:49.865330",
+ "cpu_percent": 8.0,
+ "ram_percent": 39.4,
+ "ram_used_gb": 25.03811264038086,
+ "gpu_memory_used": 1204.0
+ },
+ {
+ "timestamp": "2025-01-03T00:14:08.781551",
+ "cpu_percent": 26.8,
+ "ram_percent": 42.6,
+ "ram_used_gb": 27.090862274169922,
+ "gpu_memory_used": 1225.0
+ },
+ {
+ "timestamp": "2025-01-03T00:14:08.916973",
+ "cpu_percent": 16.1,
+ "ram_percent": 42.6,
+ "ram_used_gb": 27.089553833007812,
+ "gpu_memory_used": 1225.0
+ },
+ {
+ "timestamp": "2025-01-03T00:14:47.979053",
+ "cpu_percent": 31.5,
+ "ram_percent": 43.6,
+ "ram_used_gb": 27.714427947998047,
+ "gpu_memory_used": 1225.0
+ },
+ {
+ "timestamp": "2025-01-03T00:14:48.098976",
+ "cpu_percent": 20.0,
+ "ram_percent": 43.6,
+ "ram_used_gb": 27.704315185546875,
+ "gpu_memory_used": 1211.0
+ },
+ {
+ "timestamp": "2025-01-03T00:15:37.944729",
+ "cpu_percent": 29.7,
+ "ram_percent": 38.6,
+ "ram_used_gb": 24.53925323486328,
+ "gpu_memory_used": 1217.0
+ },
+ {
+ "timestamp": "2025-01-03T00:15:38.071915",
+ "cpu_percent": 8.6,
+ "ram_percent": 38.5,
+ "ram_used_gb": 24.51690673828125,
+ "gpu_memory_used": 1208.0
+ },
+ {
+ "timestamp": "2025-01-03T00:16:39.525449",
+ "cpu_percent": 23.4,
+ "ram_percent": 38.8,
+ "ram_used_gb": 24.71230697631836,
+ "gpu_memory_used": 1221.0
+ },
+ {
+ "timestamp": "2025-01-03T00:16:39.612442",
+ "cpu_percent": 5.5,
+ "ram_percent": 38.9,
+ "ram_used_gb": 24.72066879272461,
+ "gpu_memory_used": 1221.0
+ },
+ {
+ "timestamp": "2025-01-03T00:18:02.569076",
+ "cpu_percent": 27.4,
+ "ram_percent": 39.1,
+ "ram_used_gb": 24.868202209472656,
+ "gpu_memory_used": 1264.0
+ }
+ ]
+}
\ No newline at end of file
diff --git a/examples/benchmarks/benchmark_results_cpu.json b/examples/assorted_checks/benchmarks/output_data/benchmark_results_cpu.json
similarity index 100%
rename from examples/benchmarks/benchmark_results_cpu.json
rename to examples/assorted_checks/benchmarks/output_data/benchmark_results_cpu.json
diff --git a/examples/assorted_checks/benchmarks/output_data/benchmark_results_rtf.json b/examples/assorted_checks/benchmarks/output_data/benchmark_results_rtf.json
new file mode 100644
index 0000000..59ad009
--- /dev/null
+++ b/examples/assorted_checks/benchmarks/output_data/benchmark_results_rtf.json
@@ -0,0 +1,300 @@
+{
+ "results": [
+ {
+ "tokens": 100,
+ "processing_time": 0.96,
+ "output_length": 31.1,
+ "rtf": 0.03,
+ "elapsed_time": 1.11
+ },
+ {
+ "tokens": 250,
+ "processing_time": 2.23,
+ "output_length": 77.17,
+ "rtf": 0.03,
+ "elapsed_time": 3.49
+ },
+ {
+ "tokens": 400,
+ "processing_time": 4.05,
+ "output_length": 128.05,
+ "rtf": 0.03,
+ "elapsed_time": 7.77
+ },
+ {
+ "tokens": 550,
+ "processing_time": 4.06,
+ "output_length": 171.45,
+ "rtf": 0.02,
+ "elapsed_time": 12.0
+ },
+ {
+ "tokens": 700,
+ "processing_time": 6.01,
+ "output_length": 221.6,
+ "rtf": 0.03,
+ "elapsed_time": 18.16
+ },
+ {
+ "tokens": 850,
+ "processing_time": 6.9,
+ "output_length": 269.1,
+ "rtf": 0.03,
+ "elapsed_time": 25.21
+ },
+ {
+ "tokens": 1000,
+ "processing_time": 7.65,
+ "output_length": 315.05,
+ "rtf": 0.02,
+ "elapsed_time": 33.03
+ },
+ {
+ "tokens": 6000,
+ "processing_time": 48.7,
+ "output_length": 1837.1,
+ "rtf": 0.03,
+ "elapsed_time": 82.21
+ },
+ {
+ "tokens": 11000,
+ "processing_time": 92.44,
+ "output_length": 3388.57,
+ "rtf": 0.03,
+ "elapsed_time": 175.46
+ },
+ {
+ "tokens": 16000,
+ "processing_time": 163.61,
+ "output_length": 4977.32,
+ "rtf": 0.03,
+ "elapsed_time": 340.46
+ },
+ {
+ "tokens": 21000,
+ "processing_time": 209.72,
+ "output_length": 6533.3,
+ "rtf": 0.03,
+ "elapsed_time": 551.92
+ },
+ {
+ "tokens": 26000,
+ "processing_time": 329.35,
+ "output_length": 8068.15,
+ "rtf": 0.04,
+ "elapsed_time": 883.37
+ },
+ {
+ "tokens": 31000,
+ "processing_time": 473.52,
+ "output_length": 9611.48,
+ "rtf": 0.05,
+ "elapsed_time": 1359.28
+ },
+ {
+ "tokens": 36000,
+ "processing_time": 650.98,
+ "output_length": 11157.15,
+ "rtf": 0.06,
+ "elapsed_time": 2012.9
+ }
+ ],
+ "system_metrics": [
+ {
+ "timestamp": "2025-01-03T14:41:01.331735",
+ "cpu_percent": 7.5,
+ "ram_percent": 50.2,
+ "ram_used_gb": 31.960269927978516,
+ "gpu_memory_used": 3191.0
+ },
+ {
+ "timestamp": "2025-01-03T14:41:02.357116",
+ "cpu_percent": 17.01,
+ "ram_percent": 50.2,
+ "ram_used_gb": 31.96163558959961,
+ "gpu_memory_used": 3426.0
+ },
+ {
+ "timestamp": "2025-01-03T14:41:02.445009",
+ "cpu_percent": 9.5,
+ "ram_percent": 50.3,
+ "ram_used_gb": 31.966781616210938,
+ "gpu_memory_used": 3426.0
+ },
+ {
+ "timestamp": "2025-01-03T14:41:04.742152",
+ "cpu_percent": 18.27,
+ "ram_percent": 50.4,
+ "ram_used_gb": 32.08788299560547,
+ "gpu_memory_used": 3642.0
+ },
+ {
+ "timestamp": "2025-01-03T14:41:04.847795",
+ "cpu_percent": 16.27,
+ "ram_percent": 50.5,
+ "ram_used_gb": 32.094364166259766,
+ "gpu_memory_used": 3640.0
+ },
+ {
+ "timestamp": "2025-01-03T14:41:09.019590",
+ "cpu_percent": 15.97,
+ "ram_percent": 50.7,
+ "ram_used_gb": 32.23244094848633,
+ "gpu_memory_used": 3640.0
+ },
+ {
+ "timestamp": "2025-01-03T14:41:09.110324",
+ "cpu_percent": 3.54,
+ "ram_percent": 50.7,
+ "ram_used_gb": 32.234458923339844,
+ "gpu_memory_used": 3640.0
+ },
+ {
+ "timestamp": "2025-01-03T14:41:13.252607",
+ "cpu_percent": 13.4,
+ "ram_percent": 50.6,
+ "ram_used_gb": 32.194271087646484,
+ "gpu_memory_used": 3935.0
+ },
+ {
+ "timestamp": "2025-01-03T14:41:13.327557",
+ "cpu_percent": 4.69,
+ "ram_percent": 50.6,
+ "ram_used_gb": 32.191776275634766,
+ "gpu_memory_used": 3935.0
+ },
+ {
+ "timestamp": "2025-01-03T14:41:19.413633",
+ "cpu_percent": 12.92,
+ "ram_percent": 50.9,
+ "ram_used_gb": 32.3467903137207,
+ "gpu_memory_used": 4250.0
+ },
+ {
+ "timestamp": "2025-01-03T14:41:19.492758",
+ "cpu_percent": 7.5,
+ "ram_percent": 50.8,
+ "ram_used_gb": 32.34375,
+ "gpu_memory_used": 4250.0
+ },
+ {
+ "timestamp": "2025-01-03T14:41:26.467284",
+ "cpu_percent": 13.09,
+ "ram_percent": 51.2,
+ "ram_used_gb": 32.56281280517578,
+ "gpu_memory_used": 4249.0
+ },
+ {
+ "timestamp": "2025-01-03T14:41:26.553559",
+ "cpu_percent": 8.39,
+ "ram_percent": 51.2,
+ "ram_used_gb": 32.56183624267578,
+ "gpu_memory_used": 4249.0
+ },
+ {
+ "timestamp": "2025-01-03T14:41:34.284362",
+ "cpu_percent": 12.61,
+ "ram_percent": 51.7,
+ "ram_used_gb": 32.874778747558594,
+ "gpu_memory_used": 4250.0
+ },
+ {
+ "timestamp": "2025-01-03T14:41:34.362353",
+ "cpu_percent": 1.25,
+ "ram_percent": 51.7,
+ "ram_used_gb": 32.87461471557617,
+ "gpu_memory_used": 4250.0
+ },
+ {
+ "timestamp": "2025-01-03T14:42:23.471312",
+ "cpu_percent": 11.64,
+ "ram_percent": 54.9,
+ "ram_used_gb": 34.90264129638672,
+ "gpu_memory_used": 4647.0
+ },
+ {
+ "timestamp": "2025-01-03T14:42:23.547203",
+ "cpu_percent": 5.31,
+ "ram_percent": 54.9,
+ "ram_used_gb": 34.91563415527344,
+ "gpu_memory_used": 4647.0
+ },
+ {
+ "timestamp": "2025-01-03T14:43:56.724933",
+ "cpu_percent": 12.97,
+ "ram_percent": 59.5,
+ "ram_used_gb": 37.84241485595703,
+ "gpu_memory_used": 4655.0
+ },
+ {
+ "timestamp": "2025-01-03T14:43:56.815453",
+ "cpu_percent": 11.75,
+ "ram_percent": 59.5,
+ "ram_used_gb": 37.832679748535156,
+ "gpu_memory_used": 4655.0
+ },
+ {
+ "timestamp": "2025-01-03T14:46:41.705155",
+ "cpu_percent": 12.94,
+ "ram_percent": 66.3,
+ "ram_used_gb": 42.1534538269043,
+ "gpu_memory_used": 4729.0
+ },
+ {
+ "timestamp": "2025-01-03T14:46:41.835177",
+ "cpu_percent": 7.73,
+ "ram_percent": 66.2,
+ "ram_used_gb": 42.13554000854492,
+ "gpu_memory_used": 4729.0
+ },
+ {
+ "timestamp": "2025-01-03T14:50:13.166236",
+ "cpu_percent": 11.62,
+ "ram_percent": 73.4,
+ "ram_used_gb": 46.71288299560547,
+ "gpu_memory_used": 4676.0
+ },
+ {
+ "timestamp": "2025-01-03T14:50:13.261611",
+ "cpu_percent": 8.16,
+ "ram_percent": 73.4,
+ "ram_used_gb": 46.71356201171875,
+ "gpu_memory_used": 4676.0
+ },
+ {
+ "timestamp": "2025-01-03T14:55:44.623607",
+ "cpu_percent": 12.92,
+ "ram_percent": 82.8,
+ "ram_used_gb": 52.65533447265625,
+ "gpu_memory_used": 4636.0
+ },
+ {
+ "timestamp": "2025-01-03T14:55:44.735410",
+ "cpu_percent": 15.29,
+ "ram_percent": 82.7,
+ "ram_used_gb": 52.63290786743164,
+ "gpu_memory_used": 4636.0
+ },
+ {
+ "timestamp": "2025-01-03T15:03:40.534449",
+ "cpu_percent": 13.88,
+ "ram_percent": 85.0,
+ "ram_used_gb": 54.050071716308594,
+ "gpu_memory_used": 4771.0
+ },
+ {
+ "timestamp": "2025-01-03T15:03:40.638708",
+ "cpu_percent": 12.21,
+ "ram_percent": 85.0,
+ "ram_used_gb": 54.053733825683594,
+ "gpu_memory_used": 4771.0
+ },
+ {
+ "timestamp": "2025-01-03T15:14:34.159142",
+ "cpu_percent": 14.51,
+ "ram_percent": 78.1,
+ "ram_used_gb": 49.70396423339844,
+ "gpu_memory_used": 4739.0
+ }
+ ]
+}
\ No newline at end of file
diff --git a/examples/benchmarks/benchmark_stats_cpu.txt b/examples/assorted_checks/benchmarks/output_data/benchmark_stats_cpu.txt
similarity index 100%
rename from examples/benchmarks/benchmark_stats_cpu.txt
rename to examples/assorted_checks/benchmarks/output_data/benchmark_stats_cpu.txt
diff --git a/examples/assorted_checks/benchmarks/output_data/benchmark_stats_rtf.txt b/examples/assorted_checks/benchmarks/output_data/benchmark_stats_rtf.txt
new file mode 100644
index 0000000..e7bed5f
--- /dev/null
+++ b/examples/assorted_checks/benchmarks/output_data/benchmark_stats_rtf.txt
@@ -0,0 +1,9 @@
+=== Benchmark Statistics (with correct RTF) ===
+
+Overall Stats:
+Total tokens processed: 150850
+Total audio generated: 46786.59s
+Total test duration: 2012.90s
+Average processing rate: 104.34 tokens/second
+Average RTF: 0.03x
+
diff --git a/examples/assorted_checks/benchmarks/output_data/cpu_benchmark_results_rtf.json b/examples/assorted_checks/benchmarks/output_data/cpu_benchmark_results_rtf.json
new file mode 100644
index 0000000..acc11fd
--- /dev/null
+++ b/examples/assorted_checks/benchmarks/output_data/cpu_benchmark_results_rtf.json
@@ -0,0 +1,2506 @@
+{
+ "results": [
+ {
+ "tokens": 150,
+ "processing_time": 22.93,
+ "output_length": 45.9,
+ "rtf": 0.5,
+ "elapsed_time": 22.97
+ },
+ {
+ "tokens": 300,
+ "processing_time": 43.78,
+ "output_length": 96.425,
+ "rtf": 0.45,
+ "elapsed_time": 66.81
+ },
+ {
+ "tokens": 450,
+ "processing_time": 64.75,
+ "output_length": 143.1,
+ "rtf": 0.45,
+ "elapsed_time": 131.61
+ },
+ {
+ "tokens": 600,
+ "processing_time": 85.73,
+ "output_length": 188.675,
+ "rtf": 0.45,
+ "elapsed_time": 217.4
+ },
+ {
+ "tokens": 750,
+ "processing_time": 115.35,
+ "output_length": 236.7,
+ "rtf": 0.49,
+ "elapsed_time": 332.81
+ }
+ ],
+ "system_metrics": [
+ {
+ "timestamp": "2025-01-03T17:34:42.747316",
+ "cpu_percent": 4.4,
+ "ram_percent": 45.1,
+ "ram_used_gb": 28.709259033203125,
+ "gpu_memory_used": 1112.0,
+ "relative_time": 0.07491874694824219
+ },
+ {
+ "timestamp": "2025-01-03T17:34:43.835281",
+ "cpu_percent": 23.77,
+ "ram_percent": 45.1,
+ "ram_used_gb": 28.697410583496094,
+ "gpu_memory_used": 1103.0,
+ "relative_time": 1.162503719329834
+ },
+ {
+ "timestamp": "2025-01-03T17:34:44.912674",
+ "cpu_percent": 21.57,
+ "ram_percent": 45.1,
+ "ram_used_gb": 28.662906646728516,
+ "gpu_memory_used": 1103.0,
+ "relative_time": 2.2446579933166504
+ },
+ {
+ "timestamp": "2025-01-03T17:34:46.004788",
+ "cpu_percent": 23.83,
+ "ram_percent": 45.0,
+ "ram_used_gb": 28.651744842529297,
+ "gpu_memory_used": 1103.0,
+ "relative_time": 3.3548994064331055
+ },
+ {
+ "timestamp": "2025-01-03T17:34:47.111761",
+ "cpu_percent": 22.67,
+ "ram_percent": 45.0,
+ "ram_used_gb": 28.65630340576172,
+ "gpu_memory_used": 1102.0,
+ "relative_time": 4.438696622848511
+ },
+ {
+ "timestamp": "2025-01-03T17:34:48.189184",
+ "cpu_percent": 24.12,
+ "ram_percent": 45.1,
+ "ram_used_gb": 28.664894104003906,
+ "gpu_memory_used": 1102.0,
+ "relative_time": 5.523675918579102
+ },
+ {
+ "timestamp": "2025-01-03T17:34:49.280551",
+ "cpu_percent": 25.39,
+ "ram_percent": 45.1,
+ "ram_used_gb": 28.6732177734375,
+ "gpu_memory_used": 1102.0,
+ "relative_time": 6.591145038604736
+ },
+ {
+ "timestamp": "2025-01-03T17:34:50.355243",
+ "cpu_percent": 21.66,
+ "ram_percent": 45.0,
+ "ram_used_gb": 28.655715942382812,
+ "gpu_memory_used": 1102.0,
+ "relative_time": 7.682063102722168
+ },
+ {
+ "timestamp": "2025-01-03T17:34:51.431347",
+ "cpu_percent": 22.3,
+ "ram_percent": 45.0,
+ "ram_used_gb": 28.649066925048828,
+ "gpu_memory_used": 1095.0,
+ "relative_time": 8.77349853515625
+ },
+ {
+ "timestamp": "2025-01-03T17:34:52.525602",
+ "cpu_percent": 23.26,
+ "ram_percent": 45.1,
+ "ram_used_gb": 28.668758392333984,
+ "gpu_memory_used": 1101.0,
+ "relative_time": 9.867908477783203
+ },
+ {
+ "timestamp": "2025-01-03T17:34:53.627319",
+ "cpu_percent": 23.99,
+ "ram_percent": 45.1,
+ "ram_used_gb": 28.695106506347656,
+ "gpu_memory_used": 1101.0,
+ "relative_time": 10.980819463729858
+ },
+ {
+ "timestamp": "2025-01-03T17:34:54.734806",
+ "cpu_percent": 27.3,
+ "ram_percent": 45.1,
+ "ram_used_gb": 28.669708251953125,
+ "gpu_memory_used": 1102.0,
+ "relative_time": 12.046100854873657
+ },
+ {
+ "timestamp": "2025-01-03T17:34:55.795028",
+ "cpu_percent": 28.48,
+ "ram_percent": 45.1,
+ "ram_used_gb": 28.677181243896484,
+ "gpu_memory_used": 1102.0,
+ "relative_time": 13.139363288879395
+ },
+ {
+ "timestamp": "2025-01-03T17:34:56.888979",
+ "cpu_percent": 30.75,
+ "ram_percent": 45.2,
+ "ram_used_gb": 28.762008666992188,
+ "gpu_memory_used": 1102.0,
+ "relative_time": 14.25643014907837
+ },
+ {
+ "timestamp": "2025-01-03T17:34:58.006454",
+ "cpu_percent": 51.33,
+ "ram_percent": 45.1,
+ "ram_used_gb": 28.7191162109375,
+ "gpu_memory_used": 1102.0,
+ "relative_time": 15.350555658340454
+ },
+ {
+ "timestamp": "2025-01-03T17:34:59.113602",
+ "cpu_percent": 30.18,
+ "ram_percent": 45.2,
+ "ram_used_gb": 28.765079498291016,
+ "gpu_memory_used": 1101.0,
+ "relative_time": 16.45602011680603
+ },
+ {
+ "timestamp": "2025-01-03T17:35:00.216463",
+ "cpu_percent": 38.41,
+ "ram_percent": 45.2,
+ "ram_used_gb": 28.744461059570312,
+ "gpu_memory_used": 1101.0,
+ "relative_time": 17.581329822540283
+ },
+ {
+ "timestamp": "2025-01-03T17:35:01.335124",
+ "cpu_percent": 27.42,
+ "ram_percent": 45.2,
+ "ram_used_gb": 28.75594711303711,
+ "gpu_memory_used": 1101.0,
+ "relative_time": 18.70340371131897
+ },
+ {
+ "timestamp": "2025-01-03T17:35:02.457622",
+ "cpu_percent": 25.99,
+ "ram_percent": 45.3,
+ "ram_used_gb": 28.785816192626953,
+ "gpu_memory_used": 1100.0,
+ "relative_time": 19.809316635131836
+ },
+ {
+ "timestamp": "2025-01-03T17:35:03.569035",
+ "cpu_percent": 27.52,
+ "ram_percent": 45.3,
+ "ram_used_gb": 28.79547882080078,
+ "gpu_memory_used": 1103.0,
+ "relative_time": 20.89554214477539
+ },
+ {
+ "timestamp": "2025-01-03T17:35:04.645402",
+ "cpu_percent": 29.33,
+ "ram_percent": 45.3,
+ "ram_used_gb": 28.804088592529297,
+ "gpu_memory_used": 1101.0,
+ "relative_time": 21.98127508163452
+ },
+ {
+ "timestamp": "2025-01-03T17:35:05.735134",
+ "cpu_percent": 27.68,
+ "ram_percent": 45.3,
+ "ram_used_gb": 28.83855438232422,
+ "gpu_memory_used": 1102.0,
+ "relative_time": 23.050849676132202
+ },
+ {
+ "timestamp": "2025-01-03T17:35:06.814217",
+ "cpu_percent": 27.9,
+ "ram_percent": 45.3,
+ "ram_used_gb": 28.84520721435547,
+ "gpu_memory_used": 1102.0,
+ "relative_time": 24.1594979763031
+ },
+ {
+ "timestamp": "2025-01-03T17:35:07.921210",
+ "cpu_percent": 29.27,
+ "ram_percent": 45.3,
+ "ram_used_gb": 28.81098175048828,
+ "gpu_memory_used": 1102.0,
+ "relative_time": 25.24632716178894
+ },
+ {
+ "timestamp": "2025-01-03T17:35:08.999119",
+ "cpu_percent": 32.0,
+ "ram_percent": 45.3,
+ "ram_used_gb": 28.832805633544922,
+ "gpu_memory_used": 1102.0,
+ "relative_time": 26.33597707748413
+ },
+ {
+ "timestamp": "2025-01-03T17:35:10.088620",
+ "cpu_percent": 24.94,
+ "ram_percent": 45.3,
+ "ram_used_gb": 28.82524871826172,
+ "gpu_memory_used": 1102.0,
+ "relative_time": 27.41934084892273
+ },
+ {
+ "timestamp": "2025-01-03T17:35:11.171667",
+ "cpu_percent": 25.69,
+ "ram_percent": 45.3,
+ "ram_used_gb": 28.84173583984375,
+ "gpu_memory_used": 1102.0,
+ "relative_time": 28.496659517288208
+ },
+ {
+ "timestamp": "2025-01-03T17:35:12.257168",
+ "cpu_percent": 25.55,
+ "ram_percent": 45.4,
+ "ram_used_gb": 28.876148223876953,
+ "gpu_memory_used": 1100.0,
+ "relative_time": 29.6050226688385
+ },
+ {
+ "timestamp": "2025-01-03T17:35:13.368026",
+ "cpu_percent": 26.89,
+ "ram_percent": 45.4,
+ "ram_used_gb": 28.87099838256836,
+ "gpu_memory_used": 1099.0,
+ "relative_time": 30.69699764251709
+ },
+ {
+ "timestamp": "2025-01-03T17:35:14.446163",
+ "cpu_percent": 27.84,
+ "ram_percent": 45.4,
+ "ram_used_gb": 28.85318374633789,
+ "gpu_memory_used": 1099.0,
+ "relative_time": 31.779499769210815
+ },
+ {
+ "timestamp": "2025-01-03T17:35:15.537804",
+ "cpu_percent": 27.68,
+ "ram_percent": 45.4,
+ "ram_used_gb": 28.861358642578125,
+ "gpu_memory_used": 1099.0,
+ "relative_time": 32.866647481918335
+ },
+ {
+ "timestamp": "2025-01-03T17:35:16.626833",
+ "cpu_percent": 27.11,
+ "ram_percent": 45.1,
+ "ram_used_gb": 28.67508316040039,
+ "gpu_memory_used": 1101.0,
+ "relative_time": 33.97589898109436
+ },
+ {
+ "timestamp": "2025-01-03T17:35:17.735393",
+ "cpu_percent": 24.79,
+ "ram_percent": 45.1,
+ "ram_used_gb": 28.690773010253906,
+ "gpu_memory_used": 1101.0,
+ "relative_time": 35.04746890068054
+ },
+ {
+ "timestamp": "2025-01-03T17:35:18.808174",
+ "cpu_percent": 23.52,
+ "ram_percent": 45.1,
+ "ram_used_gb": 28.687705993652344,
+ "gpu_memory_used": 1101.0,
+ "relative_time": 36.138739585876465
+ },
+ {
+ "timestamp": "2025-01-03T17:35:19.896932",
+ "cpu_percent": 23.96,
+ "ram_percent": 45.1,
+ "ram_used_gb": 28.670169830322266,
+ "gpu_memory_used": 1101.0,
+ "relative_time": 37.22053575515747
+ },
+ {
+ "timestamp": "2025-01-03T17:35:20.975119",
+ "cpu_percent": 23.28,
+ "ram_percent": 45.1,
+ "ram_used_gb": 28.66689682006836,
+ "gpu_memory_used": 1101.0,
+ "relative_time": 38.29904627799988
+ },
+ {
+ "timestamp": "2025-01-03T17:35:22.050416",
+ "cpu_percent": 23.42,
+ "ram_percent": 45.1,
+ "ram_used_gb": 28.69702911376953,
+ "gpu_memory_used": 1100.0,
+ "relative_time": 39.390669107437134
+ },
+ {
+ "timestamp": "2025-01-03T17:35:23.143314",
+ "cpu_percent": 24.28,
+ "ram_percent": 45.1,
+ "ram_used_gb": 28.696701049804688,
+ "gpu_memory_used": 1099.0,
+ "relative_time": 40.485177755355835
+ },
+ {
+ "timestamp": "2025-01-03T17:35:24.247604",
+ "cpu_percent": 25.17,
+ "ram_percent": 45.1,
+ "ram_used_gb": 28.70577621459961,
+ "gpu_memory_used": 1097.0,
+ "relative_time": 41.58591032028198
+ },
+ {
+ "timestamp": "2025-01-03T17:35:25.339573",
+ "cpu_percent": 21.29,
+ "ram_percent": 45.1,
+ "ram_used_gb": 28.683162689208984,
+ "gpu_memory_used": 1097.0,
+ "relative_time": 42.64796328544617
+ },
+ {
+ "timestamp": "2025-01-03T17:35:26.398778",
+ "cpu_percent": 21.64,
+ "ram_percent": 45.1,
+ "ram_used_gb": 28.675365447998047,
+ "gpu_memory_used": 1097.0,
+ "relative_time": 43.72638392448425
+ },
+ {
+ "timestamp": "2025-01-03T17:35:27.487170",
+ "cpu_percent": 22.69,
+ "ram_percent": 45.1,
+ "ram_used_gb": 28.687335968017578,
+ "gpu_memory_used": 1097.0,
+ "relative_time": 44.81854510307312
+ },
+ {
+ "timestamp": "2025-01-03T17:35:28.579809",
+ "cpu_percent": 23.78,
+ "ram_percent": 45.1,
+ "ram_used_gb": 28.686798095703125,
+ "gpu_memory_used": 1097.0,
+ "relative_time": 45.909095287323
+ },
+ {
+ "timestamp": "2025-01-03T17:35:29.670899",
+ "cpu_percent": 26.39,
+ "ram_percent": 45.1,
+ "ram_used_gb": 28.667762756347656,
+ "gpu_memory_used": 1097.0,
+ "relative_time": 46.996145486831665
+ },
+ {
+ "timestamp": "2025-01-03T17:35:30.746220",
+ "cpu_percent": 23.85,
+ "ram_percent": 45.1,
+ "ram_used_gb": 28.674030303955078,
+ "gpu_memory_used": 1097.0,
+ "relative_time": 48.05596685409546
+ },
+ {
+ "timestamp": "2025-01-03T17:35:31.807982",
+ "cpu_percent": 23.41,
+ "ram_percent": 45.1,
+ "ram_used_gb": 28.699344635009766,
+ "gpu_memory_used": 1097.0,
+ "relative_time": 49.14048671722412
+ },
+ {
+ "timestamp": "2025-01-03T17:35:32.899231",
+ "cpu_percent": 23.91,
+ "ram_percent": 45.1,
+ "ram_used_gb": 28.694026947021484,
+ "gpu_memory_used": 1097.0,
+ "relative_time": 50.233946323394775
+ },
+ {
+ "timestamp": "2025-01-03T17:35:33.990980",
+ "cpu_percent": 21.22,
+ "ram_percent": 45.1,
+ "ram_used_gb": 28.69512939453125,
+ "gpu_memory_used": 1097.0,
+ "relative_time": 51.3146493434906
+ },
+ {
+ "timestamp": "2025-01-03T17:35:35.065062",
+ "cpu_percent": 23.06,
+ "ram_percent": 45.1,
+ "ram_used_gb": 28.673419952392578,
+ "gpu_memory_used": 1097.0,
+ "relative_time": 52.40274214744568
+ },
+ {
+ "timestamp": "2025-01-03T17:35:36.155228",
+ "cpu_percent": 25.23,
+ "ram_percent": 45.1,
+ "ram_used_gb": 28.68213653564453,
+ "gpu_memory_used": 1097.0,
+ "relative_time": 53.48211121559143
+ },
+ {
+ "timestamp": "2025-01-03T17:35:37.232942",
+ "cpu_percent": 27.12,
+ "ram_percent": 45.1,
+ "ram_used_gb": 28.705059051513672,
+ "gpu_memory_used": 1097.0,
+ "relative_time": 54.55754017829895
+ },
+ {
+ "timestamp": "2025-01-03T17:35:38.317354",
+ "cpu_percent": 25.1,
+ "ram_percent": 45.1,
+ "ram_used_gb": 28.702983856201172,
+ "gpu_memory_used": 1097.0,
+ "relative_time": 55.65464448928833
+ },
+ {
+ "timestamp": "2025-01-03T17:35:39.405580",
+ "cpu_percent": 24.43,
+ "ram_percent": 45.1,
+ "ram_used_gb": 28.68689727783203,
+ "gpu_memory_used": 1097.0,
+ "relative_time": 56.73482656478882
+ },
+ {
+ "timestamp": "2025-01-03T17:35:40.495226",
+ "cpu_percent": 23.47,
+ "ram_percent": 45.1,
+ "ram_used_gb": 28.68814468383789,
+ "gpu_memory_used": 1097.0,
+ "relative_time": 57.82576274871826
+ },
+ {
+ "timestamp": "2025-01-03T17:35:41.586272",
+ "cpu_percent": 22.74,
+ "ram_percent": 45.1,
+ "ram_used_gb": 28.68700408935547,
+ "gpu_memory_used": 1097.0,
+ "relative_time": 58.90043330192566
+ },
+ {
+ "timestamp": "2025-01-03T17:35:42.662161",
+ "cpu_percent": 22.7,
+ "ram_percent": 45.1,
+ "ram_used_gb": 28.702560424804688,
+ "gpu_memory_used": 1097.0,
+ "relative_time": 59.995574951171875
+ },
+ {
+ "timestamp": "2025-01-03T17:35:43.754475",
+ "cpu_percent": 22.92,
+ "ram_percent": 45.1,
+ "ram_used_gb": 28.70386505126953,
+ "gpu_memory_used": 1097.0,
+ "relative_time": 61.082642793655396
+ },
+ {
+ "timestamp": "2025-01-03T17:35:44.844454",
+ "cpu_percent": 24.27,
+ "ram_percent": 45.1,
+ "ram_used_gb": 28.68398666381836,
+ "gpu_memory_used": 1097.0,
+ "relative_time": 62.17042922973633
+ },
+ {
+ "timestamp": "2025-01-03T17:35:45.922535",
+ "cpu_percent": 25.56,
+ "ram_percent": 45.1,
+ "ram_used_gb": 28.679576873779297,
+ "gpu_memory_used": 1097.0,
+ "relative_time": 63.231327533721924
+ },
+ {
+ "timestamp": "2025-01-03T17:35:46.980393",
+ "cpu_percent": 24.43,
+ "ram_percent": 45.1,
+ "ram_used_gb": 28.69806671142578,
+ "gpu_memory_used": 1097.0,
+ "relative_time": 64.29255247116089
+ },
+ {
+ "timestamp": "2025-01-03T17:35:48.044665",
+ "cpu_percent": 23.42,
+ "ram_percent": 45.1,
+ "ram_used_gb": 28.698440551757812,
+ "gpu_memory_used": 1097.0,
+ "relative_time": 65.356365442276
+ },
+ {
+ "timestamp": "2025-01-03T17:35:49.105491",
+ "cpu_percent": 23.74,
+ "ram_percent": 45.1,
+ "ram_used_gb": 28.699386596679688,
+ "gpu_memory_used": 1097.0,
+ "relative_time": 66.43754887580872
+ },
+ {
+ "timestamp": "2025-01-03T17:35:50.186327",
+ "cpu_percent": 21.49,
+ "ram_percent": 45.2,
+ "ram_used_gb": 28.72582244873047,
+ "gpu_memory_used": 1101.0,
+ "relative_time": 67.51031470298767
+ },
+ {
+ "timestamp": "2025-01-03T17:35:51.271798",
+ "cpu_percent": 23.12,
+ "ram_percent": 45.1,
+ "ram_used_gb": 28.719581604003906,
+ "gpu_memory_used": 1101.0,
+ "relative_time": 68.5964903831482
+ },
+ {
+ "timestamp": "2025-01-03T17:35:52.359436",
+ "cpu_percent": 22.75,
+ "ram_percent": 45.2,
+ "ram_used_gb": 28.746524810791016,
+ "gpu_memory_used": 1101.0,
+ "relative_time": 69.68625974655151
+ },
+ {
+ "timestamp": "2025-01-03T17:35:53.439928",
+ "cpu_percent": 26.48,
+ "ram_percent": 45.2,
+ "ram_used_gb": 28.744590759277344,
+ "gpu_memory_used": 1101.0,
+ "relative_time": 70.76669788360596
+ },
+ {
+ "timestamp": "2025-01-03T17:35:54.529847",
+ "cpu_percent": 23.72,
+ "ram_percent": 45.1,
+ "ram_used_gb": 28.71523666381836,
+ "gpu_memory_used": 1101.0,
+ "relative_time": 71.86323857307434
+ },
+ {
+ "timestamp": "2025-01-03T17:35:55.620268",
+ "cpu_percent": 24.44,
+ "ram_percent": 45.1,
+ "ram_used_gb": 28.712608337402344,
+ "gpu_memory_used": 1101.0,
+ "relative_time": 72.96533226966858
+ },
+ {
+ "timestamp": "2025-01-03T17:35:56.726510",
+ "cpu_percent": 22.93,
+ "ram_percent": 45.1,
+ "ram_used_gb": 28.709251403808594,
+ "gpu_memory_used": 1098.0,
+ "relative_time": 74.05393600463867
+ },
+ {
+ "timestamp": "2025-01-03T17:35:57.816277",
+ "cpu_percent": 24.19,
+ "ram_percent": 45.2,
+ "ram_used_gb": 28.72509002685547,
+ "gpu_memory_used": 1098.0,
+ "relative_time": 75.1400306224823
+ },
+ {
+ "timestamp": "2025-01-03T17:35:58.889283",
+ "cpu_percent": 23.95,
+ "ram_percent": 45.2,
+ "ram_used_gb": 28.72734832763672,
+ "gpu_memory_used": 1098.0,
+ "relative_time": 76.21520328521729
+ },
+ {
+ "timestamp": "2025-01-03T17:35:59.964425",
+ "cpu_percent": 23.01,
+ "ram_percent": 45.2,
+ "ram_used_gb": 28.72198486328125,
+ "gpu_memory_used": 1098.0,
+ "relative_time": 77.29325318336487
+ },
+ {
+ "timestamp": "2025-01-03T17:36:01.053027",
+ "cpu_percent": 24.34,
+ "ram_percent": 45.2,
+ "ram_used_gb": 28.740951538085938,
+ "gpu_memory_used": 1098.0,
+ "relative_time": 78.37847518920898
+ },
+ {
+ "timestamp": "2025-01-03T17:36:02.129127",
+ "cpu_percent": 21.81,
+ "ram_percent": 45.2,
+ "ram_used_gb": 28.770435333251953,
+ "gpu_memory_used": 1098.0,
+ "relative_time": 79.45529294013977
+ },
+ {
+ "timestamp": "2025-01-03T17:36:03.205976",
+ "cpu_percent": 25.2,
+ "ram_percent": 45.4,
+ "ram_used_gb": 28.847518920898438,
+ "gpu_memory_used": 1098.0,
+ "relative_time": 80.5142834186554
+ },
+ {
+ "timestamp": "2025-01-03T17:36:04.268476",
+ "cpu_percent": 25.09,
+ "ram_percent": 45.4,
+ "ram_used_gb": 28.853740692138672,
+ "gpu_memory_used": 1098.0,
+ "relative_time": 81.59653949737549
+ },
+ {
+ "timestamp": "2025-01-03T17:36:05.346965",
+ "cpu_percent": 25.51,
+ "ram_percent": 45.3,
+ "ram_used_gb": 28.825660705566406,
+ "gpu_memory_used": 1098.0,
+ "relative_time": 82.67025065422058
+ },
+ {
+ "timestamp": "2025-01-03T17:36:06.420210",
+ "cpu_percent": 23.64,
+ "ram_percent": 45.3,
+ "ram_used_gb": 28.82675552368164,
+ "gpu_memory_used": 1098.0,
+ "relative_time": 83.76902842521667
+ },
+ {
+ "timestamp": "2025-01-03T17:36:07.528643",
+ "cpu_percent": 24.17,
+ "ram_percent": 45.3,
+ "ram_used_gb": 28.835819244384766,
+ "gpu_memory_used": 1098.0,
+ "relative_time": 84.84460020065308
+ },
+ {
+ "timestamp": "2025-01-03T17:36:08.603019",
+ "cpu_percent": 21.95,
+ "ram_percent": 45.3,
+ "ram_used_gb": 28.832855224609375,
+ "gpu_memory_used": 1098.0,
+ "relative_time": 85.92997336387634
+ },
+ {
+ "timestamp": "2025-01-03T17:36:09.692869",
+ "cpu_percent": 21.41,
+ "ram_percent": 45.3,
+ "ram_used_gb": 28.812267303466797,
+ "gpu_memory_used": 1098.0,
+ "relative_time": 87.03102254867554
+ },
+ {
+ "timestamp": "2025-01-03T17:36:10.782205",
+ "cpu_percent": 25.44,
+ "ram_percent": 45.3,
+ "ram_used_gb": 28.818920135498047,
+ "gpu_memory_used": 1094.0,
+ "relative_time": 88.12934947013855
+ },
+ {
+ "timestamp": "2025-01-03T17:36:11.887369",
+ "cpu_percent": 31.61,
+ "ram_percent": 45.4,
+ "ram_used_gb": 28.850399017333984,
+ "gpu_memory_used": 1094.0,
+ "relative_time": 89.22751259803772
+ },
+ {
+ "timestamp": "2025-01-03T17:36:12.980251",
+ "cpu_percent": 22.99,
+ "ram_percent": 45.3,
+ "ram_used_gb": 28.846778869628906,
+ "gpu_memory_used": 1094.0,
+ "relative_time": 90.30405187606812
+ },
+ {
+ "timestamp": "2025-01-03T17:36:14.055303",
+ "cpu_percent": 23.76,
+ "ram_percent": 45.3,
+ "ram_used_gb": 28.846450805664062,
+ "gpu_memory_used": 1094.0,
+ "relative_time": 91.38480973243713
+ },
+ {
+ "timestamp": "2025-01-03T17:36:15.147210",
+ "cpu_percent": 22.66,
+ "ram_percent": 45.3,
+ "ram_used_gb": 28.828269958496094,
+ "gpu_memory_used": 1090.0,
+ "relative_time": 92.47204494476318
+ },
+ {
+ "timestamp": "2025-01-03T17:36:16.223825",
+ "cpu_percent": 24.72,
+ "ram_percent": 45.3,
+ "ram_used_gb": 28.836009979248047,
+ "gpu_memory_used": 1090.0,
+ "relative_time": 93.55564045906067
+ },
+ {
+ "timestamp": "2025-01-03T17:36:17.312873",
+ "cpu_percent": 21.48,
+ "ram_percent": 45.3,
+ "ram_used_gb": 28.839397430419922,
+ "gpu_memory_used": 1090.0,
+ "relative_time": 94.63971495628357
+ },
+ {
+ "timestamp": "2025-01-03T17:36:18.389124",
+ "cpu_percent": 26.63,
+ "ram_percent": 45.3,
+ "ram_used_gb": 28.846328735351562,
+ "gpu_memory_used": 1090.0,
+ "relative_time": 95.71641564369202
+ },
+ {
+ "timestamp": "2025-01-03T17:36:19.468453",
+ "cpu_percent": 25.13,
+ "ram_percent": 45.3,
+ "ram_used_gb": 28.829818725585938,
+ "gpu_memory_used": 1099.0,
+ "relative_time": 96.7940239906311
+ },
+ {
+ "timestamp": "2025-01-03T17:36:20.545028",
+ "cpu_percent": 26.21,
+ "ram_percent": 45.3,
+ "ram_used_gb": 28.82284164428711,
+ "gpu_memory_used": 1099.0,
+ "relative_time": 97.87617611885071
+ },
+ {
+ "timestamp": "2025-01-03T17:36:21.625911",
+ "cpu_percent": 28.04,
+ "ram_percent": 45.3,
+ "ram_used_gb": 28.825241088867188,
+ "gpu_memory_used": 1101.0,
+ "relative_time": 98.9656343460083
+ },
+ {
+ "timestamp": "2025-01-03T17:36:22.726280",
+ "cpu_percent": 24.44,
+ "ram_percent": 45.4,
+ "ram_used_gb": 28.852622985839844,
+ "gpu_memory_used": 1101.0,
+ "relative_time": 100.05247807502747
+ },
+ {
+ "timestamp": "2025-01-03T17:36:23.800699",
+ "cpu_percent": 22.97,
+ "ram_percent": 45.4,
+ "ram_used_gb": 28.851558685302734,
+ "gpu_memory_used": 1101.0,
+ "relative_time": 101.14096426963806
+ },
+ {
+ "timestamp": "2025-01-03T17:36:24.901804",
+ "cpu_percent": 23.85,
+ "ram_percent": 45.3,
+ "ram_used_gb": 28.82904052734375,
+ "gpu_memory_used": 1101.0,
+ "relative_time": 102.23190665245056
+ },
+ {
+ "timestamp": "2025-01-03T17:36:25.994492",
+ "cpu_percent": 25.47,
+ "ram_percent": 45.3,
+ "ram_used_gb": 28.835567474365234,
+ "gpu_memory_used": 1100.0,
+ "relative_time": 103.30232405662537
+ },
+ {
+ "timestamp": "2025-01-03T17:36:27.054728",
+ "cpu_percent": 23.06,
+ "ram_percent": 45.4,
+ "ram_used_gb": 28.848636627197266,
+ "gpu_memory_used": 1100.0,
+ "relative_time": 104.36360669136047
+ },
+ {
+ "timestamp": "2025-01-03T17:36:28.117967",
+ "cpu_percent": 22.29,
+ "ram_percent": 45.3,
+ "ram_used_gb": 28.841327667236328,
+ "gpu_memory_used": 1100.0,
+ "relative_time": 105.43805932998657
+ },
+ {
+ "timestamp": "2025-01-03T17:36:29.195306",
+ "cpu_percent": 22.9,
+ "ram_percent": 45.4,
+ "ram_used_gb": 28.852493286132812,
+ "gpu_memory_used": 1100.0,
+ "relative_time": 106.5122537612915
+ },
+ {
+ "timestamp": "2025-01-03T17:36:30.272864",
+ "cpu_percent": 24.17,
+ "ram_percent": 45.3,
+ "ram_used_gb": 28.838626861572266,
+ "gpu_memory_used": 1100.0,
+ "relative_time": 107.59854078292847
+ },
+ {
+ "timestamp": "2025-01-03T17:36:31.348038",
+ "cpu_percent": 22.76,
+ "ram_percent": 45.3,
+ "ram_used_gb": 28.829647064208984,
+ "gpu_memory_used": 1100.0,
+ "relative_time": 108.67652320861816
+ },
+ {
+ "timestamp": "2025-01-03T17:36:32.438919",
+ "cpu_percent": 26.26,
+ "ram_percent": 45.4,
+ "ram_used_gb": 28.863739013671875,
+ "gpu_memory_used": 1100.0,
+ "relative_time": 109.74629759788513
+ },
+ {
+ "timestamp": "2025-01-03T17:36:33.502904",
+ "cpu_percent": 25.04,
+ "ram_percent": 45.4,
+ "ram_used_gb": 28.8487548828125,
+ "gpu_memory_used": 1100.0,
+ "relative_time": 110.81692504882812
+ },
+ {
+ "timestamp": "2025-01-03T17:36:34.580473",
+ "cpu_percent": 23.49,
+ "ram_percent": 45.3,
+ "ram_used_gb": 28.804698944091797,
+ "gpu_memory_used": 1100.0,
+ "relative_time": 111.91362714767456
+ },
+ {
+ "timestamp": "2025-01-03T17:36:35.672160",
+ "cpu_percent": 23.94,
+ "ram_percent": 45.3,
+ "ram_used_gb": 28.81186294555664,
+ "gpu_memory_used": 1100.0,
+ "relative_time": 113.00246286392212
+ },
+ {
+ "timestamp": "2025-01-03T17:36:36.765325",
+ "cpu_percent": 27.01,
+ "ram_percent": 45.3,
+ "ram_used_gb": 28.814517974853516,
+ "gpu_memory_used": 1100.0,
+ "relative_time": 114.10090208053589
+ },
+ {
+ "timestamp": "2025-01-03T17:36:37.857184",
+ "cpu_percent": 24.68,
+ "ram_percent": 45.3,
+ "ram_used_gb": 28.819175720214844,
+ "gpu_memory_used": 1100.0,
+ "relative_time": 115.20764112472534
+ },
+ {
+ "timestamp": "2025-01-03T17:36:38.964872",
+ "cpu_percent": 23.33,
+ "ram_percent": 45.3,
+ "ram_used_gb": 28.817371368408203,
+ "gpu_memory_used": 1100.0,
+ "relative_time": 116.29335045814514
+ },
+ {
+ "timestamp": "2025-01-03T17:36:40.055011",
+ "cpu_percent": 24.12,
+ "ram_percent": 45.3,
+ "ram_used_gb": 28.810909271240234,
+ "gpu_memory_used": 1100.0,
+ "relative_time": 117.36396074295044
+ },
+ {
+ "timestamp": "2025-01-03T17:36:41.117236",
+ "cpu_percent": 23.84,
+ "ram_percent": 45.3,
+ "ram_used_gb": 28.8153076171875,
+ "gpu_memory_used": 1100.0,
+ "relative_time": 118.4630720615387
+ },
+ {
+ "timestamp": "2025-01-03T17:36:42.216589",
+ "cpu_percent": 24.87,
+ "ram_percent": 45.4,
+ "ram_used_gb": 28.848403930664062,
+ "gpu_memory_used": 1100.0,
+ "relative_time": 119.52416563034058
+ },
+ {
+ "timestamp": "2025-01-03T17:36:43.281897",
+ "cpu_percent": 24.11,
+ "ram_percent": 45.4,
+ "ram_used_gb": 28.850421905517578,
+ "gpu_memory_used": 1100.0,
+ "relative_time": 120.61504173278809
+ },
+ {
+ "timestamp": "2025-01-03T17:36:44.374829",
+ "cpu_percent": 25.95,
+ "ram_percent": 45.3,
+ "ram_used_gb": 28.838001251220703,
+ "gpu_memory_used": 1100.0,
+ "relative_time": 121.72348999977112
+ },
+ {
+ "timestamp": "2025-01-03T17:36:45.479163",
+ "cpu_percent": 23.37,
+ "ram_percent": 45.3,
+ "ram_used_gb": 28.831417083740234,
+ "gpu_memory_used": 1100.0,
+ "relative_time": 122.79204487800598
+ },
+ {
+ "timestamp": "2025-01-03T17:36:46.542153",
+ "cpu_percent": 24.43,
+ "ram_percent": 45.3,
+ "ram_used_gb": 28.82573699951172,
+ "gpu_memory_used": 1100.0,
+ "relative_time": 123.8669421672821
+ },
+ {
+ "timestamp": "2025-01-03T17:36:47.630420",
+ "cpu_percent": 22.51,
+ "ram_percent": 45.3,
+ "ram_used_gb": 28.838607788085938,
+ "gpu_memory_used": 1100.0,
+ "relative_time": 124.96710133552551
+ },
+ {
+ "timestamp": "2025-01-03T17:36:48.720922",
+ "cpu_percent": 22.87,
+ "ram_percent": 45.3,
+ "ram_used_gb": 28.829120635986328,
+ "gpu_memory_used": 1100.0,
+ "relative_time": 126.04526543617249
+ },
+ {
+ "timestamp": "2025-01-03T17:36:49.795612",
+ "cpu_percent": 24.09,
+ "ram_percent": 45.3,
+ "ram_used_gb": 28.815223693847656,
+ "gpu_memory_used": 1100.0,
+ "relative_time": 127.1201822757721
+ },
+ {
+ "timestamp": "2025-01-03T17:36:50.869719",
+ "cpu_percent": 23.14,
+ "ram_percent": 45.3,
+ "ram_used_gb": 28.82253646850586,
+ "gpu_memory_used": 1099.0,
+ "relative_time": 128.17721223831177
+ },
+ {
+ "timestamp": "2025-01-03T17:36:51.936893",
+ "cpu_percent": 24.92,
+ "ram_percent": 45.3,
+ "ram_used_gb": 28.844318389892578,
+ "gpu_memory_used": 1098.0,
+ "relative_time": 129.2737376689911
+ },
+ {
+ "timestamp": "2025-01-03T17:36:53.025295",
+ "cpu_percent": 23.94,
+ "ram_percent": 45.3,
+ "ram_used_gb": 28.842689514160156,
+ "gpu_memory_used": 1098.0,
+ "relative_time": 130.35286784172058
+ },
+ {
+ "timestamp": "2025-01-03T17:36:54.105793",
+ "cpu_percent": 24.66,
+ "ram_percent": 45.3,
+ "ram_used_gb": 28.843029022216797,
+ "gpu_memory_used": 1094.0,
+ "relative_time": 131.4563491344452
+ },
+ {
+ "timestamp": "2025-01-03T17:36:55.210765",
+ "cpu_percent": 20.11,
+ "ram_percent": 45.4,
+ "ram_used_gb": 28.86923599243164,
+ "gpu_memory_used": 1101.0,
+ "relative_time": 132.53530836105347
+ },
+ {
+ "timestamp": "2025-01-03T17:36:56.288346",
+ "cpu_percent": 22.94,
+ "ram_percent": 45.4,
+ "ram_used_gb": 28.865219116210938,
+ "gpu_memory_used": 1101.0,
+ "relative_time": 133.6023223400116
+ },
+ {
+ "timestamp": "2025-01-03T17:36:57.350674",
+ "cpu_percent": 25.17,
+ "ram_percent": 45.4,
+ "ram_used_gb": 28.871688842773438,
+ "gpu_memory_used": 1101.0,
+ "relative_time": 134.66028475761414
+ },
+ {
+ "timestamp": "2025-01-03T17:36:58.410089",
+ "cpu_percent": 22.78,
+ "ram_percent": 45.4,
+ "ram_used_gb": 28.883277893066406,
+ "gpu_memory_used": 1101.0,
+ "relative_time": 135.721449136734
+ },
+ {
+ "timestamp": "2025-01-03T17:36:59.470322",
+ "cpu_percent": 25.26,
+ "ram_percent": 45.4,
+ "ram_used_gb": 28.87207794189453,
+ "gpu_memory_used": 1101.0,
+ "relative_time": 136.79168581962585
+ },
+ {
+ "timestamp": "2025-01-03T17:37:00.544907",
+ "cpu_percent": 28.76,
+ "ram_percent": 45.6,
+ "ram_used_gb": 28.998672485351562,
+ "gpu_memory_used": 1107.0,
+ "relative_time": 137.88635969161987
+ },
+ {
+ "timestamp": "2025-01-03T17:37:01.635176",
+ "cpu_percent": 24.18,
+ "ram_percent": 45.6,
+ "ram_used_gb": 29.019405364990234,
+ "gpu_memory_used": 1107.0,
+ "relative_time": 138.96897864341736
+ },
+ {
+ "timestamp": "2025-01-03T17:37:02.724041",
+ "cpu_percent": 29.28,
+ "ram_percent": 45.7,
+ "ram_used_gb": 29.05569076538086,
+ "gpu_memory_used": 1107.0,
+ "relative_time": 140.05017518997192
+ },
+ {
+ "timestamp": "2025-01-03T17:37:03.800711",
+ "cpu_percent": 23.59,
+ "ram_percent": 45.6,
+ "ram_used_gb": 29.012020111083984,
+ "gpu_memory_used": 1107.0,
+ "relative_time": 141.12687349319458
+ },
+ {
+ "timestamp": "2025-01-03T17:37:04.879064",
+ "cpu_percent": 23.78,
+ "ram_percent": 45.6,
+ "ram_used_gb": 28.976219177246094,
+ "gpu_memory_used": 1107.0,
+ "relative_time": 142.202130317688
+ },
+ {
+ "timestamp": "2025-01-03T17:37:05.956549",
+ "cpu_percent": 25.17,
+ "ram_percent": 45.6,
+ "ram_used_gb": 28.987720489501953,
+ "gpu_memory_used": 1107.0,
+ "relative_time": 143.2989046573639
+ },
+ {
+ "timestamp": "2025-01-03T17:37:07.048608",
+ "cpu_percent": 21.94,
+ "ram_percent": 45.6,
+ "ram_used_gb": 29.001422882080078,
+ "gpu_memory_used": 1107.0,
+ "relative_time": 144.38402938842773
+ },
+ {
+ "timestamp": "2025-01-03T17:37:08.145411",
+ "cpu_percent": 24.68,
+ "ram_percent": 45.6,
+ "ram_used_gb": 28.99661636352539,
+ "gpu_memory_used": 1107.0,
+ "relative_time": 145.45475459098816
+ },
+ {
+ "timestamp": "2025-01-03T17:37:09.205410",
+ "cpu_percent": 22.95,
+ "ram_percent": 45.5,
+ "ram_used_gb": 28.954647064208984,
+ "gpu_memory_used": 1107.0,
+ "relative_time": 146.52960181236267
+ },
+ {
+ "timestamp": "2025-01-03T17:37:10.282693",
+ "cpu_percent": 24.02,
+ "ram_percent": 45.5,
+ "ram_used_gb": 28.946456909179688,
+ "gpu_memory_used": 1107.0,
+ "relative_time": 147.60768365859985
+ },
+ {
+ "timestamp": "2025-01-03T17:37:11.358777",
+ "cpu_percent": 25.61,
+ "ram_percent": 45.5,
+ "ram_used_gb": 28.94234848022461,
+ "gpu_memory_used": 1107.0,
+ "relative_time": 148.6980345249176
+ },
+ {
+ "timestamp": "2025-01-03T17:37:12.448461",
+ "cpu_percent": 23.65,
+ "ram_percent": 45.5,
+ "ram_used_gb": 28.95345687866211,
+ "gpu_memory_used": 1107.0,
+ "relative_time": 149.7676465511322
+ },
+ {
+ "timestamp": "2025-01-03T17:37:13.523095",
+ "cpu_percent": 20.96,
+ "ram_percent": 45.5,
+ "ram_used_gb": 28.952884674072266,
+ "gpu_memory_used": 1105.0,
+ "relative_time": 150.85523986816406
+ },
+ {
+ "timestamp": "2025-01-03T17:37:14.616165",
+ "cpu_percent": 22.93,
+ "ram_percent": 45.5,
+ "ram_used_gb": 28.9246826171875,
+ "gpu_memory_used": 1105.0,
+ "relative_time": 151.93826293945312
+ },
+ {
+ "timestamp": "2025-01-03T17:37:15.692716",
+ "cpu_percent": 23.36,
+ "ram_percent": 45.5,
+ "ram_used_gb": 28.923118591308594,
+ "gpu_memory_used": 1105.0,
+ "relative_time": 153.02452492713928
+ },
+ {
+ "timestamp": "2025-01-03T17:37:16.786480",
+ "cpu_percent": 24.34,
+ "ram_percent": 45.5,
+ "ram_used_gb": 28.922576904296875,
+ "gpu_memory_used": 1101.0,
+ "relative_time": 154.13069224357605
+ },
+ {
+ "timestamp": "2025-01-03T17:37:17.893401",
+ "cpu_percent": 23.93,
+ "ram_percent": 45.5,
+ "ram_used_gb": 28.929943084716797,
+ "gpu_memory_used": 1101.0,
+ "relative_time": 155.20411658287048
+ },
+ {
+ "timestamp": "2025-01-03T17:37:18.957325",
+ "cpu_percent": 22.57,
+ "ram_percent": 45.5,
+ "ram_used_gb": 28.92938995361328,
+ "gpu_memory_used": 1101.0,
+ "relative_time": 156.2805426120758
+ },
+ {
+ "timestamp": "2025-01-03T17:37:20.032870",
+ "cpu_percent": 24.31,
+ "ram_percent": 45.5,
+ "ram_used_gb": 28.91608428955078,
+ "gpu_memory_used": 1101.0,
+ "relative_time": 157.3575358390808
+ },
+ {
+ "timestamp": "2025-01-03T17:37:21.107354",
+ "cpu_percent": 23.97,
+ "ram_percent": 45.5,
+ "ram_used_gb": 28.927276611328125,
+ "gpu_memory_used": 1101.0,
+ "relative_time": 158.43159866333008
+ },
+ {
+ "timestamp": "2025-01-03T17:37:22.182666",
+ "cpu_percent": 22.99,
+ "ram_percent": 45.5,
+ "ram_used_gb": 28.96563720703125,
+ "gpu_memory_used": 1101.0,
+ "relative_time": 159.51421451568604
+ },
+ {
+ "timestamp": "2025-01-03T17:37:23.274242",
+ "cpu_percent": 23.44,
+ "ram_percent": 45.5,
+ "ram_used_gb": 28.972015380859375,
+ "gpu_memory_used": 1097.0,
+ "relative_time": 160.60975646972656
+ },
+ {
+ "timestamp": "2025-01-03T17:37:24.372135",
+ "cpu_percent": 22.5,
+ "ram_percent": 45.6,
+ "ram_used_gb": 28.979278564453125,
+ "gpu_memory_used": 1097.0,
+ "relative_time": 161.72465538978577
+ },
+ {
+ "timestamp": "2025-01-03T17:37:25.479851",
+ "cpu_percent": 23.89,
+ "ram_percent": 45.6,
+ "ram_used_gb": 28.982975006103516,
+ "gpu_memory_used": 1097.0,
+ "relative_time": 162.80453157424927
+ },
+ {
+ "timestamp": "2025-01-03T17:37:26.557456",
+ "cpu_percent": 23.33,
+ "ram_percent": 45.6,
+ "ram_used_gb": 28.983108520507812,
+ "gpu_memory_used": 1097.0,
+ "relative_time": 163.90639925003052
+ },
+ {
+ "timestamp": "2025-01-03T17:37:27.662359",
+ "cpu_percent": 23.73,
+ "ram_percent": 45.6,
+ "ram_used_gb": 28.9891357421875,
+ "gpu_memory_used": 1097.0,
+ "relative_time": 164.97102570533752
+ },
+ {
+ "timestamp": "2025-01-03T17:37:28.729649",
+ "cpu_percent": 24.86,
+ "ram_percent": 45.5,
+ "ram_used_gb": 28.938156127929688,
+ "gpu_memory_used": 1097.0,
+ "relative_time": 166.06650972366333
+ },
+ {
+ "timestamp": "2025-01-03T17:37:29.821140",
+ "cpu_percent": 24.66,
+ "ram_percent": 45.5,
+ "ram_used_gb": 28.924610137939453,
+ "gpu_memory_used": 1097.0,
+ "relative_time": 167.14614033699036
+ },
+ {
+ "timestamp": "2025-01-03T17:37:30.899520",
+ "cpu_percent": 24.38,
+ "ram_percent": 45.4,
+ "ram_used_gb": 28.88815689086914,
+ "gpu_memory_used": 1097.0,
+ "relative_time": 168.21083331108093
+ },
+ {
+ "timestamp": "2025-01-03T17:37:31.971936",
+ "cpu_percent": 21.86,
+ "ram_percent": 45.5,
+ "ram_used_gb": 28.91744613647461,
+ "gpu_memory_used": 1097.0,
+ "relative_time": 169.29783082008362
+ },
+ {
+ "timestamp": "2025-01-03T17:37:33.046151",
+ "cpu_percent": 24.37,
+ "ram_percent": 45.5,
+ "ram_used_gb": 28.91482925415039,
+ "gpu_memory_used": 1097.0,
+ "relative_time": 170.36634373664856
+ },
+ {
+ "timestamp": "2025-01-03T17:37:34.127110",
+ "cpu_percent": 25.14,
+ "ram_percent": 45.5,
+ "ram_used_gb": 28.912853240966797,
+ "gpu_memory_used": 1097.0,
+ "relative_time": 171.45116710662842
+ },
+ {
+ "timestamp": "2025-01-03T17:37:35.202044",
+ "cpu_percent": 24.15,
+ "ram_percent": 45.4,
+ "ram_used_gb": 28.891281127929688,
+ "gpu_memory_used": 1097.0,
+ "relative_time": 172.50907135009766
+ },
+ {
+ "timestamp": "2025-01-03T17:37:36.259605",
+ "cpu_percent": 22.94,
+ "ram_percent": 45.4,
+ "ram_used_gb": 28.900623321533203,
+ "gpu_memory_used": 1097.0,
+ "relative_time": 173.5696086883545
+ },
+ {
+ "timestamp": "2025-01-03T17:37:37.322695",
+ "cpu_percent": 23.68,
+ "ram_percent": 45.5,
+ "ram_used_gb": 28.914325714111328,
+ "gpu_memory_used": 1097.0,
+ "relative_time": 174.63046431541443
+ },
+ {
+ "timestamp": "2025-01-03T17:37:38.384439",
+ "cpu_percent": 24.78,
+ "ram_percent": 45.4,
+ "ram_used_gb": 28.909072875976562,
+ "gpu_memory_used": 1097.0,
+ "relative_time": 175.72250294685364
+ },
+ {
+ "timestamp": "2025-01-03T17:37:39.474721",
+ "cpu_percent": 23.01,
+ "ram_percent": 45.4,
+ "ram_used_gb": 28.891307830810547,
+ "gpu_memory_used": 1097.0,
+ "relative_time": 176.81701827049255
+ },
+ {
+ "timestamp": "2025-01-03T17:37:40.579589",
+ "cpu_percent": 23.94,
+ "ram_percent": 45.5,
+ "ram_used_gb": 28.943130493164062,
+ "gpu_memory_used": 1097.0,
+ "relative_time": 177.92131423950195
+ },
+ {
+ "timestamp": "2025-01-03T17:37:41.683635",
+ "cpu_percent": 24.74,
+ "ram_percent": 45.5,
+ "ram_used_gb": 28.932857513427734,
+ "gpu_memory_used": 1096.0,
+ "relative_time": 179.0098111629486
+ },
+ {
+ "timestamp": "2025-01-03T17:37:42.770033",
+ "cpu_percent": 21.96,
+ "ram_percent": 45.7,
+ "ram_used_gb": 29.069801330566406,
+ "gpu_memory_used": 1096.0,
+ "relative_time": 180.11816310882568
+ },
+ {
+ "timestamp": "2025-01-03T17:37:43.875555",
+ "cpu_percent": 24.46,
+ "ram_percent": 45.9,
+ "ram_used_gb": 29.216472625732422,
+ "gpu_memory_used": 1096.0,
+ "relative_time": 181.22085428237915
+ },
+ {
+ "timestamp": "2025-01-03T17:37:44.976559",
+ "cpu_percent": 23.76,
+ "ram_percent": 45.6,
+ "ram_used_gb": 28.995086669921875,
+ "gpu_memory_used": 1096.0,
+ "relative_time": 182.30824899673462
+ },
+ {
+ "timestamp": "2025-01-03T17:37:46.064782",
+ "cpu_percent": 22.54,
+ "ram_percent": 45.8,
+ "ram_used_gb": 29.127277374267578,
+ "gpu_memory_used": 1096.0,
+ "relative_time": 183.41359210014343
+ },
+ {
+ "timestamp": "2025-01-03T17:37:47.166282",
+ "cpu_percent": 23.88,
+ "ram_percent": 45.5,
+ "ram_used_gb": 28.938556671142578,
+ "gpu_memory_used": 1096.0,
+ "relative_time": 184.49055337905884
+ },
+ {
+ "timestamp": "2025-01-03T17:37:48.239146",
+ "cpu_percent": 25.4,
+ "ram_percent": 45.5,
+ "ram_used_gb": 28.928009033203125,
+ "gpu_memory_used": 1096.0,
+ "relative_time": 185.56559538841248
+ },
+ {
+ "timestamp": "2025-01-03T17:37:49.315823",
+ "cpu_percent": 23.33,
+ "ram_percent": 45.5,
+ "ram_used_gb": 28.938018798828125,
+ "gpu_memory_used": 1096.0,
+ "relative_time": 186.64829564094543
+ },
+ {
+ "timestamp": "2025-01-03T17:37:50.408592",
+ "cpu_percent": 23.69,
+ "ram_percent": 45.5,
+ "ram_used_gb": 28.91741180419922,
+ "gpu_memory_used": 1096.0,
+ "relative_time": 187.7367081642151
+ },
+ {
+ "timestamp": "2025-01-03T17:37:51.499880",
+ "cpu_percent": 26.88,
+ "ram_percent": 45.5,
+ "ram_used_gb": 28.922603607177734,
+ "gpu_memory_used": 1096.0,
+ "relative_time": 188.8240191936493
+ },
+ {
+ "timestamp": "2025-01-03T17:37:52.586316",
+ "cpu_percent": 22.99,
+ "ram_percent": 45.5,
+ "ram_used_gb": 28.949260711669922,
+ "gpu_memory_used": 1096.0,
+ "relative_time": 189.92143487930298
+ },
+ {
+ "timestamp": "2025-01-03T17:37:53.672626",
+ "cpu_percent": 23.39,
+ "ram_percent": 45.5,
+ "ram_used_gb": 28.948657989501953,
+ "gpu_memory_used": 1096.0,
+ "relative_time": 190.984858751297
+ },
+ {
+ "timestamp": "2025-01-03T17:37:54.746634",
+ "cpu_percent": 24.11,
+ "ram_percent": 45.5,
+ "ram_used_gb": 28.925277709960938,
+ "gpu_memory_used": 1096.0,
+ "relative_time": 192.0710301399231
+ },
+ {
+ "timestamp": "2025-01-03T17:37:55.820140",
+ "cpu_percent": 22.63,
+ "ram_percent": 45.5,
+ "ram_used_gb": 28.933975219726562,
+ "gpu_memory_used": 1096.0,
+ "relative_time": 193.14398980140686
+ },
+ {
+ "timestamp": "2025-01-03T17:37:56.898580",
+ "cpu_percent": 23.97,
+ "ram_percent": 45.5,
+ "ram_used_gb": 28.95156478881836,
+ "gpu_memory_used": 1096.0,
+ "relative_time": 194.23374843597412
+ },
+ {
+ "timestamp": "2025-01-03T17:37:57.988946",
+ "cpu_percent": 23.97,
+ "ram_percent": 45.5,
+ "ram_used_gb": 28.946693420410156,
+ "gpu_memory_used": 1096.0,
+ "relative_time": 195.30232334136963
+ },
+ {
+ "timestamp": "2025-01-03T17:37:59.064831",
+ "cpu_percent": 24.23,
+ "ram_percent": 45.5,
+ "ram_used_gb": 28.944950103759766,
+ "gpu_memory_used": 1096.0,
+ "relative_time": 196.38868021965027
+ },
+ {
+ "timestamp": "2025-01-03T17:38:00.139407",
+ "cpu_percent": 25.11,
+ "ram_percent": 45.5,
+ "ram_used_gb": 28.923294067382812,
+ "gpu_memory_used": 1096.0,
+ "relative_time": 197.46892762184143
+ },
+ {
+ "timestamp": "2025-01-03T17:38:01.231615",
+ "cpu_percent": 23.29,
+ "ram_percent": 45.5,
+ "ram_used_gb": 28.938377380371094,
+ "gpu_memory_used": 1096.0,
+ "relative_time": 198.55928492546082
+ },
+ {
+ "timestamp": "2025-01-03T17:38:02.318916",
+ "cpu_percent": 21.24,
+ "ram_percent": 45.5,
+ "ram_used_gb": 28.966472625732422,
+ "gpu_memory_used": 1096.0,
+ "relative_time": 199.64228463172913
+ },
+ {
+ "timestamp": "2025-01-03T17:38:03.400606",
+ "cpu_percent": 24.24,
+ "ram_percent": 45.5,
+ "ram_used_gb": 28.959705352783203,
+ "gpu_memory_used": 1096.0,
+ "relative_time": 200.7337417602539
+ },
+ {
+ "timestamp": "2025-01-03T17:38:04.488903",
+ "cpu_percent": 24.32,
+ "ram_percent": 45.5,
+ "ram_used_gb": 28.94487762451172,
+ "gpu_memory_used": 1096.0,
+ "relative_time": 201.82986760139465
+ },
+ {
+ "timestamp": "2025-01-03T17:38:05.584754",
+ "cpu_percent": 19.91,
+ "ram_percent": 45.5,
+ "ram_used_gb": 28.944644927978516,
+ "gpu_memory_used": 1096.0,
+ "relative_time": 202.91072010993958
+ },
+ {
+ "timestamp": "2025-01-03T17:38:06.665108",
+ "cpu_percent": 24.06,
+ "ram_percent": 45.5,
+ "ram_used_gb": 28.949317932128906,
+ "gpu_memory_used": 1096.0,
+ "relative_time": 203.9763433933258
+ },
+ {
+ "timestamp": "2025-01-03T17:38:07.738542",
+ "cpu_percent": 23.04,
+ "ram_percent": 45.5,
+ "ram_used_gb": 28.956283569335938,
+ "gpu_memory_used": 1096.0,
+ "relative_time": 205.04700684547424
+ },
+ {
+ "timestamp": "2025-01-03T17:38:08.799958",
+ "cpu_percent": 22.69,
+ "ram_percent": 45.5,
+ "ram_used_gb": 28.951473236083984,
+ "gpu_memory_used": 1096.0,
+ "relative_time": 206.12592482566833
+ },
+ {
+ "timestamp": "2025-01-03T17:38:09.886260",
+ "cpu_percent": 25.02,
+ "ram_percent": 45.5,
+ "ram_used_gb": 28.939456939697266,
+ "gpu_memory_used": 1096.0,
+ "relative_time": 207.19513487815857
+ },
+ {
+ "timestamp": "2025-01-03T17:38:10.951074",
+ "cpu_percent": 24.25,
+ "ram_percent": 45.5,
+ "ram_used_gb": 28.949993133544922,
+ "gpu_memory_used": 1096.0,
+ "relative_time": 208.263596534729
+ },
+ {
+ "timestamp": "2025-01-03T17:38:12.021461",
+ "cpu_percent": 23.19,
+ "ram_percent": 45.6,
+ "ram_used_gb": 28.983253479003906,
+ "gpu_memory_used": 1096.0,
+ "relative_time": 209.3303987979889
+ },
+ {
+ "timestamp": "2025-01-03T17:38:13.082401",
+ "cpu_percent": 24.01,
+ "ram_percent": 45.6,
+ "ram_used_gb": 28.974666595458984,
+ "gpu_memory_used": 1096.0,
+ "relative_time": 210.41010332107544
+ },
+ {
+ "timestamp": "2025-01-03T17:38:14.161362",
+ "cpu_percent": 23.13,
+ "ram_percent": 45.6,
+ "ram_used_gb": 28.989315032958984,
+ "gpu_memory_used": 1096.0,
+ "relative_time": 211.47117710113525
+ },
+ {
+ "timestamp": "2025-01-03T17:38:15.222297",
+ "cpu_percent": 23.49,
+ "ram_percent": 45.5,
+ "ram_used_gb": 28.96143341064453,
+ "gpu_memory_used": 1096.0,
+ "relative_time": 212.53540086746216
+ },
+ {
+ "timestamp": "2025-01-03T17:38:16.298851",
+ "cpu_percent": 22.21,
+ "ram_percent": 45.5,
+ "ram_used_gb": 28.964900970458984,
+ "gpu_memory_used": 1096.0,
+ "relative_time": 213.62320566177368
+ },
+ {
+ "timestamp": "2025-01-03T17:38:17.374842",
+ "cpu_percent": 25.44,
+ "ram_percent": 45.6,
+ "ram_used_gb": 28.97797393798828,
+ "gpu_memory_used": 1096.0,
+ "relative_time": 214.69756841659546
+ },
+ {
+ "timestamp": "2025-01-03T17:38:18.449249",
+ "cpu_percent": 23.43,
+ "ram_percent": 45.6,
+ "ram_used_gb": 28.979202270507812,
+ "gpu_memory_used": 1096.0,
+ "relative_time": 215.78058910369873
+ },
+ {
+ "timestamp": "2025-01-03T17:38:19.542583",
+ "cpu_percent": 24.46,
+ "ram_percent": 45.5,
+ "ram_used_gb": 28.965267181396484,
+ "gpu_memory_used": 1096.0,
+ "relative_time": 216.87048959732056
+ },
+ {
+ "timestamp": "2025-01-03T17:38:20.619429",
+ "cpu_percent": 20.75,
+ "ram_percent": 45.6,
+ "ram_used_gb": 29.00174331665039,
+ "gpu_memory_used": 1106.0,
+ "relative_time": 217.92598581314087
+ },
+ {
+ "timestamp": "2025-01-03T17:38:21.680941",
+ "cpu_percent": 22.77,
+ "ram_percent": 45.6,
+ "ram_used_gb": 29.00212860107422,
+ "gpu_memory_used": 1106.0,
+ "relative_time": 219.00471091270447
+ },
+ {
+ "timestamp": "2025-01-03T17:38:22.755945",
+ "cpu_percent": 22.64,
+ "ram_percent": 45.6,
+ "ram_used_gb": 29.018749237060547,
+ "gpu_memory_used": 1106.0,
+ "relative_time": 220.07630491256714
+ },
+ {
+ "timestamp": "2025-01-03T17:38:23.829578",
+ "cpu_percent": 23.32,
+ "ram_percent": 45.6,
+ "ram_used_gb": 29.031707763671875,
+ "gpu_memory_used": 1106.0,
+ "relative_time": 221.14346361160278
+ },
+ {
+ "timestamp": "2025-01-03T17:38:24.906788",
+ "cpu_percent": 24.83,
+ "ram_percent": 45.7,
+ "ram_used_gb": 29.07465362548828,
+ "gpu_memory_used": 1106.0,
+ "relative_time": 222.22201228141785
+ },
+ {
+ "timestamp": "2025-01-03T17:38:25.983092",
+ "cpu_percent": 24.31,
+ "ram_percent": 45.7,
+ "ram_used_gb": 29.073284149169922,
+ "gpu_memory_used": 1106.0,
+ "relative_time": 223.32304573059082
+ },
+ {
+ "timestamp": "2025-01-03T17:38:27.086695",
+ "cpu_percent": 23.53,
+ "ram_percent": 45.7,
+ "ram_used_gb": 29.08266830444336,
+ "gpu_memory_used": 1105.0,
+ "relative_time": 224.39583778381348
+ },
+ {
+ "timestamp": "2025-01-03T17:38:28.148619",
+ "cpu_percent": 21.91,
+ "ram_percent": 45.7,
+ "ram_used_gb": 29.090988159179688,
+ "gpu_memory_used": 1102.0,
+ "relative_time": 225.47977566719055
+ },
+ {
+ "timestamp": "2025-01-03T17:38:29.229406",
+ "cpu_percent": 21.76,
+ "ram_percent": 45.7,
+ "ram_used_gb": 29.09268569946289,
+ "gpu_memory_used": 1102.0,
+ "relative_time": 226.55404257774353
+ },
+ {
+ "timestamp": "2025-01-03T17:38:30.302956",
+ "cpu_percent": 24.08,
+ "ram_percent": 45.7,
+ "ram_used_gb": 29.077293395996094,
+ "gpu_memory_used": 1102.0,
+ "relative_time": 227.62869882583618
+ },
+ {
+ "timestamp": "2025-01-03T17:38:31.381882",
+ "cpu_percent": 24.35,
+ "ram_percent": 45.7,
+ "ram_used_gb": 29.086669921875,
+ "gpu_memory_used": 1102.0,
+ "relative_time": 228.69078159332275
+ },
+ {
+ "timestamp": "2025-01-03T17:38:32.445804",
+ "cpu_percent": 20.03,
+ "ram_percent": 45.8,
+ "ram_used_gb": 29.113361358642578,
+ "gpu_memory_used": 1102.0,
+ "relative_time": 229.77253794670105
+ },
+ {
+ "timestamp": "2025-01-03T17:38:33.522412",
+ "cpu_percent": 23.34,
+ "ram_percent": 45.7,
+ "ram_used_gb": 29.09762954711914,
+ "gpu_memory_used": 1102.0,
+ "relative_time": 230.85764026641846
+ },
+ {
+ "timestamp": "2025-01-03T17:38:34.616552",
+ "cpu_percent": 23.3,
+ "ram_percent": 45.7,
+ "ram_used_gb": 29.079090118408203,
+ "gpu_memory_used": 1102.0,
+ "relative_time": 231.92474102973938
+ },
+ {
+ "timestamp": "2025-01-03T17:38:35.678749",
+ "cpu_percent": 23.45,
+ "ram_percent": 45.6,
+ "ram_used_gb": 29.03809356689453,
+ "gpu_memory_used": 1102.0,
+ "relative_time": 232.9860770702362
+ },
+ {
+ "timestamp": "2025-01-03T17:38:36.741777",
+ "cpu_percent": 22.69,
+ "ram_percent": 45.7,
+ "ram_used_gb": 29.048213958740234,
+ "gpu_memory_used": 1102.0,
+ "relative_time": 234.0679018497467
+ },
+ {
+ "timestamp": "2025-01-03T17:38:37.829685",
+ "cpu_percent": 22.24,
+ "ram_percent": 45.7,
+ "ram_used_gb": 29.05181121826172,
+ "gpu_memory_used": 1102.0,
+ "relative_time": 235.15741419792175
+ },
+ {
+ "timestamp": "2025-01-03T17:38:38.913093",
+ "cpu_percent": 22.38,
+ "ram_percent": 45.7,
+ "ram_used_gb": 29.053848266601562,
+ "gpu_memory_used": 1102.0,
+ "relative_time": 236.23899698257446
+ },
+ {
+ "timestamp": "2025-01-03T17:38:39.990580",
+ "cpu_percent": 23.18,
+ "ram_percent": 45.7,
+ "ram_used_gb": 29.047801971435547,
+ "gpu_memory_used": 1102.0,
+ "relative_time": 237.29979014396667
+ },
+ {
+ "timestamp": "2025-01-03T17:38:41.050376",
+ "cpu_percent": 23.39,
+ "ram_percent": 45.7,
+ "ram_used_gb": 29.04970932006836,
+ "gpu_memory_used": 1102.0,
+ "relative_time": 238.3743212223053
+ },
+ {
+ "timestamp": "2025-01-03T17:38:42.124373",
+ "cpu_percent": 22.72,
+ "ram_percent": 45.7,
+ "ram_used_gb": 29.08245849609375,
+ "gpu_memory_used": 1098.0,
+ "relative_time": 239.45003628730774
+ },
+ {
+ "timestamp": "2025-01-03T17:38:43.199649",
+ "cpu_percent": 25.66,
+ "ram_percent": 45.7,
+ "ram_used_gb": 29.084999084472656,
+ "gpu_memory_used": 1098.0,
+ "relative_time": 240.50825381278992
+ },
+ {
+ "timestamp": "2025-01-03T17:38:44.260682",
+ "cpu_percent": 23.85,
+ "ram_percent": 45.7,
+ "ram_used_gb": 29.082542419433594,
+ "gpu_memory_used": 1094.0,
+ "relative_time": 241.58613228797913
+ },
+ {
+ "timestamp": "2025-01-03T17:38:45.334296",
+ "cpu_percent": 23.69,
+ "ram_percent": 45.7,
+ "ram_used_gb": 29.070350646972656,
+ "gpu_memory_used": 1094.0,
+ "relative_time": 242.6601538658142
+ },
+ {
+ "timestamp": "2025-01-03T17:38:46.411938",
+ "cpu_percent": 23.01,
+ "ram_percent": 45.7,
+ "ram_used_gb": 29.07455062866211,
+ "gpu_memory_used": 1094.0,
+ "relative_time": 243.74639248847961
+ },
+ {
+ "timestamp": "2025-01-03T17:38:47.504134",
+ "cpu_percent": 21.79,
+ "ram_percent": 45.7,
+ "ram_used_gb": 29.08453369140625,
+ "gpu_memory_used": 1094.0,
+ "relative_time": 244.81757807731628
+ },
+ {
+ "timestamp": "2025-01-03T17:38:48.577966",
+ "cpu_percent": 26.51,
+ "ram_percent": 45.7,
+ "ram_used_gb": 29.07624053955078,
+ "gpu_memory_used": 1094.0,
+ "relative_time": 245.90141248703003
+ },
+ {
+ "timestamp": "2025-01-03T17:38:49.654082",
+ "cpu_percent": 24.57,
+ "ram_percent": 45.7,
+ "ram_used_gb": 29.06304931640625,
+ "gpu_memory_used": 1094.0,
+ "relative_time": 246.97977018356323
+ },
+ {
+ "timestamp": "2025-01-03T17:38:50.733158",
+ "cpu_percent": 25.44,
+ "ram_percent": 45.7,
+ "ram_used_gb": 29.063785552978516,
+ "gpu_memory_used": 1094.0,
+ "relative_time": 248.0419783592224
+ },
+ {
+ "timestamp": "2025-01-03T17:38:51.795154",
+ "cpu_percent": 24.09,
+ "ram_percent": 45.7,
+ "ram_used_gb": 29.070831298828125,
+ "gpu_memory_used": 1094.0,
+ "relative_time": 249.1282820701599
+ },
+ {
+ "timestamp": "2025-01-03T17:38:52.885541",
+ "cpu_percent": 24.81,
+ "ram_percent": 45.7,
+ "ram_used_gb": 29.097518920898438,
+ "gpu_memory_used": 1094.0,
+ "relative_time": 250.21453881263733
+ },
+ {
+ "timestamp": "2025-01-03T17:38:53.967110",
+ "cpu_percent": 23.51,
+ "ram_percent": 45.7,
+ "ram_used_gb": 29.092403411865234,
+ "gpu_memory_used": 1094.0,
+ "relative_time": 251.29131293296814
+ },
+ {
+ "timestamp": "2025-01-03T17:38:55.053817",
+ "cpu_percent": 25.5,
+ "ram_percent": 45.6,
+ "ram_used_gb": 29.026763916015625,
+ "gpu_memory_used": 1094.0,
+ "relative_time": 252.38121151924133
+ },
+ {
+ "timestamp": "2025-01-03T17:38:56.130310",
+ "cpu_percent": 23.02,
+ "ram_percent": 45.6,
+ "ram_used_gb": 29.029300689697266,
+ "gpu_memory_used": 1094.0,
+ "relative_time": 253.46182870864868
+ },
+ {
+ "timestamp": "2025-01-03T17:38:57.222446",
+ "cpu_percent": 23.57,
+ "ram_percent": 45.7,
+ "ram_used_gb": 29.048370361328125,
+ "gpu_memory_used": 1094.0,
+ "relative_time": 254.5469455718994
+ },
+ {
+ "timestamp": "2025-01-03T17:38:58.302749",
+ "cpu_percent": 23.36,
+ "ram_percent": 45.7,
+ "ram_used_gb": 29.04967498779297,
+ "gpu_memory_used": 1094.0,
+ "relative_time": 255.6269781589508
+ },
+ {
+ "timestamp": "2025-01-03T17:38:59.390900",
+ "cpu_percent": 23.02,
+ "ram_percent": 45.7,
+ "ram_used_gb": 29.05054473876953,
+ "gpu_memory_used": 1094.0,
+ "relative_time": 256.73245453834534
+ },
+ {
+ "timestamp": "2025-01-03T17:39:00.484435",
+ "cpu_percent": 27.29,
+ "ram_percent": 45.6,
+ "ram_used_gb": 29.031696319580078,
+ "gpu_memory_used": 1094.0,
+ "relative_time": 257.81579089164734
+ },
+ {
+ "timestamp": "2025-01-03T17:39:01.571625",
+ "cpu_percent": 23.57,
+ "ram_percent": 45.6,
+ "ram_used_gb": 29.025615692138672,
+ "gpu_memory_used": 1094.0,
+ "relative_time": 258.8843922615051
+ },
+ {
+ "timestamp": "2025-01-03T17:39:02.647939",
+ "cpu_percent": 22.86,
+ "ram_percent": 45.7,
+ "ram_used_gb": 29.05437469482422,
+ "gpu_memory_used": 1094.0,
+ "relative_time": 259.9595341682434
+ },
+ {
+ "timestamp": "2025-01-03T17:39:03.714104",
+ "cpu_percent": 23.78,
+ "ram_percent": 45.7,
+ "ram_used_gb": 29.049884796142578,
+ "gpu_memory_used": 1094.0,
+ "relative_time": 261.0387761592865
+ },
+ {
+ "timestamp": "2025-01-03T17:39:04.790962",
+ "cpu_percent": 23.94,
+ "ram_percent": 45.6,
+ "ram_used_gb": 29.026226043701172,
+ "gpu_memory_used": 1094.0,
+ "relative_time": 262.1065058708191
+ },
+ {
+ "timestamp": "2025-01-03T17:39:05.865171",
+ "cpu_percent": 23.71,
+ "ram_percent": 45.6,
+ "ram_used_gb": 29.03447723388672,
+ "gpu_memory_used": 1094.0,
+ "relative_time": 263.17548394203186
+ },
+ {
+ "timestamp": "2025-01-03T17:39:06.925070",
+ "cpu_percent": 22.13,
+ "ram_percent": 45.7,
+ "ram_used_gb": 29.03990936279297,
+ "gpu_memory_used": 1094.0,
+ "relative_time": 264.2537627220154
+ },
+ {
+ "timestamp": "2025-01-03T17:39:08.016047",
+ "cpu_percent": 24.59,
+ "ram_percent": 45.7,
+ "ram_used_gb": 29.048423767089844,
+ "gpu_memory_used": 1094.0,
+ "relative_time": 265.3426134586334
+ },
+ {
+ "timestamp": "2025-01-03T17:39:09.093191",
+ "cpu_percent": 24.42,
+ "ram_percent": 45.7,
+ "ram_used_gb": 29.047550201416016,
+ "gpu_memory_used": 1094.0,
+ "relative_time": 266.4227204322815
+ },
+ {
+ "timestamp": "2025-01-03T17:39:10.181954",
+ "cpu_percent": 25.16,
+ "ram_percent": 45.7,
+ "ram_used_gb": 29.04483413696289,
+ "gpu_memory_used": 1094.0,
+ "relative_time": 267.5157723426819
+ },
+ {
+ "timestamp": "2025-01-03T17:39:11.274938",
+ "cpu_percent": 23.93,
+ "ram_percent": 45.7,
+ "ram_used_gb": 29.05465316772461,
+ "gpu_memory_used": 1094.0,
+ "relative_time": 268.588228225708
+ },
+ {
+ "timestamp": "2025-01-03T17:39:12.348453",
+ "cpu_percent": 23.18,
+ "ram_percent": 45.7,
+ "ram_used_gb": 29.08385467529297,
+ "gpu_memory_used": 1094.0,
+ "relative_time": 269.6606616973877
+ },
+ {
+ "timestamp": "2025-01-03T17:39:13.409071",
+ "cpu_percent": 24.29,
+ "ram_percent": 45.7,
+ "ram_used_gb": 29.074764251708984,
+ "gpu_memory_used": 1094.0,
+ "relative_time": 270.734317779541
+ },
+ {
+ "timestamp": "2025-01-03T17:39:14.486654",
+ "cpu_percent": 23.16,
+ "ram_percent": 45.7,
+ "ram_used_gb": 29.066978454589844,
+ "gpu_memory_used": 1094.0,
+ "relative_time": 271.8204891681671
+ },
+ {
+ "timestamp": "2025-01-03T17:39:15.575060",
+ "cpu_percent": 24.34,
+ "ram_percent": 45.7,
+ "ram_used_gb": 29.046417236328125,
+ "gpu_memory_used": 1094.0,
+ "relative_time": 272.9057719707489
+ },
+ {
+ "timestamp": "2025-01-03T17:39:16.666232",
+ "cpu_percent": 24.03,
+ "ram_percent": 45.7,
+ "ram_used_gb": 29.045166015625,
+ "gpu_memory_used": 1099.0,
+ "relative_time": 273.9924840927124
+ },
+ {
+ "timestamp": "2025-01-03T17:39:17.755564",
+ "cpu_percent": 23.2,
+ "ram_percent": 45.7,
+ "ram_used_gb": 29.060283660888672,
+ "gpu_memory_used": 1099.0,
+ "relative_time": 275.0789313316345
+ },
+ {
+ "timestamp": "2025-01-03T17:39:18.830783",
+ "cpu_percent": 23.06,
+ "ram_percent": 45.7,
+ "ram_used_gb": 29.053966522216797,
+ "gpu_memory_used": 1099.0,
+ "relative_time": 276.15661883354187
+ },
+ {
+ "timestamp": "2025-01-03T17:39:19.920912",
+ "cpu_percent": 25.16,
+ "ram_percent": 45.7,
+ "ram_used_gb": 29.053394317626953,
+ "gpu_memory_used": 1103.0,
+ "relative_time": 277.2462124824524
+ },
+ {
+ "timestamp": "2025-01-03T17:39:21.001466",
+ "cpu_percent": 27.46,
+ "ram_percent": 45.7,
+ "ram_used_gb": 29.053443908691406,
+ "gpu_memory_used": 1105.0,
+ "relative_time": 278.33917236328125
+ },
+ {
+ "timestamp": "2025-01-03T17:39:22.096283",
+ "cpu_percent": 25.03,
+ "ram_percent": 45.7,
+ "ram_used_gb": 29.078914642333984,
+ "gpu_memory_used": 1106.0,
+ "relative_time": 279.46978664398193
+ },
+ {
+ "timestamp": "2025-01-03T17:39:23.218194",
+ "cpu_percent": 23.54,
+ "ram_percent": 45.7,
+ "ram_used_gb": 29.093307495117188,
+ "gpu_memory_used": 1106.0,
+ "relative_time": 280.5258755683899
+ },
+ {
+ "timestamp": "2025-01-03T17:39:24.281791",
+ "cpu_percent": 24.16,
+ "ram_percent": 45.7,
+ "ram_used_gb": 29.094207763671875,
+ "gpu_memory_used": 1106.0,
+ "relative_time": 281.63647842407227
+ },
+ {
+ "timestamp": "2025-01-03T17:39:25.388563",
+ "cpu_percent": 26.11,
+ "ram_percent": 45.7,
+ "ram_used_gb": 29.082473754882812,
+ "gpu_memory_used": 1106.0,
+ "relative_time": 282.72587037086487
+ },
+ {
+ "timestamp": "2025-01-03T17:39:26.478973",
+ "cpu_percent": 27.64,
+ "ram_percent": 45.7,
+ "ram_used_gb": 29.100139617919922,
+ "gpu_memory_used": 1114.0,
+ "relative_time": 283.8215317726135
+ },
+ {
+ "timestamp": "2025-01-03T17:39:27.583645",
+ "cpu_percent": 24.39,
+ "ram_percent": 45.8,
+ "ram_used_gb": 29.103557586669922,
+ "gpu_memory_used": 1114.0,
+ "relative_time": 284.9139611721039
+ },
+ {
+ "timestamp": "2025-01-03T17:39:28.671523",
+ "cpu_percent": 26.52,
+ "ram_percent": 45.8,
+ "ram_used_gb": 29.135478973388672,
+ "gpu_memory_used": 1114.0,
+ "relative_time": 286.01276302337646
+ },
+ {
+ "timestamp": "2025-01-03T17:39:29.771573",
+ "cpu_percent": 37.88,
+ "ram_percent": 45.9,
+ "ram_used_gb": 29.20437240600586,
+ "gpu_memory_used": 1114.0,
+ "relative_time": 287.1181216239929
+ },
+ {
+ "timestamp": "2025-01-03T17:39:30.869733",
+ "cpu_percent": 51.38,
+ "ram_percent": 45.8,
+ "ram_used_gb": 29.151920318603516,
+ "gpu_memory_used": 1114.0,
+ "relative_time": 288.20085978507996
+ },
+ {
+ "timestamp": "2025-01-03T17:39:31.957778",
+ "cpu_percent": 31.69,
+ "ram_percent": 45.9,
+ "ram_used_gb": 29.169265747070312,
+ "gpu_memory_used": 1111.0,
+ "relative_time": 289.32613611221313
+ },
+ {
+ "timestamp": "2025-01-03T17:39:33.087546",
+ "cpu_percent": 35.71,
+ "ram_percent": 45.9,
+ "ram_used_gb": 29.177200317382812,
+ "gpu_memory_used": 1109.0,
+ "relative_time": 290.4660506248474
+ },
+ {
+ "timestamp": "2025-01-03T17:39:34.225021",
+ "cpu_percent": 41.35,
+ "ram_percent": 46.1,
+ "ram_used_gb": 29.297359466552734,
+ "gpu_memory_used": 1109.0,
+ "relative_time": 291.5882089138031
+ },
+ {
+ "timestamp": "2025-01-03T17:39:35.340627",
+ "cpu_percent": 53.14,
+ "ram_percent": 45.9,
+ "ram_used_gb": 29.174144744873047,
+ "gpu_memory_used": 1109.0,
+ "relative_time": 292.7014124393463
+ },
+ {
+ "timestamp": "2025-01-03T17:39:36.450806",
+ "cpu_percent": 42.99,
+ "ram_percent": 46.0,
+ "ram_used_gb": 29.263404846191406,
+ "gpu_memory_used": 1110.0,
+ "relative_time": 293.8717691898346
+ },
+ {
+ "timestamp": "2025-01-03T17:39:37.626229",
+ "cpu_percent": 47.2,
+ "ram_percent": 46.0,
+ "ram_used_gb": 29.242015838623047,
+ "gpu_memory_used": 1110.0,
+ "relative_time": 294.98959398269653
+ },
+ {
+ "timestamp": "2025-01-03T17:39:38.744021",
+ "cpu_percent": 63.88,
+ "ram_percent": 46.2,
+ "ram_used_gb": 29.360958099365234,
+ "gpu_memory_used": 1110.0,
+ "relative_time": 296.1400308609009
+ },
+ {
+ "timestamp": "2025-01-03T17:39:39.894556",
+ "cpu_percent": 63.27,
+ "ram_percent": 45.9,
+ "ram_used_gb": 29.206928253173828,
+ "gpu_memory_used": 1110.0,
+ "relative_time": 297.2615807056427
+ },
+ {
+ "timestamp": "2025-01-03T17:39:41.013357",
+ "cpu_percent": 50.07,
+ "ram_percent": 46.0,
+ "ram_used_gb": 29.283645629882812,
+ "gpu_memory_used": 1110.0,
+ "relative_time": 298.38854813575745
+ },
+ {
+ "timestamp": "2025-01-03T17:39:42.147807",
+ "cpu_percent": 46.93,
+ "ram_percent": 46.0,
+ "ram_used_gb": 29.241199493408203,
+ "gpu_memory_used": 1110.0,
+ "relative_time": 299.50307989120483
+ },
+ {
+ "timestamp": "2025-01-03T17:39:43.258353",
+ "cpu_percent": 29.54,
+ "ram_percent": 46.0,
+ "ram_used_gb": 29.283145904541016,
+ "gpu_memory_used": 1110.0,
+ "relative_time": 300.59265756607056
+ },
+ {
+ "timestamp": "2025-01-03T17:39:44.348698",
+ "cpu_percent": 32.59,
+ "ram_percent": 46.0,
+ "ram_used_gb": 29.279296875,
+ "gpu_memory_used": 1108.0,
+ "relative_time": 301.70956802368164
+ },
+ {
+ "timestamp": "2025-01-03T17:39:45.462748",
+ "cpu_percent": 34.87,
+ "ram_percent": 46.0,
+ "ram_used_gb": 29.259193420410156,
+ "gpu_memory_used": 1109.0,
+ "relative_time": 302.79254055023193
+ },
+ {
+ "timestamp": "2025-01-03T17:39:46.547817",
+ "cpu_percent": 32.98,
+ "ram_percent": 46.0,
+ "ram_used_gb": 29.246997833251953,
+ "gpu_memory_used": 1109.0,
+ "relative_time": 303.8971025943756
+ },
+ {
+ "timestamp": "2025-01-03T17:39:47.658725",
+ "cpu_percent": 40.88,
+ "ram_percent": 46.0,
+ "ram_used_gb": 29.269287109375,
+ "gpu_memory_used": 1108.0,
+ "relative_time": 305.0029399394989
+ },
+ {
+ "timestamp": "2025-01-03T17:39:48.760123",
+ "cpu_percent": 43.58,
+ "ram_percent": 46.3,
+ "ram_used_gb": 29.445144653320312,
+ "gpu_memory_used": 1105.0,
+ "relative_time": 306.1181561946869
+ },
+ {
+ "timestamp": "2025-01-03T17:39:49.877225",
+ "cpu_percent": 33.54,
+ "ram_percent": 46.0,
+ "ram_used_gb": 29.264232635498047,
+ "gpu_memory_used": 1105.0,
+ "relative_time": 307.2013146877289
+ },
+ {
+ "timestamp": "2025-01-03T17:39:50.952699",
+ "cpu_percent": 38.25,
+ "ram_percent": 46.2,
+ "ram_used_gb": 29.358203887939453,
+ "gpu_memory_used": 1113.0,
+ "relative_time": 308.3161952495575
+ },
+ {
+ "timestamp": "2025-01-03T17:39:52.074361",
+ "cpu_percent": 28.14,
+ "ram_percent": 46.3,
+ "ram_used_gb": 29.476333618164062,
+ "gpu_memory_used": 1111.0,
+ "relative_time": 309.4449317455292
+ },
+ {
+ "timestamp": "2025-01-03T17:39:53.198618",
+ "cpu_percent": 40.66,
+ "ram_percent": 46.3,
+ "ram_used_gb": 29.434783935546875,
+ "gpu_memory_used": 1117.0,
+ "relative_time": 310.5291397571564
+ },
+ {
+ "timestamp": "2025-01-03T17:39:54.288267",
+ "cpu_percent": 28.12,
+ "ram_percent": 46.2,
+ "ram_used_gb": 29.40790557861328,
+ "gpu_memory_used": 1123.0,
+ "relative_time": 311.648175239563
+ },
+ {
+ "timestamp": "2025-01-03T17:39:55.397071",
+ "cpu_percent": 26.64,
+ "ram_percent": 46.2,
+ "ram_used_gb": 29.376319885253906,
+ "gpu_memory_used": 1123.0,
+ "relative_time": 312.72413992881775
+ },
+ {
+ "timestamp": "2025-01-03T17:39:56.486647",
+ "cpu_percent": 25.62,
+ "ram_percent": 46.2,
+ "ram_used_gb": 29.39731216430664,
+ "gpu_memory_used": 1120.0,
+ "relative_time": 313.8426833152771
+ },
+ {
+ "timestamp": "2025-01-03T17:39:57.593572",
+ "cpu_percent": 27.73,
+ "ram_percent": 45.9,
+ "ram_used_gb": 29.215042114257812,
+ "gpu_memory_used": 1120.0,
+ "relative_time": 314.9389867782593
+ },
+ {
+ "timestamp": "2025-01-03T17:39:58.700730",
+ "cpu_percent": 27.28,
+ "ram_percent": 45.9,
+ "ram_used_gb": 29.222488403320312,
+ "gpu_memory_used": 1121.0,
+ "relative_time": 316.0105288028717
+ },
+ {
+ "timestamp": "2025-01-03T17:39:59.774069",
+ "cpu_percent": 31.39,
+ "ram_percent": 45.9,
+ "ram_used_gb": 29.198009490966797,
+ "gpu_memory_used": 1111.0,
+ "relative_time": 317.12384700775146
+ },
+ {
+ "timestamp": "2025-01-03T17:40:00.885192",
+ "cpu_percent": 28.06,
+ "ram_percent": 45.9,
+ "ram_used_gb": 29.19915771484375,
+ "gpu_memory_used": 1111.0,
+ "relative_time": 318.2079384326935
+ },
+ {
+ "timestamp": "2025-01-03T17:40:01.963276",
+ "cpu_percent": 24.83,
+ "ram_percent": 45.9,
+ "ram_used_gb": 29.220375061035156,
+ "gpu_memory_used": 1111.0,
+ "relative_time": 319.31243777275085
+ },
+ {
+ "timestamp": "2025-01-03T17:40:03.069394",
+ "cpu_percent": 24.7,
+ "ram_percent": 45.9,
+ "ram_used_gb": 29.226959228515625,
+ "gpu_memory_used": 1115.0,
+ "relative_time": 320.39933586120605
+ },
+ {
+ "timestamp": "2025-01-03T17:40:04.157278",
+ "cpu_percent": 23.66,
+ "ram_percent": 46.0,
+ "ram_used_gb": 29.235092163085938,
+ "gpu_memory_used": 1116.0,
+ "relative_time": 321.47987031936646
+ },
+ {
+ "timestamp": "2025-01-03T17:40:05.234540",
+ "cpu_percent": 25.81,
+ "ram_percent": 45.9,
+ "ram_used_gb": 29.195159912109375,
+ "gpu_memory_used": 1116.0,
+ "relative_time": 322.55900835990906
+ },
+ {
+ "timestamp": "2025-01-03T17:40:06.309731",
+ "cpu_percent": 23.72,
+ "ram_percent": 45.9,
+ "ram_used_gb": 29.18838119506836,
+ "gpu_memory_used": 1114.0,
+ "relative_time": 323.6197941303253
+ },
+ {
+ "timestamp": "2025-01-03T17:40:07.369706",
+ "cpu_percent": 22.79,
+ "ram_percent": 46.0,
+ "ram_used_gb": 29.232295989990234,
+ "gpu_memory_used": 1115.0,
+ "relative_time": 324.67907524108887
+ },
+ {
+ "timestamp": "2025-01-03T17:40:08.427907",
+ "cpu_percent": 20.87,
+ "ram_percent": 45.9,
+ "ram_used_gb": 29.225311279296875,
+ "gpu_memory_used": 1114.0,
+ "relative_time": 325.77514839172363
+ },
+ {
+ "timestamp": "2025-01-03T17:40:09.531189",
+ "cpu_percent": 27.84,
+ "ram_percent": 45.9,
+ "ram_used_gb": 29.220264434814453,
+ "gpu_memory_used": 1111.0,
+ "relative_time": 326.8838756084442
+ },
+ {
+ "timestamp": "2025-01-03T17:40:10.635660",
+ "cpu_percent": 21.53,
+ "ram_percent": 45.9,
+ "ram_used_gb": 29.223426818847656,
+ "gpu_memory_used": 1111.0,
+ "relative_time": 327.9577159881592
+ },
+ {
+ "timestamp": "2025-01-03T17:40:11.712367",
+ "cpu_percent": 23.51,
+ "ram_percent": 45.9,
+ "ram_used_gb": 29.221378326416016,
+ "gpu_memory_used": 1111.0,
+ "relative_time": 329.0358979701996
+ },
+ {
+ "timestamp": "2025-01-03T17:40:12.785899",
+ "cpu_percent": 22.29,
+ "ram_percent": 46.0,
+ "ram_used_gb": 29.258285522460938,
+ "gpu_memory_used": 1111.0,
+ "relative_time": 330.09391021728516
+ },
+ {
+ "timestamp": "2025-01-03T17:40:13.847325",
+ "cpu_percent": 23.39,
+ "ram_percent": 46.0,
+ "ram_used_gb": 29.249286651611328,
+ "gpu_memory_used": 1111.0,
+ "relative_time": 331.1698377132416
+ },
+ {
+ "timestamp": "2025-01-03T17:40:14.924517",
+ "cpu_percent": 24.0,
+ "ram_percent": 45.9,
+ "ram_used_gb": 29.213272094726562,
+ "gpu_memory_used": 1109.0,
+ "relative_time": 332.2554090023041
+ },
+ {
+ "timestamp": "2025-01-03T17:40:16.017128",
+ "cpu_percent": 17.02,
+ "ram_percent": 46.2,
+ "ram_used_gb": 29.360702514648438,
+ "gpu_memory_used": 1106.0,
+ "relative_time": 333.3264467716217
+ }
+ ],
+ "test_duration": 335.7011880874634
+}
\ No newline at end of file
diff --git a/examples/assorted_checks/benchmarks/output_data/cpu_benchmark_stats_rtf.txt b/examples/assorted_checks/benchmarks/output_data/cpu_benchmark_stats_rtf.txt
new file mode 100644
index 0000000..45d2a87
--- /dev/null
+++ b/examples/assorted_checks/benchmarks/output_data/cpu_benchmark_stats_rtf.txt
@@ -0,0 +1,23 @@
+=== Benchmark Statistics (with correct RTF) ===
+
+Total tokens processed: 2250
+Total audio generated (s): 710.80
+Total test duration (s): 332.81
+Average processing rate (tokens/s): 6.77
+Average RTF: 0.47
+Average Real Time Speed: 2.14
+
+=== Per-chunk Stats ===
+
+Average chunk size (tokens): 450.00
+Min chunk size (tokens): 150
+Max chunk size (tokens): 750
+Average processing time (s): 66.51
+Average output length (s): 142.16
+
+=== Performance Ranges ===
+
+Processing rate range (tokens/s): 6.50 - 7.00
+RTF range: 0.45x - 0.50x
+Real Time Speed range: 2.00x - 2.22x
+
diff --git a/examples/assorted_checks/benchmarks/output_data/gpu_benchmark_results_rtf.json b/examples/assorted_checks/benchmarks/output_data/gpu_benchmark_results_rtf.json
new file mode 100644
index 0000000..ae59bfe
--- /dev/null
+++ b/examples/assorted_checks/benchmarks/output_data/gpu_benchmark_results_rtf.json
@@ -0,0 +1,607 @@
+{
+ "results": [
+ {
+ "tokens": 150,
+ "processing_time": 1.03,
+ "output_length": 45.9,
+ "rtf": 0.02,
+ "elapsed_time": 1.07
+ },
+ {
+ "tokens": 300,
+ "processing_time": 2.51,
+ "output_length": 96.425,
+ "rtf": 0.03,
+ "elapsed_time": 3.63
+ },
+ {
+ "tokens": 450,
+ "processing_time": 3.69,
+ "output_length": 143.1,
+ "rtf": 0.03,
+ "elapsed_time": 7.37
+ },
+ {
+ "tokens": 600,
+ "processing_time": 5.52,
+ "output_length": 188.675,
+ "rtf": 0.03,
+ "elapsed_time": 12.96
+ },
+ {
+ "tokens": 750,
+ "processing_time": 6.32,
+ "output_length": 236.7,
+ "rtf": 0.03,
+ "elapsed_time": 19.34
+ },
+ {
+ "tokens": 900,
+ "processing_time": 8.4,
+ "output_length": 283.425,
+ "rtf": 0.03,
+ "elapsed_time": 27.82
+ },
+ {
+ "tokens": 2000,
+ "processing_time": 15.46,
+ "output_length": 624.325,
+ "rtf": 0.02,
+ "elapsed_time": 43.4
+ },
+ {
+ "tokens": 3000,
+ "processing_time": 27.11,
+ "output_length": 931.15,
+ "rtf": 0.03,
+ "elapsed_time": 70.7
+ }
+ ],
+ "system_metrics": [
+ {
+ "timestamp": "2025-01-03T17:45:10.797646",
+ "cpu_percent": 10.05,
+ "ram_percent": 54.2,
+ "ram_used_gb": 34.474674224853516,
+ "gpu_memory_used": 3992.0,
+ "relative_time": 0.06637930870056152
+ },
+ {
+ "timestamp": "2025-01-03T17:45:11.871315",
+ "cpu_percent": 13.54,
+ "ram_percent": 54.2,
+ "ram_used_gb": 34.47991180419922,
+ "gpu_memory_used": 3990.0,
+ "relative_time": 1.1326591968536377
+ },
+ {
+ "timestamp": "2025-01-03T17:45:12.932597",
+ "cpu_percent": 12.76,
+ "ram_percent": 54.2,
+ "ram_used_gb": 34.501747131347656,
+ "gpu_memory_used": 3990.0,
+ "relative_time": 2.192795515060425
+ },
+ {
+ "timestamp": "2025-01-03T17:45:13.995052",
+ "cpu_percent": 15.48,
+ "ram_percent": 54.2,
+ "ram_used_gb": 34.48517990112305,
+ "gpu_memory_used": 3989.0,
+ "relative_time": 3.253366231918335
+ },
+ {
+ "timestamp": "2025-01-03T17:45:15.056310",
+ "cpu_percent": 11.96,
+ "ram_percent": 54.2,
+ "ram_used_gb": 34.457679748535156,
+ "gpu_memory_used": 3980.0,
+ "relative_time": 4.331450462341309
+ },
+ {
+ "timestamp": "2025-01-03T17:45:16.128795",
+ "cpu_percent": 14.28,
+ "ram_percent": 54.2,
+ "ram_used_gb": 34.465850830078125,
+ "gpu_memory_used": 3980.0,
+ "relative_time": 5.386842727661133
+ },
+ {
+ "timestamp": "2025-01-03T17:45:17.185921",
+ "cpu_percent": 13.14,
+ "ram_percent": 54.2,
+ "ram_used_gb": 34.4874153137207,
+ "gpu_memory_used": 3980.0,
+ "relative_time": 6.450911998748779
+ },
+ {
+ "timestamp": "2025-01-03T17:45:18.248252",
+ "cpu_percent": 15.54,
+ "ram_percent": 54.4,
+ "ram_used_gb": 34.581886291503906,
+ "gpu_memory_used": 3986.0,
+ "relative_time": 7.525278329849243
+ },
+ {
+ "timestamp": "2025-01-03T17:45:19.324382",
+ "cpu_percent": 14.89,
+ "ram_percent": 54.4,
+ "ram_used_gb": 34.5898551940918,
+ "gpu_memory_used": 3987.0,
+ "relative_time": 8.588879585266113
+ },
+ {
+ "timestamp": "2025-01-03T17:45:20.394701",
+ "cpu_percent": 12.13,
+ "ram_percent": 54.4,
+ "ram_used_gb": 34.582420349121094,
+ "gpu_memory_used": 3986.0,
+ "relative_time": 9.65286660194397
+ },
+ {
+ "timestamp": "2025-01-03T17:45:21.455704",
+ "cpu_percent": 11.02,
+ "ram_percent": 54.4,
+ "ram_used_gb": 34.617252349853516,
+ "gpu_memory_used": 3986.0,
+ "relative_time": 10.71657395362854
+ },
+ {
+ "timestamp": "2025-01-03T17:45:22.525946",
+ "cpu_percent": 14.01,
+ "ram_percent": 54.5,
+ "ram_used_gb": 34.651466369628906,
+ "gpu_memory_used": 3989.0,
+ "relative_time": 11.787351846694946
+ },
+ {
+ "timestamp": "2025-01-03T17:45:23.584761",
+ "cpu_percent": 13.09,
+ "ram_percent": 54.5,
+ "ram_used_gb": 34.680885314941406,
+ "gpu_memory_used": 3989.0,
+ "relative_time": 12.846002101898193
+ },
+ {
+ "timestamp": "2025-01-03T17:45:24.645316",
+ "cpu_percent": 17.72,
+ "ram_percent": 54.6,
+ "ram_used_gb": 34.741127014160156,
+ "gpu_memory_used": 3985.0,
+ "relative_time": 13.918755054473877
+ },
+ {
+ "timestamp": "2025-01-03T17:45:25.718731",
+ "cpu_percent": 14.66,
+ "ram_percent": 54.6,
+ "ram_used_gb": 34.71047592163086,
+ "gpu_memory_used": 3982.0,
+ "relative_time": 14.974157810211182
+ },
+ {
+ "timestamp": "2025-01-03T17:45:26.774860",
+ "cpu_percent": 11.52,
+ "ram_percent": 54.6,
+ "ram_used_gb": 34.728397369384766,
+ "gpu_memory_used": 3982.0,
+ "relative_time": 16.034392833709717
+ },
+ {
+ "timestamp": "2025-01-03T17:45:27.837623",
+ "cpu_percent": 11.04,
+ "ram_percent": 54.6,
+ "ram_used_gb": 34.75224685668945,
+ "gpu_memory_used": 3981.0,
+ "relative_time": 17.096498250961304
+ },
+ {
+ "timestamp": "2025-01-03T17:45:28.898447",
+ "cpu_percent": 12.17,
+ "ram_percent": 54.7,
+ "ram_used_gb": 34.796974182128906,
+ "gpu_memory_used": 3977.0,
+ "relative_time": 18.157397270202637
+ },
+ {
+ "timestamp": "2025-01-03T17:45:29.959510",
+ "cpu_percent": 10.72,
+ "ram_percent": 54.7,
+ "ram_used_gb": 34.819969177246094,
+ "gpu_memory_used": 3991.0,
+ "relative_time": 19.22814679145813
+ },
+ {
+ "timestamp": "2025-01-03T17:45:31.033262",
+ "cpu_percent": 17.95,
+ "ram_percent": 55.0,
+ "ram_used_gb": 34.9871711730957,
+ "gpu_memory_used": 3995.0,
+ "relative_time": 20.29205060005188
+ },
+ {
+ "timestamp": "2025-01-03T17:45:32.091757",
+ "cpu_percent": 19.11,
+ "ram_percent": 55.0,
+ "ram_used_gb": 35.0067138671875,
+ "gpu_memory_used": 3995.0,
+ "relative_time": 21.353832006454468
+ },
+ {
+ "timestamp": "2025-01-03T17:45:33.156831",
+ "cpu_percent": 32.93,
+ "ram_percent": 55.1,
+ "ram_used_gb": 35.05879211425781,
+ "gpu_memory_used": 3995.0,
+ "relative_time": 22.416496992111206
+ },
+ {
+ "timestamp": "2025-01-03T17:45:34.217136",
+ "cpu_percent": 12.59,
+ "ram_percent": 55.2,
+ "ram_used_gb": 35.10686111450195,
+ "gpu_memory_used": 3994.0,
+ "relative_time": 23.476072549819946
+ },
+ {
+ "timestamp": "2025-01-03T17:45:35.275577",
+ "cpu_percent": 30.79,
+ "ram_percent": 55.4,
+ "ram_used_gb": 35.22132110595703,
+ "gpu_memory_used": 3989.0,
+ "relative_time": 24.564188957214355
+ },
+ {
+ "timestamp": "2025-01-03T17:45:36.365095",
+ "cpu_percent": 13.36,
+ "ram_percent": 55.2,
+ "ram_used_gb": 35.08255386352539,
+ "gpu_memory_used": 4000.0,
+ "relative_time": 25.64090871810913
+ },
+ {
+ "timestamp": "2025-01-03T17:45:37.451539",
+ "cpu_percent": 14.94,
+ "ram_percent": 55.2,
+ "ram_used_gb": 35.118614196777344,
+ "gpu_memory_used": 4000.0,
+ "relative_time": 26.71500325202942
+ },
+ {
+ "timestamp": "2025-01-03T17:45:38.525364",
+ "cpu_percent": 12.76,
+ "ram_percent": 55.4,
+ "ram_used_gb": 35.221614837646484,
+ "gpu_memory_used": 3999.0,
+ "relative_time": 27.806236505508423
+ },
+ {
+ "timestamp": "2025-01-03T17:45:39.616790",
+ "cpu_percent": 16.11,
+ "ram_percent": 55.4,
+ "ram_used_gb": 35.2247200012207,
+ "gpu_memory_used": 3999.0,
+ "relative_time": 28.875747203826904
+ },
+ {
+ "timestamp": "2025-01-03T17:45:40.675234",
+ "cpu_percent": 14.96,
+ "ram_percent": 55.4,
+ "ram_used_gb": 35.21339416503906,
+ "gpu_memory_used": 3999.0,
+ "relative_time": 29.94703769683838
+ },
+ {
+ "timestamp": "2025-01-03T17:45:41.746176",
+ "cpu_percent": 10.99,
+ "ram_percent": 55.4,
+ "ram_used_gb": 35.260677337646484,
+ "gpu_memory_used": 3994.0,
+ "relative_time": 31.006144046783447
+ },
+ {
+ "timestamp": "2025-01-03T17:45:42.807809",
+ "cpu_percent": 13.15,
+ "ram_percent": 55.5,
+ "ram_used_gb": 35.299591064453125,
+ "gpu_memory_used": 3994.0,
+ "relative_time": 32.0741171836853
+ },
+ {
+ "timestamp": "2025-01-03T17:45:43.879826",
+ "cpu_percent": 12.74,
+ "ram_percent": 55.6,
+ "ram_used_gb": 35.34665298461914,
+ "gpu_memory_used": 3994.0,
+ "relative_time": 33.14525270462036
+ },
+ {
+ "timestamp": "2025-01-03T17:45:44.954413",
+ "cpu_percent": 12.11,
+ "ram_percent": 55.6,
+ "ram_used_gb": 35.34089660644531,
+ "gpu_memory_used": 3990.0,
+ "relative_time": 34.21659064292908
+ },
+ {
+ "timestamp": "2025-01-03T17:45:46.025229",
+ "cpu_percent": 13.02,
+ "ram_percent": 55.6,
+ "ram_used_gb": 35.37482833862305,
+ "gpu_memory_used": 3991.0,
+ "relative_time": 35.28446078300476
+ },
+ {
+ "timestamp": "2025-01-03T17:45:47.085470",
+ "cpu_percent": 13.53,
+ "ram_percent": 55.6,
+ "ram_used_gb": 35.392356872558594,
+ "gpu_memory_used": 3988.0,
+ "relative_time": 36.34242486953735
+ },
+ {
+ "timestamp": "2025-01-03T17:45:48.155295",
+ "cpu_percent": 15.0,
+ "ram_percent": 55.7,
+ "ram_used_gb": 35.449764251708984,
+ "gpu_memory_used": 3987.0,
+ "relative_time": 37.418004512786865
+ },
+ {
+ "timestamp": "2025-01-03T17:45:49.218400",
+ "cpu_percent": 13.84,
+ "ram_percent": 55.8,
+ "ram_used_gb": 35.468841552734375,
+ "gpu_memory_used": 3986.0,
+ "relative_time": 38.48085808753967
+ },
+ {
+ "timestamp": "2025-01-03T17:45:50.281360",
+ "cpu_percent": 13.25,
+ "ram_percent": 55.8,
+ "ram_used_gb": 35.491825103759766,
+ "gpu_memory_used": 3987.0,
+ "relative_time": 39.5399751663208
+ },
+ {
+ "timestamp": "2025-01-03T17:45:51.343810",
+ "cpu_percent": 10.34,
+ "ram_percent": 55.8,
+ "ram_used_gb": 35.51161193847656,
+ "gpu_memory_used": 3985.0,
+ "relative_time": 40.60230302810669
+ },
+ {
+ "timestamp": "2025-01-03T17:45:52.402527",
+ "cpu_percent": 12.56,
+ "ram_percent": 55.9,
+ "ram_used_gb": 35.57502365112305,
+ "gpu_memory_used": 3984.0,
+ "relative_time": 41.660725116729736
+ },
+ {
+ "timestamp": "2025-01-03T17:45:53.460932",
+ "cpu_percent": 12.04,
+ "ram_percent": 56.0,
+ "ram_used_gb": 35.61081314086914,
+ "gpu_memory_used": 3978.0,
+ "relative_time": 42.71787190437317
+ },
+ {
+ "timestamp": "2025-01-03T17:45:54.521959",
+ "cpu_percent": 10.13,
+ "ram_percent": 56.3,
+ "ram_used_gb": 35.822574615478516,
+ "gpu_memory_used": 3978.0,
+ "relative_time": 43.783926010131836
+ },
+ {
+ "timestamp": "2025-01-03T17:45:55.583212",
+ "cpu_percent": 28.17,
+ "ram_percent": 56.3,
+ "ram_used_gb": 35.78395462036133,
+ "gpu_memory_used": 3976.0,
+ "relative_time": 44.858543157577515
+ },
+ {
+ "timestamp": "2025-01-03T17:45:56.657026",
+ "cpu_percent": 16.61,
+ "ram_percent": 56.3,
+ "ram_used_gb": 35.7921028137207,
+ "gpu_memory_used": 3984.0,
+ "relative_time": 45.918612003326416
+ },
+ {
+ "timestamp": "2025-01-03T17:45:57.716203",
+ "cpu_percent": 15.03,
+ "ram_percent": 56.3,
+ "ram_used_gb": 35.79140853881836,
+ "gpu_memory_used": 3984.0,
+ "relative_time": 46.97588872909546
+ },
+ {
+ "timestamp": "2025-01-03T17:45:58.775392",
+ "cpu_percent": 14.81,
+ "ram_percent": 56.3,
+ "ram_used_gb": 35.80635452270508,
+ "gpu_memory_used": 3984.0,
+ "relative_time": 48.03421711921692
+ },
+ {
+ "timestamp": "2025-01-03T17:45:59.834277",
+ "cpu_percent": 15.06,
+ "ram_percent": 56.3,
+ "ram_used_gb": 35.81984329223633,
+ "gpu_memory_used": 3984.0,
+ "relative_time": 49.0965371131897
+ },
+ {
+ "timestamp": "2025-01-03T17:46:00.896761",
+ "cpu_percent": 19.76,
+ "ram_percent": 56.3,
+ "ram_used_gb": 35.7983512878418,
+ "gpu_memory_used": 3989.0,
+ "relative_time": 50.177143812179565
+ },
+ {
+ "timestamp": "2025-01-03T17:46:01.981868",
+ "cpu_percent": 17.32,
+ "ram_percent": 56.3,
+ "ram_used_gb": 35.81730270385742,
+ "gpu_memory_used": 3990.0,
+ "relative_time": 51.242098331451416
+ },
+ {
+ "timestamp": "2025-01-03T17:46:03.046930",
+ "cpu_percent": 19.8,
+ "ram_percent": 56.5,
+ "ram_used_gb": 35.92729949951172,
+ "gpu_memory_used": 3990.0,
+ "relative_time": 52.3223512172699
+ },
+ {
+ "timestamp": "2025-01-03T17:46:04.122311",
+ "cpu_percent": 20.91,
+ "ram_percent": 56.5,
+ "ram_used_gb": 35.949684143066406,
+ "gpu_memory_used": 3991.0,
+ "relative_time": 53.3851900100708
+ },
+ {
+ "timestamp": "2025-01-03T17:46:05.182768",
+ "cpu_percent": 17.39,
+ "ram_percent": 56.5,
+ "ram_used_gb": 35.94847869873047,
+ "gpu_memory_used": 3991.0,
+ "relative_time": 54.45881199836731
+ },
+ {
+ "timestamp": "2025-01-03T17:46:06.257550",
+ "cpu_percent": 16.64,
+ "ram_percent": 56.5,
+ "ram_used_gb": 35.9198112487793,
+ "gpu_memory_used": 3989.0,
+ "relative_time": 55.51820731163025
+ },
+ {
+ "timestamp": "2025-01-03T17:46:07.317263",
+ "cpu_percent": 15.99,
+ "ram_percent": 56.3,
+ "ram_used_gb": 35.82686233520508,
+ "gpu_memory_used": 3989.0,
+ "relative_time": 56.59837555885315
+ },
+ {
+ "timestamp": "2025-01-03T17:46:08.409244",
+ "cpu_percent": 15.11,
+ "ram_percent": 56.4,
+ "ram_used_gb": 35.852657318115234,
+ "gpu_memory_used": 3988.0,
+ "relative_time": 57.669328927993774
+ },
+ {
+ "timestamp": "2025-01-03T17:46:09.473703",
+ "cpu_percent": 18.54,
+ "ram_percent": 56.4,
+ "ram_used_gb": 35.889339447021484,
+ "gpu_memory_used": 3979.0,
+ "relative_time": 58.76238036155701
+ },
+ {
+ "timestamp": "2025-01-03T17:46:10.562180",
+ "cpu_percent": 15.7,
+ "ram_percent": 56.4,
+ "ram_used_gb": 35.90079879760742,
+ "gpu_memory_used": 3975.0,
+ "relative_time": 59.82209253311157
+ },
+ {
+ "timestamp": "2025-01-03T17:46:11.634373",
+ "cpu_percent": 16.25,
+ "ram_percent": 56.5,
+ "ram_used_gb": 35.94197082519531,
+ "gpu_memory_used": 3976.0,
+ "relative_time": 60.91385841369629
+ },
+ {
+ "timestamp": "2025-01-03T17:46:12.723458",
+ "cpu_percent": 16.98,
+ "ram_percent": 56.6,
+ "ram_used_gb": 35.99095153808594,
+ "gpu_memory_used": 3976.0,
+ "relative_time": 61.981855154037476
+ },
+ {
+ "timestamp": "2025-01-03T17:46:13.781955",
+ "cpu_percent": 15.59,
+ "ram_percent": 56.6,
+ "ram_used_gb": 36.00953674316406,
+ "gpu_memory_used": 3976.0,
+ "relative_time": 63.04051613807678
+ },
+ {
+ "timestamp": "2025-01-03T17:46:14.852706",
+ "cpu_percent": 13.16,
+ "ram_percent": 56.7,
+ "ram_used_gb": 36.050899505615234,
+ "gpu_memory_used": 3976.0,
+ "relative_time": 64.11573505401611
+ },
+ {
+ "timestamp": "2025-01-03T17:46:15.927719",
+ "cpu_percent": 12.34,
+ "ram_percent": 56.7,
+ "ram_used_gb": 36.07988739013672,
+ "gpu_memory_used": 3976.0,
+ "relative_time": 65.18661308288574
+ },
+ {
+ "timestamp": "2025-01-03T17:46:16.999292",
+ "cpu_percent": 12.34,
+ "ram_percent": 56.8,
+ "ram_used_gb": 36.099937438964844,
+ "gpu_memory_used": 3976.0,
+ "relative_time": 66.25790786743164
+ },
+ {
+ "timestamp": "2025-01-03T17:46:18.058608",
+ "cpu_percent": 11.74,
+ "ram_percent": 56.8,
+ "ram_used_gb": 36.14547348022461,
+ "gpu_memory_used": 3975.0,
+ "relative_time": 67.31676268577576
+ },
+ {
+ "timestamp": "2025-01-03T17:46:19.122597",
+ "cpu_percent": 12.63,
+ "ram_percent": 56.9,
+ "ram_used_gb": 36.177284240722656,
+ "gpu_memory_used": 3974.0,
+ "relative_time": 68.3815085887909
+ },
+ {
+ "timestamp": "2025-01-03T17:46:20.182864",
+ "cpu_percent": 9.65,
+ "ram_percent": 56.9,
+ "ram_used_gb": 36.216495513916016,
+ "gpu_memory_used": 3973.0,
+ "relative_time": 69.44507431983948
+ },
+ {
+ "timestamp": "2025-01-03T17:46:21.244696",
+ "cpu_percent": 10.38,
+ "ram_percent": 57.4,
+ "ram_used_gb": 36.51596450805664,
+ "gpu_memory_used": 3973.0,
+ "relative_time": 70.51762080192566
+ },
+ {
+ "timestamp": "2025-01-03T17:46:22.448455",
+ "cpu_percent": 9.24,
+ "ram_percent": 57.5,
+ "ram_used_gb": 36.56745529174805,
+ "gpu_memory_used": 3974.0,
+ "relative_time": 71.72753357887268
+ }
+ ],
+ "test_duration": 74.18872809410095
+}
\ No newline at end of file
diff --git a/examples/assorted_checks/benchmarks/output_data/gpu_benchmark_stats_rtf.txt b/examples/assorted_checks/benchmarks/output_data/gpu_benchmark_stats_rtf.txt
new file mode 100644
index 0000000..b045833
--- /dev/null
+++ b/examples/assorted_checks/benchmarks/output_data/gpu_benchmark_stats_rtf.txt
@@ -0,0 +1,23 @@
+=== Benchmark Statistics (with correct RTF) ===
+
+Total tokens processed: 8150
+Total audio generated (s): 2549.70
+Total test duration (s): 70.70
+Average processing rate (tokens/s): 120.20
+Average RTF: 0.03
+Average Real Time Speed: 36.36
+
+=== Per-chunk Stats ===
+
+Average chunk size (tokens): 1018.75
+Min chunk size (tokens): 150
+Max chunk size (tokens): 3000
+Average processing time (s): 8.75
+Average output length (s): 318.71
+
+=== Performance Ranges ===
+
+Processing rate range (tokens/s): 107.14 - 145.63
+RTF range: 0.02x - 0.03x
+Real Time Speed range: 33.33x - 50.00x
+
diff --git a/examples/assorted_checks/benchmarks/output_plots/cpu_processing_time_rtf.png b/examples/assorted_checks/benchmarks/output_plots/cpu_processing_time_rtf.png
new file mode 100644
index 0000000000000000000000000000000000000000..ee7f8014dd0f6f3699406a0806bd399257446cfc
GIT binary patch
literal 239297
zcmeEuXH-*b*Dc3-EXc7dh#VUz2#7QR0UIbKfI#R)EOZDxQey=b0R^S^5+H#9(rZ)%
zq)Q1%35fKB4xvNt+8jOK`;9y9-}~!z42KXQ+56e+S!=F2=XzeMsoZ4U$-a|?g@sk&
z)(v$QmR%z(EPn?6xeZ=1?onNZeUjBt?1H8&So)&(NX8zx5bnOT)%R~t_VK)
zfB(5cy3{H4zh6SW+qrq)99+-;{*$@szc2o;75VRi{IBI;VflY7h?)EL-#@t2)Qe*h
z90oVBQm7@a#oKltG3|fB()A}>uT_EVt*`2SN2W=v2@rbED?UCh74=%(?oD5qWIoHY
z?l38TvGFcnIU-}(e!t)Ee;*yRdm{7u8}9lTSros%SQhpOe#^GDZ0w&bEFZTizMImX
z9rDx6x8+A?80kKKoP;hZAeWR}9;DIYb8?(2s;c--oVdMX$IDl*OGv(l1?iuk*A_jT
z4s3{&$T)S=pYQzGJ9TySF=@_?&)IqT*KW8{HoMSECKwc6A-BhZm-5m^r-mc-t
zKDcV+=_j==
zN^iPqgqV`J?TxMrC%f!jT-x@lUQRr0bcEZuH(qPJi$akTwbIh452v(d7^^BYGw9+u
z_Wg%gxz@wcRhy7^ioI~^D%Zv(Tv1n3LvP(GopIRML;b`FB}Ru?(1D|_)E<+KWa98G
zUhbX-_Tis9bhZB8KgXwemUvF1lJ`=9RQzd*eoK$Z#tSU_Bgg*<5|%4hr$qSM%Of`#RgazRm%`Qp^gaZc}wYuAF$oxUIT
z>z)5@eEaK;?mUW#$%uBJ5b|8?d+6@7vF-)CoF=+ek`&)xpm6i^qaws?D9en9TpaU6
zl*E2gN(#p-U*8ajhgSz@Z)e?=;Wg@3g(s;UC3%%#D0PcMKnG?EkN&fAI-ALv8;|J9
ze;AC`n?AQ_Py_4naY1qNR
z#QFXza>nD$D@RDUdFxA9XJq+W(z~SbG_K1Jbp%VM^8AlUM2|9BTQv;JRqv;&LUiw#oD3`ULaaT(z4wlEn{!o4+>n*}3?Hdj~i<6Z)%Ec~9R@?aZ@@
zd%0g$Yx<`betCLkMsv8qhEXd>?mDY}Kc*NPfXZ4*x9KW7?i}v
z+m4KN*l|j^T)Ta{$;CHIGj$)Yaao#Th(O%kidQw|<%R+R4T=g1Xb+E)pZ)ut)|OeL
zWO29q`VJpF>MGrM*U`mA>-o;dy!v8l6}~H}aG?j!{p980xp{`0`<#r-ReSsCB8Nd5
zvkLo?RfJ_H=i$R<=v4LF$n_93ceuK_eT5(n4AMAo_;3c2Tab&9^-?@{?%e#I1=4VT
ze>&`S-V2$pJv@DDy1JAE1OyaSRkb}lp1{={*uOtv*I|Lt)g|8|r%^(4bMqLd*dq^u
zLeR1|JsKYN`kK#ZC2Oshz5&_my6<5op~4sUJ}4*%S=YGh+f!`1h7K{d_aEcrgo|>0
z&(9ItoTOCK(sCVE2yXiJwQElVjLY!CW}lr`7k9udbx0W?q5SBN(<2)Z@~F6>(obLH
z{?}~19P^^fm%CB9Hq;{BXzmel%L8d7!^zurb|~bAWn_A0;F;*@g`Yf?Vbz>C8ZOD;
z@m*P9@DqahBhFqb$b0gH!25UXJH_DhC$Ly?SVh9Xz=1QTPt%qebV=NLpi1PWbkCJ*
zxi($4utLqRghi`5I&@h%+R&wLx#(gS*7tmeQ{eImI$80&>R-_C-0}nSPn}9|TAtI5
zl62OJi@TZQxpF*8#5~?Gz-?Bv
zmw?_+Nz*lydsPr_i5qP`_}u$A+I-|TCxm>l%XoW7cAwTue%YDXS@dwjcx#cf#@4N9
zjj)SJ1A~Kc-@iYUyK#dyGf3<#^9*Q9?@Hv9n&%Z2r3?u{BGE7`%F*txWN0QS$}KG)
z3i9>MzI*ql)6xtYR^(cQ_~f@qaz^{o>|x>Y1gJY2F^#%W`+PrtqEZc`r1CYY{L^8b
z-qgH1cScopf!Q1Dh}ypASQ{RtpX;$?DR;9fl-32$-mut3t=NTMGg+mcsrt%MTT?TK
zWa!%pZ<}k;qLpq~tZ*~HyTjZYYxzBsRODolA!ZZKv3~w_KtTHhb$gB_MG{#(2uQ`_
zH+G7P=QE90x>77!w()8UeA8SR7`PvG*;Rf!+wmo4F8zhe*!P6*-!E~GD43L-
z%qJ-+&!ZaM)Kj{Bu8J!n2d)%e?UK6x{e_QDacbkca~h^Pk&(D!$TjH(g<5>tB=pkE
zV_Hjc1bVrGxn(OF-A>|cE%W4q`gnh&7<2b-;GScb5=E`VEIU4+C7o&3a5qToXFdbS
ze1U2CPllX%PiC%DG*4dp1yV-sjc{wOrBDg*ozLLG4>xT7kUm?(*JwWQwa%MMVYQ+3Q4W
z)#cZni7>6#i=7=7)NS=Sx@XTG%XZRpamzWYfg$XFfeeSv_nb|vHvNOu9L;Bz1B^#q$skBS{4*xC(oLh(WATeLLP#U
zGp`Ni3>U|w$|=NoE<3d-2}&oiixPB1#T0h3kz>iso>Wvs9#o{%J{MCNUolJ%>5t9c
zVK1r|jIheMLxh9%~Rh^yB$@nN|$MlJNJ+tqopg%Y}=l(dj;#TYb<+8d{B3*$LT_R
z{CXm7Pqy}QOt^O!R4@PZJ(qv{FeHq=axO}y$FMckF`K-y^<{U!U8ef&
zkUbk^R#KoSpKx_`Wq&m}Nyb{%g*N75j8o#4O(i5GMjvmVXz34-eb>Jn69j?EZ&;+h
zpWA~N#M7vA?ixMx;-E0q9ix5S8L~=i>?dhOl?JLL6_0YPkCLL;t!Wz_O?RCQzOSFG
zBg7@mT;0!pF%d^93%bvJ=8W9R0#{ovZt~lQ2Sn@naN*i9tU-16aCS%I>%FIXZr}R)
zVfTaj2=V3@dsT_fbW`t@jw0uf%GXN&Xib!94P6@e1NTDg|*bG_E?mNSZ-jwV9Z0!%k+J>B`C-RcLv
zi<}WkDaLBEa){{w%$!)Uf-J3-(Gxq}pKQrozg|e&g~a_GPyhKn$QOW_j7%F0?i8Ua
zUJY9RRn1&nd}u{$&03x)7IcV;#7aSplb-%^9rkEaWdP2&K3p{Cl7#W{FlKtDR@I_5
z*qAYzJedM%RqM4_!pa1l#L5wD-j-Hfw9rE;RC&gSCOq55Hj%8G%}pQAH_j`F*>`4D
z^g)f8UfHS{W31y`d-YPYY;pi@W&Bco#6o&ah&Xm9M$4O(Bdr(5)>OLSlI!MBHrF^B
zZPS(ajh;b{t@Nuu6aLWM-C}<1-11O_s!d>|pu*$F4*UePEN0b`Ssu1L;-HMT3bHDF0*=@h+JOxxs)#sKH|LCH)M7m6NnT5X
zW<*cSjrG298xOz0#Ana8aL8@4O3}{9^ICJx<5pci504^nU}PkPWGH1`@k)1j64x7C
zTwGYZc>{@B`~L1h+M$BSe=Zjcwe0Wfvkkg&UIFSKWp$~gm8gA~xL}wdk51PSQko<)
zi&odx(mM0Av$MT-k7hX24-qlO^ymo#z$eJ1bEB4T0CLK)>=l=gl+B-
zw5$%~LKix0_3hM7(^1)Vc>T?&cU(k7Ho!~$*8GHogbhT#JSyd2mubDWD9*#fQ_5VY
zhfBMEsF%>{cO%qV+{UhMSAhNzGZ(UKfVkXra)>`etVKc>nTm>TdoaGSNHJn
z2yT}GuofoFlX_zZ)1YE2J@s}1q|ESS*f_TKT>k$4Zu7eI(){{|%(Zq=lg~c9daX0g
zh4AE;$MXv$~o1Ri7B*VK5-cyG}zPHad%94_l%uJGCaes;d>)WG}1nY1OGG
zDGsLh$ZqWKi~qvA&LkymRKAcSA?dcQt!=m{og?BjdexE8(?~Gv%G<$F@oH&=p1~vv
z-80HnysH6-CdXmW0M_#B&L-XVHz(FaJ(okYddKXFk=Co6ZNC$9O`6}bHy^`R3Z0df
ze~lZr%Bqety)6fOWa|?$Ipz?5I0+DieiC^bE6sd$kJ%r3oT|brB?5yUhY2HY^*9RDx&RK
zP+V+P`TC{X&zC+9oT@6}qE;yYhI1a?dm;)z!K$Yu$(xR)wKlXy?Xhhybd=JF;2Ul*
zj2z4b{B1ci&^RzLVQPOllO*r=B-gyji5PVpd==vR;yl3gs#>izm1
z-0j=q$`ssm1&_^){=r~J2Z!dx=}CIMxbcS?YTE~m1o?yfMym;y6oPGw{YFNu{N18X
zSO!N&+uHP3mL~uBp{F&OkLf)QiPhfKt$4+x+#A+o!I4rGdv#%SWwL6R_e7)CHr685
z7_Vi2^uEh}cF?9j+rgFMIP8M&T!OSbH*#C-74O1CafTGnFvr07czQ=RkG^yVUMXb$
zP>k4EUmqz37oB+Vfuf!$eh;VKXn#O&o@LD0OSasy!$W)aGPR)pE?b?suAttP$3Hh+
zh0#swjWF_L=%Ukf8shT+WG}C;Ohd2s9(}JyWw7R;2zEmAAU`e2?4IcZSX4zNC1K5W
zDitN|c0~3Mri`hKtn`XX{dhm
zM=4_{0>D+CS)&TO)VyKwm#CnjdXCZT#-8d~`>4oB5sPLXf?jT5pV;PY-9)HLHb>AC
zySdg=O0S>2P=qB{abw%xBr;cr55j(9n*9ur^-`h`wX?3?Xui%y&Dh3zX>Gx_mCP)i
zVeng0Wc2f_8m`5Y#
z3`+`PV2^$0#pXQmG%P-pN^h@^iZ{-;MIk9yDWu^WU?w!Q6kS_E@o)yJF=LMm?wVGIo-K6!K@cNi|!*0bi%zf_NtWI<>{R!GT2c+Za^jqJ4B!Bu+|ImCbW$(8rr8MlGV=>wj_n@DZI#
z(;dSXl9j^(5rCfxJb*|$JQAA&O|qBb*xTloh9`A%PBgx-rA`Qyrp9V#G_YSTh(Y}n
zinr-Hgly|Ev2~Q=(0L()d^8b%NZJP|A+gBV(9qDtt-FHFTuZIm_u#8;etJ+10d`W(
zfWPx^Gfg1=MDE|hMKA(N-bP20()F==t$FtS%k08KqBh+Hnk)0^Ew7L6X8?dtfvN&r
zNn_31pDxgKQog=zV@6>#f!Qiu8uE4F*r0>{?ieO>Z`3}Pfz8)__FSM2wWelk1(Z{%
zRGO#%(b@2dBveueOFI4|VtMZ3k-dq^zv*`*RwKe(YO^36lUlZQZW>oL*ZR?$s->
zE6lT{Ch=*XJgqQTvo+FH#(0IFycP2w8SKXeUg3v!T@A-6+CPPDY()IHd%o-v?o<4;x_KjYi^pBl|
za3nw96=F5siIQ{~dj?Dj;n}krg@p&-1_b0IPo8A($g(ws5F1NCm^UEdgu7)w9D)wa
zk0XDiF3b)H7s9pY4#}@<
zJGs2rpO3O~AlB0b-+e4TAW*7U}08Cd%
zx{1_{ZP2JJT33GYas_;l^7K_NPsfhB0N;^)RiurQayt`N$+U<{qj1bk6vxo&!ltB1
zzb8Lt&fD{q)6Hz>;GPq})W5fvOlyjv1oY3d*Vh
zo?~5DfgS{@_KM2NH6K)fEcA{^hRjOKsxqn(aiv%KnNth)rcX`n+Q}+S7_8OqErRCh
z46shupk}E-5mct8$2pDe^QMg~oBDZUU)3gl{D_5b6ake+5Z9Ac))eklc_l0^zK-z(
zrJ$)U%(xafDm!CPI)oy>NLhcsju&sX?kO>Z?zf0!P#7F{`Q%A;+*&RQ`7672ApWQ@
zH^Ir`o~OIJ`)E>#w*^oU_`6y7#KimjdUM3SmerN{aTDIFWXR1(+t9ZRX|`eBEmgg;
zjRZGE#4)RcsjWvLaDf*?QWU5gU#B?ZpoSm|XVWEy@N712Ztf=UKnZ!dpWHrBE!#64
zRS7*gfPml$GC4U_roJTK(Q0;R+-aPi(fY{iyxwd3)yMN7X+Q<}gr6{d3)hDPntrlsj4eSwAD^mo?Ro%h
zH2LP}mzOO^Yce2Z!DF_p@-GdS@g$ixtm*+{F4me1i2z}Kpz-}vBMYgQENM^w?S4i9
zA0a5pSLJ(8js=nRWKl~nztmh828R$W2zN^|4hRIIswS8}0h$ybW`k+1sVKZnjO)Q8NWpnstSGT+!gRqI+)|aYK73bH2M&_H9
zF!Xj~=GQD+AX(Jf5?BOf^*j-h8Ha?MHwvGtjRAKDtv({pl&LiJ$
z`yIWve&7Cp%y6TzW`@!AiCecqAe}k+amk|n*eM!-kj~SXYYSBtwAPr5rGV=G@H@5?
zjaO}SLzNiX+3{azL`Fq1zMf(`GNHXZ>|XWbQzI`_y)HL|75(_Q9G;IOYs>)|^KYKZ
z@ZsZbMf2T-zN|bW?U3N0302eiJ{rlVgxq`I{UJyhotY*|dCsqU!pwhuu{cDvAFX+B
zg;e810tN!w+J}LjGg@2eCBA!izr~Yk6Brc47)cPcTwAUWGYatcZ`vhTaz&g`l0NEMiE9U+3q^wdBeiZ(PzCvl<daf?;ckQK2H)btUGfe!>IH`*T5=HKYs$DaUN={t*or1-V9LbXtMxnl%SbvPUu}+
z+`@WnF)?mnwF_uK%g@g+l(MxosD5U7&YZ`NA!r=G%q8t+?O(klo|Kei@#O_};!=f#
z6x-!*;$8HHhRVjlOU|^{eMP!2U%ze+kZnLWMnBDU7%VzQmy+U83e7qFNk|yz?6#4n
zdlWnhL!2`c6ad-@A>z^!>UyEN90gWLqGi;wf;Xu;Pv
zm$)jS7y+M00lU?Sz;A)rr`x*FnH%vzxq&>o(Ays2#)d$>3g6+?(s;o7c$G%7*P3^<
z>rn2opjph$T~ao*8zw+%Anas0!{!8{NlZwa7kKrSRjOVAhWfZSy&)oBq^j{;LiKu^
zPH|vhAj0`eFOT@TG1mkgsKzD>J?=4PVUaOeJvvEvJo`lZh`W@y%jCM5#+$vsMKre?
z<1hp!KrDngICR5%^?q~W8zM(S8xy-QC@DEvm*5UYL@lp__Ui
z2J)@xttq$B5P(A6X0Xqr-7FLP{Qd$&$-0lw7
zbDzoj8Qe{O>o#vzB@wUH6}$?%gjyFi`~J=E&bXt5m^W`??^UVX
zKTB^*2cl~XikRQg*}~LI$j$(%Fx;}GbVa(uDF5`;snUf?%Y}(GeAil6q2n+-(@tx;
zh=qFZRBB-$f=`F)_3!ff*M?+R_I*C+O&13~zN}0xyW}`%*LKKm5;bUlk&}~ijDEyV
z@KQl=-?Lk%UVjNT2Fm~)-W$3#7vbBW>^P*gAkQ5e_-0)q_42vu#nDDC+~pI-(XFpa
zvTdoxokh;t{*3b|O_A`A(JdCVeBqhnv^M^g+$onJ9#wIMtAIkbDL*2y?`{_dG2_rs
zgQ0c7gy({@xMqf4ZVjNvuWHns9Fgl0Poa=pY>km_qZI3!kL-mOu2GHp51O$g;7(1B
zcOMZRa@2^}1|6YMh@i2ks66D!dhh2R+!7LUK$WN;7BHx3v0qzTn*)}1WH^QlXcZgn
zZ^K6-2|&qv0?dgyff_0lW7U@ST%xUZ6gxQ%3q{Dx)WCP9>E$+PMyzJGA?nCS>Wg0$
zkY)49lP4MV;EynmhK7cYN<<>+D=S;3okFN5
zJ5Q?uhYB4JXj;B~dCAqaI@kH+&S*0lFT=HmOU9Of!(cLw(M4=51>TMVjsQukS~G&E
zeQJ)LVWpgVwePOe(-sH_l+F5^L?i_P3GPihE@f(aD*WviIbpg2T&Em%#|u!rpp5Ts
z$503sd2~l3osvcIa_rkw&D4^(Ef0m0hcTTwpa4ZRW*MnqoeM^vACCU^lx_Zf!YSqs
z0YT-c<)G9_!<+u0`dl_)?@BD9pB1@Yiam;KfHD^YFvy$vFo?PHj#LdTF$0HMZL+~mW^u>wW^CU@M?LyxiU~519DE(U@r7tc)49w
z=c467d1=;M13^53fH{4Ytd&l~FXxCft%n$&Uop7$DFjd&>;?`JjzP$JHX2c9{p`cM
z``7mTP>`NPx_hB%RE`Ps3<_g>0fdS0#pcJXUP6fl#@7VNiV{4%3#0eO5Up|2MPn;q-?IGSeOf&vq1LS%~PZ+{%j=RI75hw{PE$a31(c
zQ()7!xvVn>D0J70FH8?G8f$rJ!$o~aukJ^v`KC{PpFFrY(=CyFO=5nI8y
z4T{eFjA0qqtR9C7C#(1O0Rm5dYQ*U7xBTA7Vk
zmG!Lpwk-p;B5b1OcDH!EbbvE`{lmk{NCZ4`{l&wx-0z0~_=aPy
zB%!Jn=^5jPVggq%4{F&}gF_NQeuFh^`#Fu|Hr}hU#3ECb1Hnk*3R9`9VHK{9IevTs
zfLg%#lckVHkH(nu!lrdC0RP_*pK*fK0?Z01#L5+LoD=gt1~f%rAIxFdIt!*TfCR#6
z;?mNFEy<0*c$M?}qXI#o8JL_*=0>r1^D3#Rv_(sA&+%GAEEBxXpGP%Njx2pj)i6V0
zBril5jE#)E8Fw}_Gh15W=u)$9d3&mI;-_7)Rr^UN;4groTs3U{Dnha}nJ;wsP`(
z-9u(|^^gj;i&NdYcZF&D2)I=8(lL545Waaf5oWIW
zTZMOo?mA4m;IWedu*qp=zyeLUZ=2mrmXMMnswd3=6fBu2OlG9d0EN6v*!09+c)ZDYNVp@@9N!m?FIvy~OBb4B$*vu7-ZRp_fXmw*_96h7Ka
zF*u`TP+(WBxu%}<%1qDb!~LN_-K@E1Zc3nNrrz4yewm~3b>Nw7RU;W^S{gV6%!9IO
zL4b$Oi80&QYrZm%b{kFN83f&p5Rj^Y`=AgX3i~%)#3EMP*i(f=T%hG=g)9Li11&Bd
z!6+&0C#MK--ViH)jE_H%RA?Hjjy~FDlwZKVlT;g0@5vqN8D(7bZl&HXcxIN*G(XER
z&{(XzE+YSn9d%`LGR&e7I$<{k?QTI#q2pl=Qbc$-K~*NqyrAr8Sy=@l_dyc8*;$ND
zmsy+Re9vzs&@^ichBem~8+G-xVi`SNA)#~H^u&(hqk|y}5^0m*g$chj?G8l3&H8ZX
zc4CHq6KLoTo>jy&O`?|WcvpUoLT}y_bo-;#JU-!;*C&nZq&WD
z-@dVr#zv@E@ni>meKFuyQ8jQ=AE>$3w8aob8{Ccs_>Z-@;6pqto=GGZCW>su%xK|v!GKOxsSU7=Xw#GG2M2>U}OOab(a{K;~FqPbNvCv
zz&(`($HWxw|1s+u8hWI~CN1jG$dIX5f}&H-V55z#Y>e84FhlLSp3?HM>W((eZ|y-P
zf9?BFH)W;`c9el3m*L(VpzIOeS&ge0%XPtX(+VtzC^$GYZ0a1apf{XY+mLS8=E}u5
ziLu%Dqq63=i75@PEljPh9Nj{fz)qByHX0@Nl^cWZ6h+Cg5Jv(Hn(`!SF`Lf#N?h3F
z>bwYeMBJ7Jla+IqW*fgHz=moahhV-y4B
zvDvVc{jJ$O<6_gOECWMBWs2&pTXmF&)6+ZfG&ARYr9@)km)_7Ljyn_z`^LuJncWLR
zC*FE{jE+17Al;fVIaC*R*X{Z(
zuR99r_O=uLCep!GZ-RqOD=w+24Y(NA6yhA|!J@F+yKKfHkLaq-d|p$V{q$!^{RKVj(qXhUQP?i!Z}-YyY{MXTu<2AWk-9V
z1*uPB$Hq868uCD41%*AeDM3~v!{{lLbbX|63YWsZBpK&>v^au@kai@==JM(k%i6H)
zDdpUbGSB?u?qd(2ROpMCdKuBF-7{5s8?6m?|G$#d3CJ;sJHZxKD_o=X(SL!g@y^0kT^(}4^h{0h
z7W%U-j#-0eZr;4d%sIS&KhdOIeq_4JxDA+M8+Hw#zQGGq0?sdsg8GK274SJ3qe7=_
zNc*0@PSN1G1yqcAi;(rF!26$bMCjaq>&+NV&CTUJybMgfZ&&<}8o`&@d3Wgb+Y9?f
zLUDnn!*xQ@A~;ZTg)h5cvD*2zoV#=VfNlspLk@d2-dhgvrIHuNL;tGevfRRG7v%LSY^h&>C
zq@?OPi4#E!)04f#N9!er$SjZWYiz_w-1Qi3kCH)bmx6K~*{byE^>tl360ARL-{SJ^
z|1oGCIq;8h)oSv>dDHYn>FneeL)@nf0gW`QnQjXccrZ(+f<;)1s|v8BVCmdD$ma~2l@ntQMN0aLp=U{csSTX`?2rOYSmT!lzO(fbTB)G^NsOAJmX7nTUtj7
z=opB^e~g}I)qV|9B^aa)0|7<2m-nI&Bz{|+Mh2Qr8E{M>Vz63USkMd+7)OJS=esY&
zai)?((2_x8eeSlumdZUz_R(M1b22#cz{I%C+N5))0SG8~po%%ftlL06MXgnW9rGQy
zEb1pu-+c+pJ~#!0@G`yu$5~shv$1MBIZcQ)M64NZSbxrTU-~8e#l9(ka7|rKQasp5
zQE+=u^X?*=*_>3@FU)}jj}eN}=m!#qoV@&C$3s^WkQvyemxaO7rGW}}q6jyZ5D@Mc
z*4Am@Rw#O30ch8a@dL|XEaBJ7xmND&riOU@?X^`8)-%JKwpK%rG|qYL@teTF_^rFVgf!bLD&$Jtv4xBR
zP|&@r!ld2bj(-2(Fr-`ncw9@`?Q&vgZkOfPH)dc)+Mj#B8q=9C*vgCb0ei(O#3u6G
z{H^f;u<|0EQ&)(9&8n?jAc9$Zt7={oiN2$Vr8u2^b6WpYU;pwlol7}?7DU~r<_^hF
z>fr@dWt+bxJeMQEpl1P=&LOu|-^2ezCkwND$MQzmXLRDfHK`d04R
zNw6e%JzT6%u5FCmAx=>REefomVe}x3pQO83x80_i>3$vxiRzih`W4z}atdg7Kx%t6
zssT2zhi1nJ8Wwdh#Zuy~^Jyrcp>Ky&{T5=(+};|&+L@gA*Ki$>^h)l4`ezseKH9R}w{rNTnb350@>ilDAn2^Y`>5B6GGus9D`L^^GC3@T1
z`?wNNgu!d00+*}_bS#LVHvlw;(Tc&;p-1-07A=1s1^!6NP<Qi|+Rnt>}fF>4=ihboEug&}Pc&$9puyM$4>N*5|$Q?#KthA(JM>$ps(Sy-%Cg7bJ*(H+rRfQ(
zJct?PnjEXA1o!pv7uNcUP82=h2cnAtR)#ZJY(K|MumXSE%y!wvZL^`v-Me?4!I@gl
zptqH}x4R)RUBL0%$)fgRti$o6=32D0l)i{chS5+bIE+UYr-QgG2HU4Bh9C!E>HGN;3X%zbBpel_S{8Or%)F+k2?wlYnGrcT_3_41CVQ#6oCM
z*V{ATuZO+r;_~74={87Cr&(d>0oW%oj`GIF@6^w>uL1%fZ@%`67fJ@gB3!~T5JqlT
z^IyIyirdb0EP8OVrTfBcmky<(T3)MqisDzw(k%&}m?!
zavRIqZhgr2>C>m3ITp6r>yC7g%+7IF>U^F$6|h?!4CuQlYhaT_VRq&ut(K5J&p&x;
zb<}IT2Ru?Ra^N0jne93|e~ccA$|LD!7xfh(y3~w97ltId!kP5YUY0}dt|63;W}cYU
zo47RwL?y?0Hn#rcFC1W_=hI*`Mit*-965hGNpWj^v~)%5!ZL!aSEptrA5LYW?q1Ap
zlaiFAfpna$a7;Ycs=XP%wo&Xq$p6BqS5ACz7?eb4Y|g8grU}Q&pl^P4ZsDqQpd`vMAz<2GH!Qs*j&jI`vg1##ShSNXWbhU5W
z`5l+8X~pIa$KVW-6vK*leFI*6P-pn_b0z$;b;pO7eZ?LCRj&|n!4HN&?$%&Lh+5rG
zxMiJS-N6D>dAQ5iq9K>Zls8DdrtiIc4==Pp^Mc3JZq4U-5rk-NuH|m1JP5C+MT#UMwzFIe67i4hf0G&qPC`Cz
z0!x5$?r3*mRFB8(nd{D7`IyxFUa??ncMVY00r*7OL;=-DAE<7@FFRwg8n?b;X+6>_
zTUP3zJQJ$8aJT4f{OZ}Jtd422_vBxsu7h<|2p;X(!lI&3|MA|M_q$qs1^2X6`Y9a*
z5vs_!xXNzdT?k?Z6>DrxZ|fDg`=tdKP6yImdwct~gaCHgdWqs79q@b2eLeLw2c9{w
zv8BuN<8kqFrH%)|;gQQK9U}kpdYjj}>*GE2RK&?}KXtar+n+w~c`74k>O96f>hxDQp&cr5Jd`
z)F3~McAoVknZ+N@cekC<<8ru?7p^1I%ue!6Q*EATSizfXF#t
zz9d@H6O)ta^o(Sy?t%qn#yA)nEn3RH0Sh!o(Qe7PS9KiRoo0INpFUj$j;jT#YSWKT
z2!CbvFcNfAFdj=2UmbLmDO|_%#2h1lOJ)?faI1<}lvl56mNXkbe!K|Gb`n@o34rSm
ztkF09K^?Dr&X1juChzB_W!CTv3_h~ZgC9af^p;^sVZ?jXmmHwKk
zgqf-)Xulw)VnFlB_dhOeJA!eUefAac3~KOvWA|e_ny$YPGC#fx4Twl%+zlS;1a-0r
z@;Rb-%iS;t`G=2B4Cd#ESU31%Ck%UeU3hP2oRbm&J#zd(zP1tYyNrS@%dD`0I`H#n
z8Vqj0EM+i^bELs6U8vsdjBk|G!stlT1#o9o@UGRzFnyu4IZy+@=YyE8Ph4N-yLeHN
zL)^Bh$T=bh(|aQ7lD%Se^*M?8y^tqQ$zM)~J>EN2kgUp~pM87k^5Pw17`}?l?^(WO
zVuH2RVa&|HmXk?MjyZ0*+;B_SjJ;PUALI><;@PNI&s)(l^*2xLEWqJ8?Fq+X)1Sq#n|)dvcy_y<5=d)>L5;n7}R1SOXl%plEYh0QU!e+!&aq
zFqR;l0sA$ijt{gpR&`)}oExRn(GVLNvm5dJ{m{Rqy$&8=j>pm@GDw!{f=pN;PHFcA
zFN9yG5ch)TO!N89L6`;;E-Wq%^B??KQH!tvaYeI3{7vO-Y-*5oPyyhSN=H5fW)H!A
zJ>CYz>fl~`LlTJuTpS*#N$C6UfH6&!ntwVeF%fB=M}4@sLoKzzKJUjVrN`wt7wpfu
zt0nGw)dBcL;N8KjiAJVCwd!tyNuHpa9S)O*x(8xz57us{HQnSYaNg$yK2Lw>3qcYC
zQJx4q79csrWu8B>tUE3eU>X3igG#rN3|CMHJko+(EPQ(BFCWl1xI5s{(EyBqYz5;Y
zm~HMNd@Q@z?qCSG)GY99Df8I(;gD$;@Y3HYih|?;bVV!-f_?v+H}=l=9E%7ol7W@m
zDMN%x&o<*hfsED~wff@PD12PUm{==9vOpt*N(WI!N@)pd2knrLl#Lk^f2_J1)
zz>asCF&5wAa%`!}2w@6yZcbA_ObiPhP~eY*yNT4xwd95jk6(HHy2O^&AdXn8pa_bJ
zRsunXOxLVUSH(d4W&kdFUYM_dGVlicJ{@ly2fzPa!K{hwzhHcTnr=Wru9K0bWGIwl;fzq|z@U(&mYm&UXutdR4*MBSgUf
zT7f{;0J{S(7y*zc=f}|m2qGjTWSE)3kjZb`hg~S~4A|iH>VCU=WB+nnpf)~Ph3XRz`AfxfzGT4;;!4Cew{#rl=MhJwlPw{yYU|o#@drKV1FyXdz
zn3(U(eX0qf7wgIFNVq>J|MzJ3Jx)QVkZa!`4EiEWphH{LymRk^9Mk%3#K?G${NzO1e)u4n?A-ft$CYr=uihDf}dtHEU2K%1&HxqnF&X5U(4A3K+YK;$bxo8wIvG$^=+DO(C0
zU5J1*3MJh{>~h5SOcP&35h^V|n*(_esT*UQ@{gxRla0Yxt%VrANAsY7T3c>~ZT;xl
z$L{H48~dB(>|X+gMe#7yt`9n_i?2z|yQ2tol`00VLyLj03o|w|a~v3t*l*uX&F?wY
z8v%@A>a)y}cfrAUix$%ksn?q(saRPJ7!Hk?TdNf$Lb)=Em}4E-j^HB4#!?b!80S=NtZ8ra(AM;qR^7-p!0Pw%H*kwxC#OliXpGP
zggbhVpTkDk2zeJH^-TWBq4RQD7xH=DND
z+jhTbs}*G4WG*%0(jAr=U{Ir!u54bqc#a<`KHKG~97rf>iHRksJ$u0aijdEvq4Eas
zNZ4c+b6h66%_h|@GFB$(3g
z+M?5iw43cDwh=%igQEp6Un+q9Hts5kPlCM$+-&R5dB>rFi7j643d+jORvD!kU@5ej
zRJ(H>P8%S`c+Fp!NQEr&r_r1Xfx1b%BqZ+~YV|72Ms0EgyBf0P4(
z7IvAVB^VBb(1+6I!hxIqX^gnl=$*NL7FVc!LuRlIi*`5943dXO3lJ`>z+NfZe+RN?
zi(J2$d6tPy;6g)|1yN^_)J!SUP*)c*Yq;w?Vn-foWL6M7;S;rb%L2eI+mojoh|&vx
z5U~TqKQ5%7w7h`?q_;VjE_p5P1^q4r_p%GXsEhaznb
zTZqr~==sURHAIy2)P$52iz>p9HLT3ye9l<|~1d
z9w>J0g$XuVkSe$7L$-r_TCH}nP|PA;9sY`Gh-Pj7_*m6)ajN~`A(tBbJ#a9qVoDir
z*~P|E-k!SI<#gT(Q8DD`W1pV-AO}PczhojV6lxKl>O^Oj>#U3OJ!9^L};-3LYG1o_frOj%eoaWWsav=-d!L3THhcA+rmsULft
z>GO{eyODOrp|LMBO$k*1M@x@;&dEK0cD_Da3XNc(Z*h%CdeBr>wvK+S8;T6UL;j@z
z6)n#2?AYhChqJ|p%YTDb`|-5#yVH5(kzHwf-OKY2jl!~3&%xmp!f?y3i5PpXxWJv4
z!Z2`VWMt%d+T`4*qh8URIGBj8J@+xrbjV=_!MF$H0;{HYV}vJ#PW&b;4>E31;b~NS
zC{EOBy%oa5s=ty9J)h3^bJC2~voQ3e0=_`)g`O@s0F|}q7%+r%7COQRd?%Qe@+`p`
zh~J-RS{IspRCow{K>LsxZD^q4-58cc#rLlc$qqAJU>-VHz%VHWXA}!t2$hSvb?e(y
zUq&_bD4#y%f<@2~D8ei7rDpsy9O<4CG!66P?m2fpIq(~mT!S4{II&+sAE=);5;YN~
z#Ar}+*~Ll8z#0)?^0=^|;H;RK3YfbnO$j{62{Y+>xkg|^A}7Qk(-trZZE%T^
zyz4M7|LU12gr8@enQ+t(+pAPz+@re_p{58_rHkhycOqVfrdztN4cE#0W}D)?fN4h~_r
z6@s>bd$tT+CJWQCXT}0##Vmmebiu9fB*L(SRk?S)WoORg;}2sqz%IcjE8Amq3^PfVpo0Xm-;2mcixN
z3MBvoM>?%s_|R5SS&1+T@X%iYhpLg>Km+<%(rfh{VA@j(f@}yM4~tDOEcUJm5nN%?
zEgu0l@z%u9*~v*0D9)che;$D62UTnY5&*c9LR}_%GD_X+F_qWXFM{(7Icz|Uiqx?Z
z*8t@VWXTb=4^)tyI|=xYADbb#HR0Z2$_VtC-Q2#l2-cSSRaL@T(Qpcs;t^poBCmr=}LxKK^x%KV$xmyDq
z*=DM@!GyWH9XI&C2`GY>ya
z(LoLd02W6n&Gr@7P~PITcV63me?s=N};)
ze)!Fq|G3dBT1b4trQd*Yhdk`rf8R{vrV(DpJ984n#^7!4s_e$Z!=5
z{punUWayRuS;aHI@e>OR-lj7LDIq~1FvEOd29kWQJ}n{`ll|d*A6r7kyLSiR93B;f
z!vJ+J*R)<9rmk!fzWy5?)>S9|{`(TO=j!jv+Q|kiH^MR@TWxmsJv5ha+|c09pYd=|
z2N*5R@$%|vlME=J13Lld3a@lYS^xg!M+U!-BK!Rc@-Ls=e}wkG|8Zd3Z+`mUmtY~b
z{`bZISdRZL$iI8)_p1CiLVmBruYdo$Apc($qyY})*%|?iDO9%%pqEoXUxkw&_~0xv
zXqyrE1IPW)Vag$*st*Q70q7ya*g!5KXQTnQA?e6%u(IGogRx<;e;C34xa^Nqzb$Lm
zVxg(ny1iWsDCh&n4D>&J5=G}(f8=Cr+DspGI}f&YIH<}P#`QI7gHeD_3}M2#I)291iKw1kO(l$1!V0-~gJhXpICgow0sOG~2&LpKbKIP?(0
z49yJx^Puaivfp=of3AI3me*mP`NfI*+~+>i@F(qxWe388Z5GR_ZD{Ct5Dser92E>T
zt(_{Y;tYiKcVL#pLCQUQf6X^2erqe9-ZH>C)fHPo_pBRe)quqgPeB;#**5@9*?jsE8o--EKT9xLWY~jt(N*AtrYSsdf~T)
z&|*i&{5@Lxceipr|MABk5svC-7c-!ig7ktzY2mKs#C(N2$o)K|tA`V>dHya)K2)PQ
zjwm#?H1209jfICea|vm-vYk8MsWZTpap&2Wz`{b
z$*3r1o
z8E9x4P=YJObYeGZVD|gF9hJPTvl}%klrE3**!0XlfU-ijEw~%?^q=4e-QzmSWpBSC
zRu3YZR;JKJTP0Qxch}5m6XXLC6b;t#otipyf2d_@UqBiK2BQ%fA-VO5$B1<78IDt@
zX1gaQo6s&VccW&W!Eb*){1*A(hq~F|gID!^``|}cNJNp>7)I;N-8U710yxK60&SW8
zvp}dBQZBHbknp+FldV6`WN3>U>XrQWoc{ags|$}L7Ox}4u6*N!gND$)Eo#Mj5?xSO
z7!(iL@=a>=REN<#Cv-r(x#Ifk23N|nPM&B1fut?}96yFF^@|rT7R+jEN5C<8cF+(C
z8lZ9PeZ}&LmtJWcB9TkGgp@yi`Vx8p`vokWr`Hb=j9PSq^|jpHCE6FWoz5=mh)?@zCV|9Dr|JG3k23o!c-eTQ_Yu$~RJ7%Y}E={9Z5c7>v(eJ~&Z
zT8xUOrUOid(0O@kZr%<&7i8MdmebaTsHh@vGtkzm!l~fxXjt4QhCjfW^!KOqM1u`c
zzj4FLlOH^kqr*Ffxl4fn2!upMMa4FPs25lZ&yBwoH1&~x!b(cUN09;^@V1?t4Ehz{
zzf!sF67}kkbv?axJ)?E~tqZS$($a7lB4c@iGJEwEo-R1vQE226Yvi(S^~|{Hk!X(bj}bXl5&Y
zLQ^yGOvKP%#L#d1-=`#-xYCAY_Bww3{okG~cPwHC)SP<;!0D($QHwtq%+KT!M)FLj
zM6Qq%h_qHOK=;v*^4(q>dG-ZRONcD9elxj4@wzyNuU?(-aD%8$Au>@D!H!!I2(xFNIC?+{?!Tp;xw{OcK>pKd)w-v%*9eIAU1~O^szyN72<3&dwD~<2h56G7^b0(>g
zFDaeoiO8UKv~RlnjsLAL<-hBz0<4g+8PiQkE!ro)yl$*e1bB^3(vBCRx3@)Q{FO@g
ztL=X;c0>T~&VP!x_UpqS`vw0*oSHD8LIew+_P5585k4NG8dN5Ycwxbp9k}FbuuPQ$
z{*V?Otof)sOh@$Bllh$PFT(A!pqUDwbGfDT8sdtu0oK-Cg>8q%{R(U!oN?ROW=8+`
z!dhR5Ro;$;HP;rBNE;fEtGs
zvo~?j5M*6c1DNfj8ZDkcRotI>Ffeuo9HjAZdDa!;U^9>+p!A#ACoC!oHF{5=8e*Vv
zLJbUGtlj;slS5Ephw@B0lT2vqDj|99lW
z$45s-qDIuOhWRa7z!CoWOs}~r080t(T1Z@+{!8q@>53K^ZkLvzj}ffn!A##y@%f~1
zOHfv;gTi@hAM4n>$e`-0sHAY@-hA6vw}_5eY*JEExaf5*$>u4$rz4KVO#eGF4Hnk2
z*RIu`u>+0)bd8e}BFM4$kK0$LX8YeSqsMJF=HtU%X-B`^7Pu<#PjjEO6`*_&5wgl`
zGZ#y>O^#itc%EbQ$PLI{=S;ds4?YVF1o-lSy=19hr~^
znZ+9h2AxcXQBlyUGukPb%lGRqs5}TqNdS?)0ENrNg@tCNa?l&yN5z~8^+})gGP_e3
zp$wb7cLX=OG?Eu93FZ;ipI_QC_6GDMOK
zquV~hMKAk~BO+06qesIXToyM25G*ZvAL^|n@+LfoAK(^xb0DiI6{-MZ!fJ-Y%iG7N<^UG+chkxFgZgGY65_
zGi;#jaD}`AoD*Q>n)LLjqb#6fuM8|X3-NM?1#-#jyHIx3|N2qX+5y~OsO|Up>e7|o
ziogI1YHc8qfxg0Us>6bB-5MggA9F#G`}py8V|{SpIR6~rn7o=pgD(LcEhEJFrUI!%
zi;>~irN72HS#=Q+!2=b5@;VxByE(MZZL`x906Bn>o7SX
zO{oDLEOVT|Rsuy2x3Eijz2PF(4-xHw
z&*~+jSEjWBq!Q4rW?`s`NzSv_pr&N5E1pfBe7Q
z-#vTw;Oi)Q!T{TVwLT%U4piq5==q!YI`89u59aX*J>*z~90BE9WEbN2sp7i!M#d+zW&`t9lOX$@li_A(&v5mRYx
zWAi*T)W{PJHH$-#<9stw9O-i3vX1S%K|hKC
zGg$>j>t$;N>_0e}0U6N7-aQ7`r-z4*%lwo=bH)qbqW$T2A^*Yi38_seeS+S6Ztm%;
z$V&Y9H7cO+y&U!Z=auA0L(a@3oah@iHt8_-g8V$pRDensp&N_C#09^8eHZRtbKPX0@38uPbxW6{@fC_g+fs`kMD
z^X2(}zh?hG|K=ze+5Tm9ht(miEd~H{$}Z)2ySLmYZQfs|`~__aw*r5F@+egSlM+y9U^h;$OUsE2Q=6UxD;^lAt~E?iK9X%sqOX`~@fgP24Orp?%-HNUeAS^{R#
z46Q!+5wOxm;8G65hzuw65~%(Vi7N`ATOx$W2xZpLpPQ5b*hxS2M#7y$%n8ul=}^Qk
zLALlM?8n7X@&HLGq`Qs!$jz%D*phttfFv+SYC%v2;!$A1c8Bo%DGJTwpxVs|gtXJz
z?@Z_4D*uK|o_UJYWRdQf>E7I+5kcSxAT$@~5R9dgaJUr2PK*IPOif6z1bwO3y1wbs
z7cxVCi$4Hwy?n~!nE}fGO4~4X?*v=0rgJWp+dArL8fL$*BMJG?R
zeK^u|W;c7(+3N)mQAB~~qR1aH&MTvlb#)R*lE~m4;3fove~WkjYYEskmawI7R)#hP
zz6e7>A5s_tAh-x!a)enFhyo5`KI?upk$PbRWdaFWemG*RJGoOu8{iY5z$cR&MJQm
z+N#Ftd%J`9uG&aY2UO5Np+372DX@QbB+{RZHkv#a8(RXrxdE5=mK-&u*kn
z&v_Y^|30)^(*@$=VE&LKs4)4^X4Dp#0gGEcYBLUm^_4*uIZkNUMH%v!9NSt{`V0&P
znjLgjf}X`rL~sRiNQuoa6cmg-=uAswoCcjj`f%BX2ecb=0R`l4@?l!LTZo(&(lrE4
z9qwtHFiW8up$kGUbpaL>RFpws+ubtsH3MB$D5%=l+iNtk~UCJP>>c@fHr5QBZH}=Kue)~{pc=H<2WIFOq{#>RUqXes{
z404qg;tD{*0h;TCKd1vbuM8?8_!ffH2Exa7M5unq$PBx5<+!M(w=y87r8jr2d8;r^
zx{C0AzurCQBr*>vTdeN@3ffaZ5J}lT#nAmg)=GuO%%d>pBE<56nx?dnNW5HV0dr^d
zsr|tZPSvZr!w2_1ZzKD{OQEi*B3ZUDJuOh)I3+VEv^kOO+SqPn{(~{}A0zb`zq?OA
z)NjWN>H07t2ADwu8ECj0php@C_`)ys+li#XK}R%-@Gh+eau-lKV+{hbyvagTa8#K=yQvA)mJ5QRhN<*^!E
z0fzVO)bjA~aKtV{w;W(gYQU(;)b(p^i@Oc=pVv4C^9ktS0%@USWDu8TyB;_Zgl7UR
zy{7L1fa?hBQwkifOaO~*=LYt|P#i6Ppe)IzfhZxj@&`p|0-1!C5a`TM1A<%|%(b&R
zd#VG_2Bhi<07eK{Y$!z_bG)FQqE=!*--35YPmv8F2NqeP_u-WwFR$?9$1`#52|SSZ
z6$0~%6%j`UqEAT|J7{A83zpVTBTd~FsfaFoE(aUd4VDoJ5kNaVQcuQTGSkAp7^Hx4
zXKrCVG%89%o4g^X0;(2K9DqK?cT1}?y?A)X0{zPYzI|2?5BtZ=Hyh6Lz
z?d@xL1~kzDyclwd0QGc`Y-s5?0_}>KFqtq-r{pEwUkO{Y&`yeQhs)RQ76YnIu_X4D
zPB+?(t*mqkj7RE$387J{h>8t>jIm8H;%Hd5l?B4Jnt-iO-T7aq-ta@MxDBeDZ(vGX
zG87r;`lPQ6*79E%uCCTZ=2E~Y2MvgV_%ni_X9A{w!GMcGq{SlE@zxuts38+$z+@L8
z0(EACt*vqE%l*-L_u8^#<(t-h?)T>G>MIt&7X`3vUGS_Jn}zWeZqk1_ZwHR$G$OKjpjOlBFK?*3=bp2^CWoxM;2gApMv
zai90I#IoajcoVM$BJ$rFx{mA(6=To=MIdeHf#lMl=eUPE7@G^w%KdIh80ffSb~8$80x!Vd-rnfBZfZkF8I6ub1u4X{m$!o41i@7}!w
zR;G6$vtFsgQ}=UwQ9U&Owe!+-fz<*wlc*w5(IMGpNg|F`XCPt~G8Iih4SF=m
zYxtzP;o+9)g_|mX8A9jPowAL&+LJ-}1a~*zg8LoXfQ2Pz>q|HE_SU^wfWj5ho`(y!
zkZ4-A=mp|8?a5b(;GSSc6eI@ZRKw0V_pH)I-z$8y2KgpNrPUa4qd9}DRweyHmpe0c
zPK0Ok>-Iw*#fT+RBENX?a|RlE)PsT7JF@(vnS9F(^(64SS}^+}b?%i=-P)F&WrJHJ
z$=2umX!5jwlofOlL(GflS8`zE{zjN*i
zK@AP@=Rj&zadElmX@b~@2$5}qa|d=1#$_S0fe2kA5c1ws7)Et(WsVH#B?DGLZ^6pq
zVta&$3O{R)9<-yun5lLkjUsVuo(`0K&$*NA?O6#x+nWO57DDt2eIF`+T_#~Z|b7BlaS3IjZ2tJSA?qoZImw3z}}{ZYPMKW(O|EHU
zb_MKK2{IvW>?dxE{Tw!(Ux+il8W_xWXybl5C@
zKXUG#$5lW2w%H~3t(EBR=f@ti%GeKB0`VTV)g5k8-oLQ&{%@tE2rHL50WW4z-3sEY
zqHNTYEkZ{NPN-r{546h98e6p3GG?Xu;6D7`(k8iCby%!hWc-!PdXoG4epf7mw3qEFznS
zL81);94(m=w_4(d!=zN~yc<4zs|z5!F0-gpTFdiG^0u~?#0hs&2|Ys)6S{rM*KIj>
z7=(8FWnYg42!H-V$GYjT&0LX@4WroFD0ZoumlWc*l(xm~+@zT+G1S&c{8FVjdAMRV
z5)`wlSVo!9ZPJoXscx%P7SQl=EYQx?fAiD+;nuYYJy?a@fv?iBC(HcpPz
z#%}fu#|L2?n%(E$T~E10**T&501X=(Kn
zVA{mWdU2H(Ra;^C(paFr5A9&AW-h5XUPQ02%fPjYqHrq0dK`?UJQJFML1n!ypu9KE
z*6Sc$JoJ@JZsN1r=iS|PlJ*V2Yzy$Ly8XDNK*HcW>9XGH2g#S`pb!}NWcqd(BuneKCXJ=krfMAFtNkO=27t;J=WXT2-S
zP0wmGD7sYJi^c6)uZdf0#FmfVW>RMHv=3Voj1w!)ge2zArYIs^LPwZC7R`88<`4(-
ztz}VSZ_EWU*_sk}Do%8i#iJv_Z?hZl`zTN9B&7*mBiLk!=OS3`zh5yck(M=F)f!oB#H&Z|W15Ar)QUMVcC7$w2$-)V`N_2xn2Ax=
z7DsQ6jiaW2kDc^4l9b5q$th%QM#TCwx5#o=773XykWr?)VzFhHLCdS#
zj%;)BbgGL@i-r7ImX>)svo0t=>EbSdR~~|r8=ki?gv|?nU7hPBb~CqS
z3iv5Kb!)tgm;JQbE<5SR5~$91;?a+W1_rF8x+Bj{d=6Zw*lcK#d)-!ohXXs=EXFc<
zM9e>~$eNeSdC(}z@j`%jLEdJJ&gzt6)~SsFSPC9dl2~N(BwCWB){7}$>b8OzFmX&+
zPFK8%bT{<2mN15qg&PwYtn#n(=7o-H+w~fqvl?w2LFP4{=nRTL@
z>?A7E^d5+2b>M|WXNqVmuKjrH8E>MQgw!5NNnMKkhxL9{P?kf)y-+_ny%1Ik+v9Mq
zKn&Zv@Zl$ubC9_DrKP3u&}l4oPDZ^xb2}Z%yFGlmC3a;mPdX;f;0~WIEi-WWZ~z_b
zI?H4~ps`$}w<+C@|Ak
zkI@Odjet3~zbmYl>UFk;wd&^)VWRIkonHd{)F#6(<
zmcK}1-kRf7yOFaE5yJW`7l|w-PF5Mw#o8(w^1Thl13=!xIOTOm={lus;y}aW*5bzv
zh#Jb!IIwH61}?S7u+SZ5L2!sFqZ2TG0-^9p`7o<^N-@(%`wm`9DK9Tq(I-T~w`u}t
z(ks!W#nZU9CB^gE*U`*k3HmOLd_f@GqBw^yj&eCk{`Ar7aIJF>KA~i$+f>#!
za>W^DF2zO>S9`lAECu_E?z^{e;JvD1nKN>6uiNvv9m#)US#h6rS^YymF!5aL69ZN=
zUG+hUG-Cc#7$$^K@&=jwwY8dHpo+fnV-Rv?`d#Ak_Kk=XlbiqJ{2%gFP-Wz$t5_Q^
z5igbqZ7_i;y2OrlwPDitD^+u&lXgm#Pwx+|KTd
ze&;*|(a5&SiqiAP>HU@``$(x7r*z=N$gFI%cd5*DOxYyqM`C(6D7eSc^Cu#HGys$0
zEQADsS25`(`S?KrW)AMIJeQ30!4QkMCXm_~IX4+6Cq6$;hFY!nXOd^bJ|tssc5M<5
zZ-g0jZCDYQF&0s@3e2T|0yfzABV|(Jqr$;%V_fxFfvd1*OWXgH^2K`L7XzdZdmR)l5%(QJE
z{z75peg{A%I^WUo4Drwjo$JJXdMs_yl{!@7e1{8VzG*Tfv+;F)#-;Y1F7ySNg+Y-n#aM4-xcP~*~=r0J#%$=|eN$=_qI*F>B
z)5fIc=VurkM0tcJ%cD?|u~1Ij^n~it7Y5@)>wq|2<>cf#9(M{?;J}n5XND`d_vQK3
zy>Q}+zuIBY#Zg;x(T!C~#`cWXNiQ2eCXpomEJ-uz>sy|$TP-^lRQJ+%3s;c|V!v|B
zCe6^eJ%@n^B7v9ax9{NU7x@D~>FplgA_^_F%Vo{t`@~m9_nVm|G%ba=NL-0~5x6i{
z5&uC8ntQqF0vJWoA&Oe_X^g_V{=GD9%QpPGe)g+<2k*_aF-R;o7Pvr&8gT%%@v_c<
z(5cem$ON@0%1(BaN3;@9LfkF^G`&iZe}LEp_kg-1GpQpNTVI)FE|L?I|;Lc=Xk9dw)^mKBUQM>erxYid-t_bf4SJX4X-3aO1;
z4Mfacfov@fhwci&qS&F`jrcWnEUPD$NE^8NV!;sEW%YD#0inX=nC`1urQ`tq7YJ`j$7
zOi7=QzLn+_O!MP84Eg=(Me^Eff#eb{f26R|{0oHfKqoCn2|~k{m6ZZ;f1t=
zzhoV6=wq%n-uUr&{rfT4xvWF&o=dymm|R#|h2&!qFW&hJGJ2T97aOSn?F7*xKqsy6{rm+Y;mze
zuOB}9=Jk$5`MDz{nQ4HhFPOIs(n6W+m;}#Jz?__hD*b@s6%;@=8
zw^`=t|9wNAEYOOq3+K%5e!sp@oqIDulcnbn#X72W|2cttm}bx~>rb2=)=fxJ;h0?+
zFp99WO45hh0F+5Bu^K2y%qU-48Vl7^<#(G-nc!*0gd@QoAozv)=j~=Lh!P1w=dZKU
zsjOGsQKX-fT&wF9zr_>T9PF!~4f{U-48pZcgY(YIdkyu(+%}Iul;EYP*>blZpyI5#
zF>=OxRN{o2bq;d_J`KflkA81fcXic4uY$lvD~xY17T+523gtu`J}mc5CL1M33%^(5
z8y^A}K&tUZx9yJ~KG`3f@*6wb$^9#F8eB{P#<Qx7aGGXr(q6$Sa4}n!DCVs49BLhUXnGRTfqdX+)aK
z8$e@NdJ2-sDP$`c%-`j|@!kJn-faaPxUf>OB_MFF)A`UmUI-%Obe7DHFX7HyLYVef=91YfXwaD}xwZJr6JNn5v3PA%1yGD0FuYnR|y-EFOEg
z)%Nk3*Q_lvMiIAPa@DXdn|Gqj`g^9`o?_41&H+Kh09^;kSk70
z+=5Z^xi+S2m^9mEY`p&E{vt%BfNu`JG09wWks(`{+ebkkbGBvkAGgJz`N_2jmUe)2
z$e65ZhoX<@W+G{_!DfRVNy>f2-hD8o*AXoKdLO1dS<8HlML>`}q|QKYDCaG2e}NeW
zEPEFpvyi#(Tq%BwFs0bi0b6%@8}Ny)bt6642#Ve;X4Qj_Yd^5yz7G2wYmgH=I#5L!
z9y7c}GK!=h+9D(|$eAZkDhAehDzF4mkq*+`SpU7@6<=JjxP6N|&UmfReZyTgNwPG>
zXu31b2A8oFhg_S{QcG;jB;4pQ$lG%Qv67^PELF~#lKZ4VdyQ#N2HEJmEu7xgH9g|=
zVcBKcgZl))wiVA7NOFhB+Az9f{R(Y6ptBcVu+`RxHCG(MEX~nHNu;24_AUTCsVU?JpL=I0ZQR
zi3;Mf?U^;JX8m@_MMY{5yMa{e%>@Tc)(6l{GAT46E(H7zIg@fRp&c*g57cak?F96+
z3e7R=OAIv6F59$TI3ZeQ%o9{M3DATmsC(o{_KP0{_mlw`hB1+}BB=ta+MYSSkd{S?
z$ShyChGJ0(*n!0B>~ZVsq7+A3VfeZe$veS{IA`zPp03F<|E~A=Uk7ju!>c&}DDc9#
z^e)+?v(SvB4Bms&QR}^Z#l|5WmjyKP<_)?y$t}Dn5$(SDR^NrtCOthF7cjFp0EwA~
z2_`e^fyNCjty=&KYUKa@eqNLmiJShsK4+iRhN&R2WoYIqoH~^O7UHcZ6-(dLI!Bbl
zRgCUN1x=P7j$PsWbx>I=Pqzjs^Uc7l{`QKEyeOyXBIBz;dAI|s7;U^_@p2!Fy^Aih
zrpBKsamIsZj6vR>PyedzIg!IYSBM>wMAhGE2l}x2=0#)XI?l6O
z^|phl2E(CHFS?X_mzb}faZAPOJR{|O#HfR%hF{Km%f_pGbIc-S>4~kwM6=jDW|%lA
z6kKN@>rX|0PD+}^H6En#hf+kgz~jc1Zwy8g?nRmkrtxI1mPR>z4hzJB6+Xo7+XdFk
z*0|Yyi)i~=e8U2!vQIN-{K%K}m5V(Id=Yg%5Es#=!@cbPPY
z#zSWtmqxo_I0oWsP;VP2D-bmP9SMn~L9|pinnch5#4~pZEqVGRPG-r>>^{iTT#nw_
z#GEthlU^F$a&^_DXp!6;;k9iRu_jcK1wg7azg&tKRKRT&BV88PoR)hJJLj=T-Yj#<
zkRUDa5KZoLFfh}=Y!m`34%m3<-0
z35+f5oZ=J$k-8Hxo8B0}PTf*+n_p)Pla>QupylB_!v;Y^{$B5i{?Ni<-Aowca&v35
zMG_SLD@q_3Vv~#NzH-=A7HpMdiqfZ)iTqFgVsjWeqsI1(!l5|8NoE}t#g`)PHm#cw
zKkctbz84uO7A|1aY_vqR^~JSpMR!Gh1?qd9?we?LnE#^*3Jzm5zbv)`JV$V77p0bx
z@dwT;``N`v%>q~DQ})ds=}f+J=7E$o{0u=A#7r+ZYKjvH`29AnowR4~d^Et>{uWbV
z%_x1=DC)Qj=vdV4a=sxzneOT7L|n&60y?S`)x1zR&Z#ldMu%Q
zR{K4hg$5d^33&X}w>q@0sM~
zIAxG-&^&SVpqjqEY}>udu(4*tAlUysO{9GsqzMMUEk_oeOc#Zhv|P6($Vyx2RMfAC@C#
zmX|9aSR07B#e?xQ%$)`K_qZt6<%HDX+b*jkhhXsfpY;3&Z+GvK7PC7yl~MQ-SAAG-
zwo8b2b;MZ`WzpGH4FfF+{tN91Er55--ad|!a+gNZ%mcvj`6Ptop2JGvtv8LzVjH{LnNVPEF84Bl=;>$Mn%e=&X!Yg*SV%ewt?u6S>
z_btV-5WTbHSn$QXLJm?IWGDw1w~&v-XIYsqn(i*GVilLB>D`cTM}rzl{bS*bEWx37
zV2Z<}-^`YU^;;C(=XAluB%Il8^%+?XC>)-eII#Ak1C+pYzwCy-+LRQPfEzxij6V^c
zIjxjY^cBTb=U!jhPto;!>QL7mA7f=@&^e)PTI=CK<9>Yo_V-6+s3c1-L&}=Pm1O(k
z?Y>BN@9UM}FO3czyf;vtR7Zb5AzT@11yxkM3#`8z17`B;Z*aNYQNUD8oI0E@!q_Pq
zu9BCnTcsE^BlEJ&h$*QjCcgWfrs3H1`~57?JFF#M_o(jz0AIg+wG=HaS$R=*b?}xY
z!iB%@vuK=~xld6<^>HGjlT_|OG0Q;DXknRC1<8JbNdhL_Q;0lf44qQ=y&Y`ePjn3A
zDZ&PC2UKr#^W}6;mv7LfJ?EVi4_CgbenK(Qs`53#D^gfSU&8K9yVX)bf1*oy%oU%x
z;QVEe^ngBE0@B@8NwU&Ovk_I{--oJ{1DpiF4sJR!)F(FML#Rr{WH}V7A{Gh%J^P-J
zKFa1i+gC(E5y;2pUTD@&IcSL{7Ow^d+)rqSQi8x@Cl@&fw(rYFo}4R%(~j5A#30Ku
z!l7{ER^||Q0oQzS@mz6uo+G|HHOs5~**=8)j88vh2A`|aq)1yA&cQM=c7W+W;@aOC
z$m^KSNC|ejis9>5)Nz?&EUjyYBQj`YP12v;{}Ozu>&)FJ-RB9u?FbVnKdO)3~U-F(~D6%pMZ8K2ub)GB#Xe>Cw1HziXJW_**X6h9|B
zXoo8^ZaJJca5t~HiEH3y)`8Hb&f1P7uE5mctn+OTWtdz@b+1>$ey5=e?lcxk6B-aS
z#P20eDf$rlOe|(Go0IN{NB;TCL)R!yxhN&r^h|3UPQASF)|UO?@kr;n>-Sfc^551xr_&k_^X7ca1J__nuV15~XxugM_Sz>qI4T^osN
zplFr?3?}tFMS=xH^*_HQYEq!v(jZ8t$acYdqNl*jE3<+`D<3VP#u0r^r{S@>ZFk$z
zL+YPnju{Kz`zJjlr$0QTH%}+*l$e@n%l%KwSFdEKouOBDS(WPYV@!uYlkP-OU_tLA
z!J=8XZ*ZMJ0r3^H`zDvBN!kjPhuc$RXInY;>{dX#ANI-gk^y}kUp{ZNJ`>hPv}((N
z4E2Q?`hGugbh2$5pKajlq8_&9PL3a!Cc1Z}ZsYF^Af_Ra;ED|t2d3X%LZ_UX
zd@=RxuIl~l=l=ft4fb_xQR$r$SBm@wW3N-m)u---H}UYMU5rJhcgev`f08t=Ux8$v
zC}Z8DpOUlry7-1*tg<{@YsG3T9|X5$`lU8gxyB}P&aZw$wGJyozqQ*%=zT^oxNYM|
zpy{?8rMwsHcQs971(HgPOG0=(%oR57_6}SK$?5!Y6=`6sq&H<>zFS|Y?#Bi|Q
z%rw~V3Ds(!R)KlP%Zz@)w24IsBNdRZI)>aT@iAL(zDpYY`)aA`bk_dXqe5**!-iaq
zB|h2jJWt90P&`GJh2m+c|L)b@I}N21Vk$mnCXJI#O6@R#9yz_&E}QGenV$J%OI>Nd
zb6wslb|2|gyDW8Qp%omdi7h+)A_HG)L%b+%*Mcc$E2IALAsUB>(1
zgMEr+Ax>)PN~6~I=*KH@cJeh(;B&gGr2btk!)T$Kxz(AqY!JHcMvoIq38GSlP7a7VJRD1mLe*viognq*%NnPl|!wdszbX_>tcut0kG&-(B3I1#DIZ*l%1Yczto?MAcLGQkJpFE
zpPn-xc+Z4|A&XypxKNcU-(r&;caoNYa^A_`1jhB`R^u(?L7Ui9w7#Y4ir%=1@h^lcdfJSxOpD*
zMkikAC^k{;gX=*+%Ah~(3koGrFX}&_Dl5p07bQpS+_+BG!b}ChuwYwXIRBe_cU&(i
zitO0u12w^>!ZWtiDCd|R7a(~T?EK}6u=55EaD%0HK7qspSlg6%)1z<&QO?_8O0r|7
zNmpN=`uAfBapA9PKU25wv6Twf_CXhDxEjabn~vdYRebg533hxW!V*bK>V`i8$hj2e^d6_JNl6YXpCV5B?6pQv6Tw1fFQ+qu!k!sA(U#c(QB2!o~Y#Sn#paFx6VZEW;D=$>I
zqfV@LYo%^2S_(`spi1*7O{=P)HyJT=E-1re~6ub0t$7pH9fplsi`T
z7OQ15n5*9FwI(<3y*2iva%kr*pWy&I1J53(4A0KW!{y$nYB;WQ+J3UNp8dXs24T*b
zrfkJ6Ogl4m&BJQv%T;UXPk@^)jMe3@!3hhb2wOdA*JxC`-L6d2@~7^4@ExI{lSUiM
z@bFIm@$kFNPqK&0*6-U5r$+`HZBD3m@^YBbH5wAbz9FrIRa*?t>f9oC5*hcU9sawO
zOprP-T(<7&2js=wmYr_vDfP|8mkVOc--ujS3A*EBuIx~Eh`(E1$QX&KR=qL+kJi1WlMHfXeE!UoP;%M729cgNFTo4
zsC4j=DnaB}G?#1Fc^Y&;mg#Pc+USThP1%^BNJa*%5`llm5~E}U|9S!2yKNj~%()X9
z$3nU77iHjNxM1b0$9a_=H1)yBpkzc|D%EthhLbtK77G2o>G{m;yHN!D|AX(ffUSd_
zpIf#FSnO3&>a}iGUw#xI*LLrUwXjwJXThhdPotc5OAAr?zF$q+=Y?0G`8>p@TTIZ*
z&rcA)z6UiqkAwvOv4{I#B5{Eo(emE!^ISY!lCwv)??dhLulJ$3gr;oXEzBi0bq&S4
z<1%}=p|8eS1XH^`3?&ErRk3^9}8bYMnS);-Ex>m;~{>0g564AV2LE>^Pa*lMZh@$arU_;s+P
z@f*0tL1M-RPY;dKlxLvrV
z6VVThz^300^GBdFKYu#>dhNCa5$QfMrU2qWGjfJ1Dc<8}GBUh}tiq2|NpfS0yC{C^
zMC~}CN_Yrf!Fu{VDFf?CeyE1WwTp(^pFyj4PxYJQJ}kY}X+;I6=C-!i)D`^vP8au>ngAA4`SQH52;=P>UJTro4pZjZh@
zzRKsBaYvFm=rkJwgi)yZ?Oek^*80`Dm*=h?6B~?JbsO4m63m{c%n{}jvlC{
z9XxI{(LB8ucje(JEyKS4GXIBUwf{y?#!>m|fKei`e(S?;jG13RolXIsx^7GZ`(HW!
z$nmvgpbrkS&9+L4<`z-7V!h{OmeOW1jz)V;7tEY10&)NR_2m@9GbGcxlvlPKOtGia
zZJZ8*q!*@7Ett;q+jH7s<;f+N@(x;9qn4k|tN#32)E6e4uoZ(!PWCIXkvH7hm1Xt#
zH0Emm43U4{z~eMb^d7PexbEz1TeZve$j*1B;HhJ$9~C@ztp<2Mr+`1;Hti>06%v69GrwJxgAY
zJ_PLCJ~HrdB7(Fk<8OZNbn6bg@ivEcq;kWAI=n(ER%kfYN;GC`JNoV{g))aFmU8Zh
z#lX7+J>ng5)EAcjN`Jyhx57=&a0-P0qyQ)18sb9NQEJ1xErIf^NXV1NX@}1smt246
zi$graHihKpWmCMVk7Q4RopReBe3VN3a#&iX(!wXc5P7AO+>5Puu2_^@QV`V{d>|#c
z^V(Ai3(*Ea)Iqijj2)x>Cse{jqdMsc7S7XZy@|J;`{Ug>j0PXw&MW(%vPbEfIX4@_
z0cWO!@ty}ad-2U`%ca%{*W%xZMEHeMmyq^7)pgP=%|#_k?6~l)IEekhA+C!e?+ek+
z2TNH!2`#(U%6j%X10=}1!n~P#Jyxm91+~o={P|Nmg=ifPm1j5HzP3qc?)vg;G^vcd
zy~GnT~c4y)I;c}N|gan7r^Kxg5p54F=e9bz!v
z?u1HWjJLCZ8{M%2r5x$;AKHRK%I{n)}Q+7pp@c{_|U^!bK6zd=p#qi{Cwg}_2E=vBXWcb
zhALCLyqol5TKnf!MVLHfx1FhR6z*1jK|zS`F4UL5{_|R#>SE`q6FfJwjzzLJa35Al
zk*SRdp|$MvaCFZVa@ut?kxLd_u32Bf@0qk{{O8Z0HSay=$A$gxHlM!|nCGzYS5YR;
z^7zj?Ta3V=b@8VE7+qOWC4H$1F0zX9)~8H0eRT1{3olw8rwt~n_ONMB+~eI^1@(D_
z5VP%nj&Ut=(fApbKuKwu9zBX&tZKwIdq3XCeRjoaEFfHWf$@E)t~wH~nGW2nHbA9r
zLo7c%#Z1VLnShEZYx)ejMP*rk0%+k7+As)mx)Cov)pbj#pF*iJ?AWKtmws2r5^vUdO3Uz2u@zdrc)ZzcXEr?D_kFhQ8aj6IVGR
zraqN6p5jpZq@MkO&Qlgw?QAXckdA>HgAY(n+a4)qy!s9wppORVRcLq6U=Lb2^Cv2%
zVwJbQ4MYl;<%NWy24M;2unJ#hFl
z-7386P>h^NX6NyGLip;^k}Vr}jnd2C2#G&|wOCs-E375l3Bf;vV{qM~3t<s8u^_&7+~@(A~<
zElOpYmuJihxXyeRjmoQbGYPcPCmjihTOtu8^xvy=~U-X|lM!e64}
zNqu-~z!_@pz*BQUoOd@O8M29bn51yd>%NH!p}U_(bMv3;**C|Ypf-18njR96$b2gr
z>7rA(o-Sa6%Z;PFv1kn>Mc=HA^B!~wkLd26_9Rm}yK~LRen_%a04E4*;8mL&4MGz1
znsCI1eR`b_dmTf&Z2*mRC89CVz$06OLODG9uZSlxux~qLjyFBm=To=N%s|fV+}vBQ
z`^7TlgS(hf18p_!Kys^fi_Z!T>iPELxJsQ4#dy=CmH9)awEsVSdQ>Mn#L|}@4|qD&
zwKD6T-kG~VTv)cX>uc`Pxf7%^_Wd91YAkEn$(fK9k@n%mA#lo@AA`J46>B#8M|Sjb=(SX;<1R6;6=`~08I9{1|S85ZRe_3}oje$}sdfP}OJ1#g}KYRE3
z{1ILtngHKR;IA7#_EomjXGi)0|_q`8(x@OhT|`m!K-mNWxPuM=yuh*P4Ty}JS(}h~aSeQ2xFAo#
z-?4y}aGF0uDVgnfY(^=;Kb}MCvmLjdh1ACq&3$&ls_5~4H9;BnxE>~Zp2f{7)}J
zC{wwA$4wSSL7sf?IUrVaQG>dfA1=g&fTJguW(4;w-v@t3K~djWeDnXK?JJ|IT(_vVauo~o96_Z-JqiK}0!o*nsB|N(gtSOWhXsm+
z0vjZxyBlc~sZC3(gwiF_4c~ehn(@&}J&Skf3C~2U1m%fvY%(LY2#%X
z9|tG~rM-QBx*DE7g}{W3(Td66Qp2!O+Dr#gsx+6VvHvJts$Jvuraw*uauCTo!JH^bVCCFa{9O2
zKGU&e1LQ(0^D?_%mcEEi{Nb94KejOk4wmogIlL{HWGlBEr9xjD1bvD|_K_YuE`x3V
z51yBw$>#2h%xxrg+TH;*2yPNQ%?JM6!7QCKIMbEvM1V!2wuY0_@%y7ATHbp%z{1q8
zsp*4n62`0tg|Yj9f}QRG00@OM>7wZAFk_Zsx1eYme~re`vqKvI%+F5pFRETpXE69x
zZ{dz{Q=otZlA3|0r2L+B>3vwJ5J(Q0)iUmQn=W2SNos@J1XuO~thZ>`ahZT2@U*wo
z*c>;h^^ba%r=M0VOCD;cD{_j}vRFGrKi*)eE-{hp19zOq+~uGc`ij^cSh@a79ANIa
zzY?SAZ*7}hQVNs2(R+}*gSqRig{>Id(9v=?DTF4Ve4wj+_-*ogkzh%~#`C5Dfslml
zy{h^jTpGxM0(;jg<+c*H^FEY(pff?(jKg29Uhx-UlKS2(6S;4_7N>=3p!K)AEH2|M
z$~J02BJlUN&w*N365)Qa#ouj$9a$k3(JV(*Pn7jS?#wn)u={%IX|RJ(G-k13{R_{q&Kgww5J%)PnqW$o7x&+R~Bq+9HPxw;U|`E3u2RPuPZ6R{utI*1T
zB&D1iU_qi|t0+)Jua`Y(u%oKy1xJyxa-+p)=
zt#n0rp5<)?c|%Oc{aZ*ZJNtf7~=QrhS5l6_%iEM*#Uu#K6^
z!bQz8f*`R;ImL&41n0Ye!94k-B%?pIIibcG2*!v>dI4yMVXH+;0F0y@^m*U@7>!L;
z{*oR4OHTL^;mPozI>|IZIUaV9>#rc2F$STRV(s4tZGasYlEy^PwP#7J7apV
z=O8W&)^Ie}!UBMiM8V~WqPJB~So~Duzr@AJ@?8IsltT(Q)4P@lfMb@(dq1g)EuKHm
z;(O%o3~|b`sWrZ`XowXc)g0zC6vVbvW%FAIskB=NI-f)Tp!C#JU*2l(hDKh4{>&)t
zfIw}lXUt0+_&ZCW7W>t)@T%*C;?2c48J}DYk1uc;WME{WYGeL{+)UT9wrOtsfMB=A
zM>0+^F_yA$SXhEb_|2MR(-$ihHZ->3T2T9AujX7-z9sSqN$|5qC{4$2=&5WT5EX5v
zT*-0P`jiV!@zkA((aa}>X>;tQg_FcRL+!gY>f0nc!
znzHKZ3V-6%^*uKVe^S0(T@!A-v3$p2R+^rF5_;MoTFaSO49!^*66>CmW%r7`qYccf
zNnSw8fW!g6zzz{~;QaL1vK{k5c)5Snu1^+z>v@N%DO6b;Zh~fM=KTxT^H33s1ww^o
z|NK{+n!Nbu$4UIx?_{ialdTbm#mg+2J0{%Sh0wg~JlgfV%KM%I%%hkij@_Um2FM0|
z|Mr1`jDyGvw=7UX*tu+bA`v$7&e`S_B=r>Cb(gwV{v4y?5ND{VdClx~HrqAhLe*Dg
zo|Pmoc*<5dlct~Ot#`I7bJu+yOhYk0)cP&Ow-?wTIR3Oa<
zwk^`&ud?pbTXr@Q*}0$$bYV}g^hG5AKzz?5iybx?d^G2BD8rUyI3QVu^TuFIBe8c(
zzs9qoz~h^NytKn7W=u`K%J-Ki(PAIFl84?{0uD~V^B0vW%i<%X3^8m)1V-Cf4XgK9
zQbU_6xVMPCOe(^s$TWH1b%YQ3aae*rd+07F_XByRV@n5Q&S5UHSL=zu86Rtnzrs!g
zKp2`5Uc}OdlUZ{&u5u5kC1-mEo%-2J$|7~PUrMYg*UYYR3+(`9EhXhLBNhRRZEPIz
z9)$jU0y{M&Vj*CZL&@R8)k23c^ZxKgpoh3E>7m@;$Db=({)wLUY7%R5XNynw$M=JT
zlG?~aIR*ID4`!M6$9q8)susQ>KZWVD-?-NOp0^eNKuKr6r+qhnG|5G@LG`FgG`-fF``!8Oq{tX4V_AX6;Cu7
zYqX`CAEP~xS1aKFM-y*{qn%cDA{u{6tiCC;kw^npd)9W%S~@a4XaCk-_F>6)w^7W+
zoXX7XsoWh4DEx#FTJZ}tp0hXQ*9K(~bpmJy`iYOrHd4|A%FX>#vuF_;jYGxmJPf8QtXT
zt9Q@WL^Y)e)xIrFg`UF__*Krj{9_)U8$xRmU!b{HWvfi+12lW0-2>pNU-Gxw8Nr77
z@upXrS-k5i_4K{yjzFpkrI5XWn`2THT1}v^uK*%o@;t?!*wq?VpTdPx9eGxegvYsk
z@>Yp!Yji&3?@i_2lJahEXR12^MAkmMSJB0H@_QdGRTNTedr(Ss{4cj4e7-YfNOeAz
zv8R%fH4auPLzEG$LcK*92SHz^y)9vxYXAgHK&~x#62eg4q+L>bpjOIZ#0LuQ^S1KX
zf;-)!ZAX_h>L!i)%ckbj-nBMVy95qy!$Y_bEeTCuYYQ9-Ifmz=YpxxQT}@<-x~-k`
z8mH5Z)sDX91Nq!r9CDw)({1&(*XbNa;Ijc+Zj-XBf<1O!9Bz#{*Ksu*_L#^E4Pg7A
z*sI3wF?q5!wBEf}dPe9;t^-c1+cw#1&?eusAdl^C5l-V^vf(2QDJ-Y)nTm&siptGE
zTM2Tgl6K-O7qJsP{=NsK8#){)K~tu=T#a)H2+#8BWKYpjs>41~Py_u5?4#l$h^D5W
z(KH~oPk?%xUT5{?mh4gGT#5;{Ooon^;o2#He+|Dxw$HMko%vYJB3I|~Nqe)JL(S|&
zW^B4#@G96`3SyMahhq^|Jgv;)F|6qYQ~OapC%5CGq5SFZLJ?(`l46s!x(>kn{Nlr$
z8QO$IiH2UHLTSJ~^}XY-20MwoX85#opP0pU(wn3C-fyV{?E`@5;n~oy;Yne!lPkYn
z6BnH9EC5{`{LbL;D42
zcl*W+;Xgs6*@N1)_Fzg7kslmD4Om&3A8ctBzdC8?cTZzAA>0%6d{8J%B*ow3Xg=r7
zgzUbmyRiFuQ23$l#0MfPgf(c4MFIzp{%iMYlIvmcu3;lp<(uyIb1ceWtEmW+Bk>GBgDz=<_wx
z^yabDo@Yr)`jWU~Mswp#Jxc6y(PrkE$0V3N!gSN0q&QARl|rWwjpw2Bk5uVAjwR0&
zaMDxq=7OX|f=R~k>#1GYMhP0}s(64#OWWnyV{Mqs?8?kc%&k!iO4RX)C9r14)%EfR
z>_qWg+YT$uoY~A#XK?!FWv#7!^LN25hz2*&l7n^<^mNeKxsBSq+3h5OUx)~}n{DK*
z3V4l{TSb_$K-qi=Zx>@{SZV(|_$BeprBvqPTQi`?4Z#^Xc^+c~8<
zRG|2DCUR9TmJIV#k2B?JKIP2>ll$nA5n9qpbh_dOwXcQ14~mlN)osw(q>HZ%pL8<<<%jHM3*empcq!
z&Kc&OX1&5J>@1;5A0g3Be*MqC2{k2cxyYwqT!Td#+9q2nAp|Z9X<+ssvFQ2_@PB10
zq%>4=laygm5)2N#Vy4-~DH<&B(l$Gf(1X~7(#+GArEg^PC;4v4LBkWN3C#TDv+!l#
zT3BGzOv8Z4IVoq=d!?sP&b1zRGVTd^PSuIj9`6E*KS0*z3?CGXOvtzKkMR3GVOp4(g)v1S#|nVAI2L_;VAjl
zK9%RhtbF(f8kXQMjS(9g>LDR%{wgDR`ZA}?S)^UL8H0o`byzyV)<{9!ce+*4(ej$w
z6OMjM*T;km2;CnekpS9={Y>;AMcl<^6c5Wu{wo)L>m)%DZ!6M=wB$KeypxrKE;^NRKvQ@JmG3Effh
zQyEon;x;lm`X|nG==>&UzTg@tOb0Sn%o{uDGPD16TgjvW@S>8D?OL@`%Hc86o94yL
z#k!FK?Yc(Kq9nH=Lh&D<$AshXYbqRvWzLTF{aV^CcKVqw)88J%
zuT^i!f=-P1rC*5yB5d$BwX|g*1oia4szrAM`*wNy+c~ryp3Xop#1ggbpR$Q-e`EM_
z>#XleM*@IZe`FKHv{U-FRJv)+A28}OuDc&j4t6P
z<%V4p?%!bu2pJ*kPAxxcIdwq$t~!07FtFBoMshR0pGmA2q{!Nm?g=#iuPN9YX(6hu
zLHZlNjTU{NNw5Ps-F2JcbeHDPdh`(#mL^}?0ed@wNE^a)IvHcKlJdSv9v0;*9(}%u
zbi}C8Hf#+1-xLS!Z??=j^-=&@&0@wV12`BW#U8gPOMx>#Yc&3KqA_N0T4{1E&&wo<
z^(hvsHV4`R)i8>E7nhj8Cl7ovHb=p5>)#(ce(5k<>`VSd)Jk!LerDyxH(!^jzNx2P
zyH}O&swhC`9i8Ov$?xRwIOH@=8bhu6S_A=zItju|yHa6{1i!q0u3t43Pi*?lbj7mo
zZ$(}a9`~s_3dG8qjtWE5r&CQI={1wuOi(>ea9qimfh`xfr@H8UcyewN6nJvCRM4QD)8QaSOo
zvN{t86}WglFB3d{EW}_2+0KKb;2Y;J4(q}9M70aey8+nFuFA};M9>~o2@sF-JK|)o
zc+*nHMR*3sf`?}_c);m4$Nu%Ej)HS)C_aS>T)Cc0)U?Ves78Zvi@9wV7@b%krnANC
z%Qj%3B5SmIr__1KEM)l!*c`k?yxisrV8T?!ai!N?DRYQ8mjnlg4OP$hnu-i(A!B*n
z_y0ps8+L>P3wUD5MN|(Rw%wwV)W&=5pa0T0t!yOoQ}W+S<_ewcvmgRqwcFpp>RD>yV-jrlG$j$X`L>
zKXg*l?OT4K{)IPnl~XnsQBE$~MfnZI*yb80D+)cnQLr77T=v&FYdd)@opdc9fCG~J
zNt&l2HcHMDVav6@JGi1`F&ghzP0ia=w?a{3!I6j&b}Wev&w*mM94Dr69o
zArj*GG2)MxAMYLlUsk`Rxg5tt{#8AxvQfwR{I((RlfCMh!VUJA7GOqX&{a^67#k#6
z*`u6_Km2ic7l(iiUB~jgFcbm>7yFdAhxC$buIY7;Si?suYK_a^6=srcwLX1Lph%{E
zQ4!8H3_g0jwCYxJM#F9{&-3oWHJZRZnR3Fp;gr%4>Uo@du00~(u#V}-i>2uh>NXfq
z)?Zx`Lwst$g*W~MBUetJxLJkC%wuXhLBwkh+-JY^&hpRaN<8QY>IVOU@tp1$Fg{a&
z`xg)+3Ujc2LtaYX*c>M6YWU?gRm-G0Ud!axM8&q=S)9tEnHZUWZ#YXa(ouS2IsK11
zf-subo2MN@bauOB<_ylveXgDb@6e%MYJQP8+3*pA4;sv1x>Ec=zfQeqh1Y(mNxgBZ
zf5IYfz=eP1DcHd}wsz=*beRBkwYoCc-8NlcoCvOa=)yI`&L+iib%L9!7~EwnW?Q=D
zWMo9DCJ{psuD+KhSSK`al>W}q)&9CUt8MMpQI&@2m>)Q$+YkQD;TXHpat%aI`>CEw
z1_lrKj#)XnYfK+KJ2RjxCr()WGZ$Vo4%p!YlfK15Fbf23_rirMQxrD+=(gFZYU3ey
z_AYro^_QpL*o-_c{200(#bmD(A%rff8!7~=fcN|P5yy%Ag1dutckKsQ6-L2Ni2aRa
z*L7|WTJOE~(}C)(U{&0i-nRbkU{P|zam(~B`}xO9Uj~b442=BR3gFkNT79K8Z&Pgp
zXJclniWo6&r3@W)#7p6vEzaggtqr!eRMB`NYI;|uTbe%0m2{Yz;4^`-i77RB;_j82
zx5=V18BiB4xbAeakGNdC8!^He0n6WmVDv#~-U%@gveX>prCFvSrM`%n&oio9cN)!G
z0`H@v$oRgX^MMf#hpID!!-#84;rd#heX`@nn-zLXGjV4sz?&%?@m(3t=3FO0%t1yH
ziemze*RZallT7x*ta-!H`E%C1IJtj)BYTY#rL?`TeVF8h>&oIC
zthiiRSuW#U_iy-WD2>1=9V`_D>fyG}Sts^Ek&k6Z9YvpC6|71td*L_Kp`(6obcu>P
zU?#tNHLma0g=5VH))|<*wZkEA6PwbySHDXv7jX6vRQndjg7tNHUprjwJ@d*l=vM12
z&cm%#!Q5=Q5UI<7XwG$}FAoWtm&)qn`nkIk-*WPUd|lXXIv<)H0W$|jjiex7c}@y;
zjNZL4PHL15j9B`NBkp_VV|kF`;94;L!`l5uS-)n7pjxnQr#d^cR(f|DiEr
zcCfxUw<*oE73U!8k+^4H_~lM%A2ucKXQPxK0D^zUn;PWolN)GSUdX!iEdQ!MqN3+K
zxl*Gi!jO6=0=OH5)*?nVM~1fTkfZ=h=1WGO`n)v3jml=GF^V@p2d-eX0fR*IrD0kN
zN(x?^NW`?Mzq)&LE{}}WL+&Cu9cMO}6=?JGt`m6jOhqawe+Fi;&19@uMrTGBcQCRG
z`w~ecYS#CI(tgE6xE{ekhxTY8_8LQ8?5Ll%JK*O{Np%Z=2-lenE
znR?!&2YkEgC*SW+!6fw%=YpT~konj`uV=^Tr3$LH#fcF6xw7Las^^Z(!awn$qyQ_e
za(9ZR-dvm)MQPP)B1?HepzP#hy}I=mfXC^t&apSoS%#z}a&}vgI*gf|S^qlf+GWcx
z_G$H*UOio^UdGPP+1MxfiabeQ)nRTb+qFeMhJexBK`wQ6FalI{Ts?2Axc-c>)iLS?
zYqj$FKh&IQeeIDquj}?>T1abg-c{(l;?3+LH9!SnjM~KRo!G^*X2a+D#d)up(%Y&T
z_q?$r4FPt2ULIzQ>}va_^MhZ9ARsep35MdNe8J)L0arHoP`eI{jO5w#zAGA??M~5M
zZAsux;dfYhM_P*`lntFnJPkmi7Tz!BWq2ZVE~8EshDIW-3c@*Svd3MSR3aOi>Ysh4
zS~`d{wU?@@v;cQR*&pFg3OyGgIR0stv=%9pzz=%YV`_&bcQV%a&(nfop>U;=
zis`wKdV}8b^Q1||_d3hPE9SCsM-E{!?^iQ?^grhe-W4Mq$ffGcEEB=qC}en0%J-z;
zZuqj`yX-n(s;U4trGfDu^_mlhuH@h%{{`^*+O_kcmnNKPFuep1;9x(vd$lkL3<`?P
zxgw%OLeBqTtat%C>5okG(fUSy`*8;u@Oc_RtH5)8Xjt3zyh>zAwvn8uGlT2)1qAC=TFZTdU}5dK0TMoJ{D&%7H8|w<&Df;4SVpS1FPz7
zmju+8IP9Hx(_#wmNT_N`bWZmM^Tmo#5AC=8bl6ry!nQOxuRJ)fA~;W>mUp~Eq~YjX
zChgpZ;JI>RTYYv%Ykdb@1=~P)+FRz-3ckWR>2Kyq_RZ-^v|6C+vMsredkSe$#*A4{
zEf#>TtST*d48%;$E0dmavzao$gPeB^1I1s~5Y5wWhl-;=pC(kvR(~|yFZsG{x_lXx
z7{<7pF+-VONqymXujA2WA3O{a}(n>RhYEV+RHG
zp%(Rg9saB1wY2=4%HWQ-=$gC=g%R9GX-$|5E!OAO4xRAPETd=a;_l$YKHmrtRA
zcU3h#cidC2+-+F9{^U(?#`rw!n0buX=1I9b_uDbfcA0tXpSJ%Ge`!l(Rw}Y^@ip#J
zGUYk`VkA>Z7BF1~)lequQoUavE+g*&fBN&>GnC^>x2kc_Ovee4QOuqsL>
zA1nQu%un@L^~-Foqk-kUhKVXw#H(rZC%!^B9j-#)Pa3ul?xr?8&Zoju$DzngNEAP0
zBXe%#Wt&>dD<+aOT~WC$59X*4FgJ6!T=CMtaI}qkf=AAD3~|rQY+_8-
zE#lp}(~%;F*C%d{*izFz=e;?mXWTA>1nWvc40vBRMqEEj?p}E;Ch^nhMOz8#Arb1B
zgQ>Ke+2KnQp~Wx(SZXSSNje;UPCcI)Q}n}RYbqGb;U#_X)aw)2MB41y`7WSKMty|?
zc4W&`FhW-QzjR06tsR`k^zT1YfiUvn+B&Zy&1pVcON8C_oYPWO7&f$x>7CTQT28m_
z{K#~!&$(2ebMRc2-Ha$5C8O>4&Ry*Hq^CeXXgfZ8ub%UzE&cT*`yh;zJMS(6kKLD|
zxylXL45b;>hcaos>#Kq3th$>Y6ysZb-Q0!O!gC{RnI@{X=-RaB`{iLe)Lu4=Vn*F|
z+yv}cMLfg;%mcEGyQLcKJb6qLh3#q({iaDA@m
zgAykU&CM9n{Bh!I$CYXhH!nZSvl6AHVvW;W>D!+YZ&Pfs2)6PFff{0ck6_2Uep!2vl!*7MKGsFGschU|xZIm-KlW%+|n|J652ll)45kzB#8U8l#}$qY3cJ8;@}#+zted
z^&*0i@DX^D``&xC8S`axWh7uMwny2R0ns-lbOXzrUyw
z@f3d3z8~945~pC0Lxf+xCkS$gt{kG>Ee>IJkF(=_B_GzqDB^1^fBsIXI2n4Vaix8+-0>v?=IKK8O>&1)OC&KXCchy4V|2%wF3>ensR^+chcyokUjKToFzg=48Kh
z_0M`xhJi>z?zOor*bw;gB`vtKS9KH>x>ohT)Sgsm)U~?TuK-U*$s*lJKU~NJ_Jfia
z7QV*RKq21G;qm2o*Bf^GrDpMpV27o}LbVLTj{QsDj;_b|wVPB}1na0nn83dj@qoTd
z4(#*3PpaTVr(_LWB%CAGL5K|l)Tlsy6Nw9Z|Za>
zzg38MK=TYv;JbL0iW3TQx*gO#A=f^{}%Eiey8ljdWiqO$thkz2OlqNt(t
zWQnN10Y2bUG=-g``|go4w7TGEE)}B*9v(VEeKFUyWm<9L$Enr>ZLY5$9ZJW|7NB`)~ERb``nqA+8kH2VPi|1t{@jP
zjkFk4E3H4L^&N%~WaMNsY(KQRP|@wnd^e>nT6*s)L|mdk=!|%$`ufsAQ`MYA9B|5M
zt6L}&*yO`W
z!3N^I1V_hNlt4FPN2y)N-7#ieX*Fk*o
z&r}2kD$juVX#Dk2$5Cbe)=tZM1!VVpe|j`H0tWKIS3DT*xZzS|z4QLoDmd~tyfx7j
z_9%vc2H@7?IMTt;{wJ~OQ-Ph;3VxlUYJ8}&mO~2W579=3i69f6
zv-c}QD`s)MV|n^)E@q`}dYy-T?pcl7jhnjH&i(z&o