diff --git a/.coverage b/.coverage
deleted file mode 100644
index 2133e27..0000000
Binary files a/.coverage and /dev/null differ
diff --git a/.gitignore b/.gitignore
index f61cc2d..cf4c28f 100644
--- a/.gitignore
+++ b/.gitignore
@@ -23,4 +23,7 @@ examples/assorted_checks/test_openai/output/*
examples/assorted_checks/test_voices/output/*
examples/assorted_checks/test_formats/output/*
+examples/assorted_checks/benchmarks/output_audio_stream/*
ui/RepoScreenshot.png
+examples/assorted_checks/benchmarks/output_audio_stream_openai/*
+
diff --git a/README.md b/README.md
index 6909eee..4c7b1ae 100644
--- a/README.md
+++ b/README.md
@@ -10,12 +10,12 @@
Dockerized FastAPI wrapper for [Kokoro-82M](https://huggingface.co/hexgrad/Kokoro-82M) text-to-speech model
- OpenAI-compatible Speech endpoint, with voice combination functionality
- NVIDIA GPU accelerated inference (or CPU) option
-- very fast generation time (~35x real time generation speed via 4060Ti)
+- very fast generation time (~30x real time speed via 4060Ti)
- automatic chunking/stitching for long texts
+- streaming support w/ variable chunking to control latency
- simple audio generation web ui utility
-
## Quick Start
The service can be accessed through either the API endpoints or the Gradio web interface.
@@ -129,7 +129,7 @@ response = requests.post(
)
```
-
+
@@ -144,7 +144,7 @@ response = requests.post(
- pcm
-
+
@@ -162,6 +162,76 @@ If you only want the API, just comment out everything in the docker-compose.yml
Currently, voices created via the API are accessible here, but voice combination/creation has not yet been added
+
+Streaming Support
+
+```python
+# OpenAI-compatible streaming
+from openai import OpenAI
+client = OpenAI(
+ base_url="http://localhost:8880", api_key="not-needed")
+
+# Stream to file
+with client.audio.speech.with_streaming_response.create(
+ model="kokoro",
+ voice="af_bella",
+ input="Hello world!"
+) as response:
+ response.stream_to_file("output.mp3")
+
+# Stream to speakers (requires PyAudio)
+import pyaudio
+player = pyaudio.PyAudio().open(
+ format=pyaudio.paInt16,
+ channels=1,
+ rate=24000,
+ output=True
+)
+
+with client.audio.speech.with_streaming_response.create(
+ model="kokoro",
+ voice="af_bella",
+ response_format="pcm",
+ input="Hello world!"
+) as response:
+ for chunk in response.iter_bytes(chunk_size=1024):
+ player.write(chunk)
+```
+
+Or via requests:
+```python
+import requests
+
+response = requests.post(
+ "http://localhost:8880/v1/audio/speech",
+ json={
+ "input": "Hello world!",
+ "voice": "af_bella",
+ "response_format": "pcm"
+ },
+ stream=True
+)
+
+for chunk in response.iter_content(chunk_size=1024):
+ if chunk:
+ # Process streaming chunks
+ pass
+```
+
+
+
+
+
+
+Key Streaming Metrics:
+- First token latency @ chunk size
+ - ~300ms (GPU) @ 400
+ - ~3500ms (CPU) @ 200
+- Adjustable chunking settings for real-time playback (see the sketch below)
+
+*Note: Artifacts in intonation can increase with smaller chunks*
+
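+A minimal sketch for measuring time-to-first-audio against the local service; actual numbers
+depend on hardware and on the server-side chunking settings (`max_chunk_size`, `gap_trim_ms`
+in `api/src/core/config.py`):
+
+```python
+import time
+import requests
+
+start = time.time()
+response = requests.post(
+    "http://localhost:8880/v1/audio/speech",
+    json={
+        "input": "Hello world!",
+        "voice": "af_bella",
+        "response_format": "pcm",
+        "stream": True,
+    },
+    stream=True,
+)
+for chunk in response.iter_content(chunk_size=1024):
+    if chunk:
+        print(f"First audio chunk after {time.time() - start:.3f}s")
+        break
+response.close()
+```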
+
## Processing Details
Performance Benchmarks
@@ -175,8 +245,8 @@ Benchmarking was performed on generation via the local API using text lengths up
- H.G. Wells - The Time Machine (full text)
-
-
+
+
Key Performance Metrics:
diff --git a/api/src/core/config.py b/api/src/core/config.py
index 5348730..ad0ef1c 100644
--- a/api/src/core/config.py
+++ b/api/src/core/config.py
@@ -18,6 +18,8 @@ class Settings(BaseSettings):
onnx_model_path: str = "kokoro-v0_19.onnx"
voices_dir: str = "voices"
sample_rate: int = 24000
+ max_chunk_size: int = 300 # Maximum size of text chunks for processing
+ gap_trim_ms: int = 250 # Amount to trim from streaming chunk ends in milliseconds
# ONNX Optimization Settings
onnx_num_threads: int = 4 # Number of threads for intra-op parallelism
diff --git a/api/src/core/don_quixote.txt b/api/src/core/don_quixote.txt
new file mode 100644
index 0000000..a32a3a6
--- /dev/null
+++ b/api/src/core/don_quixote.txt
@@ -0,0 +1,9 @@
+In a village of La Mancha, the name of which I have no desire to call
+to mind, there lived not long since one of those gentlemen that keep a
+lance in the lance-rack, an old buckler, a lean hack, and a greyhound
+for coursing. An olla of rather more beef than mutton, a salad on most
+nights, scraps on Saturdays, lentils on Fridays, and a pigeon or so
+extra on Sundays, made away with three-quarters of his income. The rest
+of it went in a doublet of fine cloth and velvet breeches and shoes to
+match for holidays, while on week-days he made a brave figure in his
+best homespun.
\ No newline at end of file
diff --git a/api/src/main.py b/api/src/main.py
index c2a567e..fc51043 100644
--- a/api/src/main.py
+++ b/api/src/main.py
@@ -22,9 +22,28 @@ async def lifespan(app: FastAPI):
logger.info("Loading TTS model and voice packs...")
# Initialize the main model with warm-up
- voicepack_count = TTSModel.setup()
- logger.info(f"Model loaded and warmed up on {TTSModel.get_device()}")
- logger.info(f"{voicepack_count} voice packs loaded successfully")
+ voicepack_count = await TTSModel.setup()
+ # boundary = "█████╗"*9
+ boundary = "░" * 24
+    startup_msg = f"""
+
+{boundary}
+
+ ╔═╗┌─┐┌─┐┌┬┐
+ ╠╣ ├─┤└─┐ │
+ ╚ ┴ ┴└─┘ ┴
+ ╦╔═┌─┐┬┌─┌─┐
+ ╠╩╗│ │├┴┐│ │
+ ╩ ╩└─┘┴ ┴└─┘
+
+{boundary}
+ """
+ # TODO: Improve CPU warmup, threads, memory, etc
+ startup_msg += f"\nModel warmed up on {TTSModel.get_device()}"
+ startup_msg += f"\n{voicepack_count} voice packs loaded\n"
+ startup_msg += f"\n{boundary}\n"
+ logger.info(startup_msg)
+
yield
diff --git a/api/src/routers/openai_compatible.py b/api/src/routers/openai_compatible.py
index 6663d7b..2b30c7a 100644
--- a/api/src/routers/openai_compatible.py
+++ b/api/src/routers/openai_compatible.py
@@ -2,10 +2,12 @@ from typing import List
from loguru import logger
from fastapi import Depends, Response, APIRouter, HTTPException
-
+from fastapi import Header
+from fastapi.responses import StreamingResponse
from ..services.tts_service import TTSService
from ..services.audio import AudioService
from ..structures.schemas import OpenAISpeechRequest
+from typing import AsyncGenerator
router = APIRouter(
tags=["OpenAI Compatible TTS"],
@@ -18,9 +20,23 @@ def get_tts_service() -> TTSService:
return TTSService() # Initialize TTSService with default settings
+async def stream_audio_chunks(tts_service: TTSService, request: OpenAISpeechRequest) -> AsyncGenerator[bytes, None]:
+ """Stream audio chunks as they're generated"""
+ async for chunk in tts_service.generate_audio_stream(
+ text=request.input,
+ voice=request.voice,
+ speed=request.speed,
+ output_format=request.response_format
+ ):
+ yield chunk
+
+
+
@router.post("/audio/speech")
async def create_speech(
- request: OpenAISpeechRequest, tts_service: TTSService = Depends(get_tts_service)
+ request: OpenAISpeechRequest,
+ tts_service: TTSService = Depends(get_tts_service),
+ x_raw_response: str = Header(None, alias="x-raw-response"),
):
"""OpenAI-compatible endpoint for text-to-speech"""
try:
@@ -31,24 +47,53 @@ async def create_speech(
f"Voice '{request.voice}' not found. Available voices: {', '.join(sorted(available_voices))}"
)
- # Generate audio directly using TTSService's method
- audio, _ = tts_service._generate_audio(
- text=request.input,
- voice=request.voice,
- speed=request.speed,
- stitch_long_output=True,
- )
+ # Set content type based on format
+ content_type = {
+ "mp3": "audio/mpeg",
+ "opus": "audio/opus",
+ "aac": "audio/aac",
+ "flac": "audio/flac",
+ "wav": "audio/wav",
+ "pcm": "audio/pcm",
+ }.get(request.response_format, f"audio/{request.response_format}")
- # Convert to requested format
- content = AudioService.convert_audio(audio, 24000, request.response_format)
+ # Check if streaming is requested (default for OpenAI client)
+ if request.stream:
+ # Stream audio chunks as they're generated
+ return StreamingResponse(
+ stream_audio_chunks(tts_service, request),
+ media_type=content_type,
+ headers={
+ "Content-Disposition": f"attachment; filename=speech.{request.response_format}",
+ "X-Accel-Buffering": "no", # Disable proxy buffering
+ "Cache-Control": "no-cache", # Prevent caching
+ },
+ )
+ else:
+ # Generate complete audio
+ audio, _ = tts_service._generate_audio(
+ text=request.input,
+ voice=request.voice,
+ speed=request.speed,
+ stitch_long_output=True,
+ )
- return Response(
- content=content,
- media_type=f"audio/{request.response_format}",
- headers={
- "Content-Disposition": f"attachment; filename=speech.{request.response_format}"
- },
- )
+ # Convert to requested format
+ content = AudioService.convert_audio(
+ audio,
+ 24000,
+ request.response_format,
+ is_first_chunk=True,
+ stream=False)
+
+ return Response(
+ content=content,
+ media_type=content_type,
+ headers={
+ "Content-Disposition": f"attachment; filename=speech.{request.response_format}",
+ "Cache-Control": "no-cache", # Prevent caching
+ },
+ )
except ValueError as e:
logger.error(f"Invalid request: {str(e)}")
diff --git a/api/src/services/audio.py b/api/src/services/audio.py
index b8cc708..dcb2a72 100644
--- a/api/src/services/audio.py
+++ b/api/src/services/audio.py
@@ -4,15 +4,61 @@ from io import BytesIO
import numpy as np
import soundfile as sf
+import scipy.io.wavfile as wavfile
from loguru import logger
+from ..core.config import settings
+class AudioNormalizer:
+ """Handles audio normalization state for a single stream"""
+ def __init__(self):
+ self.int16_max = np.iinfo(np.int16).max
+ self.chunk_trim_ms = settings.gap_trim_ms
+ self.sample_rate = 24000 # Sample rate of the audio
+ self.samples_to_trim = int(self.chunk_trim_ms * self.sample_rate / 1000)
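+        # With the default gap_trim_ms of 250 at 24 kHz this works out to 6000 trimmed samples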
+
+ def normalize(self, audio_data: np.ndarray, is_last_chunk: bool = False) -> np.ndarray:
+ """Normalize audio data to int16 range and trim chunk boundaries"""
+ # Convert to float32 if not already
+ audio_float = audio_data.astype(np.float32)
+
+ # Normalize to [-1, 1] range first
+ if np.max(np.abs(audio_float)) > 0:
+ audio_float = audio_float / np.max(np.abs(audio_float))
+
+ # Trim end of non-final chunks to reduce gaps
+ if not is_last_chunk and len(audio_float) > self.samples_to_trim:
+ audio_float = audio_float[:-self.samples_to_trim]
+
+ # Scale to int16 range
+ return (audio_float * self.int16_max).astype(np.int16)
class AudioService:
"""Service for audio format conversions"""
-
+
+ # Default audio format settings balanced for speed and compression
+ DEFAULT_SETTINGS = {
+ "mp3": {
+ "bitrate_mode": "CONSTANT", # Faster than variable bitrate
+            "compression_level": 0.0,  # No compression for lowest latency
+ },
+ "opus": {
+ "compression_level": 0.0, # Good balance for speech
+ },
+ "flac": {
+ "compression_level": 0.0, # Light compression, still fast
+ }
+ }
+
@staticmethod
def convert_audio(
- audio_data: np.ndarray, sample_rate: int, output_format: str
+ audio_data: np.ndarray,
+ sample_rate: int,
+ output_format: str,
+ is_first_chunk: bool = True,
+ is_last_chunk: bool = False,
+ normalizer: AudioNormalizer = None,
+ format_settings: dict = None,
+ stream: bool = True
) -> bytes:
"""Convert audio data to specified format
@@ -20,6 +66,20 @@ class AudioService:
audio_data: Numpy array of audio samples
sample_rate: Sample rate of the audio
output_format: Target format (wav, mp3, opus, flac, pcm)
+            is_first_chunk: Whether this is the first chunk of a stream
+            is_last_chunk: Whether this is the last chunk of a stream (end trimming is skipped)
+            normalizer: Optional AudioNormalizer instance for consistent normalization across chunks
+            stream: Whether this conversion is part of a streaming response; when False, normalization and trimming are skipped
+ format_settings: Optional dict of format-specific settings to override defaults
+ Example: {
+ "mp3": {
+ "bitrate_mode": "VARIABLE",
+ "compression_level": 0.8
+ }
+ }
+            Default settings are optimized for speed on localhost (compression level 0.0):
+ - MP3: constant bitrate, no compression (0.0)
+ - OPUS: no compression (0.0)
+ - FLAC: no compression (0.0)
Returns:
Bytes of the converted audio
@@ -27,34 +87,58 @@ class AudioService:
buffer = BytesIO()
try:
- if output_format == "wav":
- logger.info("Writing to WAV format...")
- # Ensure audio_data is in int16 format for WAV
- audio_data_wav = (
- audio_data / np.abs(audio_data).max() * np.iinfo(np.int16).max
- ).astype(np.int16) # Normalize
- sf.write(buffer, audio_data_wav, sample_rate, format="WAV")
- elif output_format == "mp3":
- logger.info("Converting to MP3 format...")
- # soundfile can write MP3 if ffmpeg or libsox is installed
- sf.write(buffer, audio_data, sample_rate, format="MP3")
- elif output_format == "opus":
- logger.info("Converting to Opus format...")
- sf.write(buffer, audio_data, sample_rate, format="OGG", subtype="OPUS")
- elif output_format == "flac":
- logger.info("Converting to FLAC format...")
- sf.write(buffer, audio_data, sample_rate, format="FLAC")
- elif output_format == "pcm":
- logger.info("Extracting PCM data...")
- # Ensure audio_data is in int16 format for PCM
- audio_data_pcm = (
- audio_data / np.abs(audio_data).max() * np.iinfo(np.int16).max
- ).astype(np.int16) # Normalize
- buffer.write(audio_data_pcm.tobytes())
+            # Normalize streamed audio to int16 for consistent amplitude scaling; non-streaming audio is passed through as-is
+ if stream:
+ if normalizer is None:
+ normalizer = AudioNormalizer()
+ normalized_audio = normalizer.normalize(audio_data, is_last_chunk=is_last_chunk)
else:
- raise ValueError(
- f"Format {output_format} not supported. Supported formats are: wav, mp3, opus, flac, pcm."
- )
+ normalized_audio = audio_data
+
+ if output_format == "pcm":
+ # Raw 16-bit PCM samples, no header
+ buffer.write(normalized_audio.tobytes())
+ elif output_format == "wav":
+ if stream:
+ # Use soundfile for streaming to ensure proper headers
+ sf.write(buffer, normalized_audio, sample_rate, format="WAV", subtype='PCM_16')
+ else:
+                    # Use scipy.io.wavfile for non-streaming WAV generation;
+                    # it appears faster than soundfile, avoiding extra
+                    # header generation and PCM encoding overhead
+ wavfile.write(buffer, sample_rate, normalized_audio)
+ elif output_format == "mp3":
+ # Use format settings or defaults
+ settings = format_settings.get("mp3", {}) if format_settings else {}
+ settings = {**AudioService.DEFAULT_SETTINGS["mp3"], **settings}
+ sf.write(
+ buffer, normalized_audio,
+ sample_rate, format="MP3",
+ **settings
+ )
+
+ elif output_format == "opus":
+ settings = format_settings.get("opus", {}) if format_settings else {}
+ settings = {**AudioService.DEFAULT_SETTINGS["opus"], **settings}
+ sf.write(buffer, normalized_audio, sample_rate, format="OGG",
+ subtype="OPUS", **settings)
+
+ elif output_format == "flac":
+ if is_first_chunk:
+ logger.info("Starting FLAC stream...")
+ settings = format_settings.get("flac", {}) if format_settings else {}
+ settings = {**AudioService.DEFAULT_SETTINGS["flac"], **settings}
+ sf.write(buffer, normalized_audio, sample_rate, format="FLAC",
+ subtype='PCM_16', **settings)
+ else:
+ if output_format == "aac":
+ raise ValueError(
+ "Format aac not supported. Supported formats are: wav, mp3, opus, flac, pcm."
+ )
+ else:
+ raise ValueError(
+ f"Format {output_format} not supported. Supported formats are: wav, mp3, opus, flac, pcm."
+ )
buffer.seek(0)
return buffer.getvalue()
diff --git a/api/src/services/text_processing/chunker.py b/api/src/services/text_processing/chunker.py
new file mode 100644
index 0000000..c0c59eb
--- /dev/null
+++ b/api/src/services/text_processing/chunker.py
@@ -0,0 +1,52 @@
+"""Text chunking service"""
+
+import re
+from ...core.config import settings
+
+
+def split_text(text: str, max_chunk=None):
+ """Split text into chunks on natural pause points
+
+ Args:
+ text: Text to split into chunks
+ max_chunk: Maximum chunk size (defaults to settings.max_chunk_size)
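+
+    Example (illustrative):
+        >>> list(split_text("First sentence. Second part, third part.", max_chunk=15))
+        ['First sentence.', 'Second part,', 'third part.']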
+ """
+ if max_chunk is None:
+ max_chunk = settings.max_chunk_size
+
+ if not isinstance(text, str):
+ text = str(text) if text is not None else ""
+
+ text = text.strip()
+ if not text:
+ return
+
+ # First split into sentences
+ sentences = re.split(r"(?<=[.!?])\s+", text)
+
+ for sentence in sentences:
+ sentence = sentence.strip()
+ if not sentence:
+ continue
+
+ # For medium-length sentences, split on punctuation
+ if len(sentence) > max_chunk: # Lower threshold for more consistent sizes
+ # First try splitting on semicolons and colons
+ parts = re.split(r"(?<=[;:])\s+", sentence)
+
+ for part in parts:
+ part = part.strip()
+ if not part:
+ continue
+
+ # If part is still long, split on commas
+ if len(part) > max_chunk:
+ subparts = re.split(r"(?<=,)\s+", part)
+ for subpart in subparts:
+ subpart = subpart.strip()
+ if subpart:
+ yield subpart
+ else:
+ yield part
+ else:
+ yield sentence
diff --git a/api/src/services/text_processing/normalizer.py b/api/src/services/text_processing/normalizer.py
index db5b7db..7b0dd17 100644
--- a/api/src/services/text_processing/normalizer.py
+++ b/api/src/services/text_processing/normalizer.py
@@ -1,4 +1,5 @@
import re
+from functools import lru_cache
def split_num(num: re.Match) -> str:
"""Handle number splitting for various formats"""
@@ -48,6 +49,7 @@ def handle_decimal(num: re.Match) -> str:
a, b = num.group().split(".")
return " point ".join([a, " ".join(b)])
+# @lru_cache(maxsize=1000) # Cache normalized text results
def normalize_text(text: str) -> str:
"""Normalize text for TTS processing
diff --git a/api/src/services/tts_base.py b/api/src/services/tts_base.py
index f502373..16e8462 100644
--- a/api/src/services/tts_base.py
+++ b/api/src/services/tts_base.py
@@ -15,7 +15,7 @@ class TTSBaseModel(ABC):
VOICES_DIR = os.path.join(os.path.dirname(os.path.dirname(__file__)), "voices")
@classmethod
- def setup(cls):
+ async def setup(cls):
"""Initialize model and setup voices"""
with cls._lock:
# Set device
@@ -59,19 +59,23 @@ class TTSBaseModel(ABC):
except Exception as e:
logger.error(f"Error copying voice {voice_name}: {str(e)}")
- # Warm up with default voice
+ # Load warmup text
try:
- dummy_text = "Hello"
- voice_path = os.path.join(cls.VOICES_DIR, "af.pt")
- dummy_voicepack = torch.load(voice_path, map_location=cls._device, weights_only=True)
-
- # Process text and generate audio
- phonemes, tokens = cls.process_text(dummy_text, "a")
- cls.generate_from_tokens(tokens, dummy_voicepack, 1.0)
-
- logger.info("Model warm-up complete")
+ with open(os.path.join(os.path.dirname(os.path.dirname(__file__)), "core", "don_quixote.txt")) as f:
+ warmup_text = f.read()
except Exception as e:
- logger.warning(f"Model warm-up failed: {e}")
+ logger.warning(f"Failed to load warmup text: {e}")
+ warmup_text = "This is a warmup text that will be split into chunks for processing."
+
+ # Use warmup service
+ from .warmup import WarmupService
+ warmup = WarmupService()
+
+ # Load and warm up voices
+ loaded_voices = warmup.load_voices()
+ await warmup.warmup_voices(warmup_text, loaded_voices)
+
+ logger.info("Model warm-up complete")
# Count voices in directory
voice_count = len([f for f in os.listdir(cls.VOICES_DIR) if f.endswith(".pt")])
diff --git a/api/src/services/tts_gpu.py b/api/src/services/tts_gpu.py
index 300d141..51c8424 100644
--- a/api/src/services/tts_gpu.py
+++ b/api/src/services/tts_gpu.py
@@ -1,6 +1,7 @@
import os
import numpy as np
import torch
+import time
from loguru import logger
from models import build_model
from .text_processing import phonemize, tokenize
@@ -8,42 +9,97 @@ from .text_processing import phonemize, tokenize
from .tts_base import TTSBaseModel
from ..core.config import settings
+# @torch.no_grad()
+# def forward(model, tokens, ref_s, speed):
+# """Forward pass through the model"""
+# device = ref_s.device
+# tokens = torch.LongTensor([[0, *tokens, 0]]).to(device)
+# input_lengths = torch.LongTensor([tokens.shape[-1]]).to(device)
+# text_mask = length_to_mask(input_lengths).to(device)
+# bert_dur = model.bert(tokens, attention_mask=(~text_mask).int())
+# d_en = model.bert_encoder(bert_dur).transpose(-1, -2)
+# s = ref_s[:, 128:]
+# d = model.predictor.text_encoder(d_en, s, input_lengths, text_mask)
+# x, _ = model.predictor.lstm(d)
+# duration = model.predictor.duration_proj(x)
+# duration = torch.sigmoid(duration).sum(axis=-1) / speed
+# pred_dur = torch.round(duration).clamp(min=1).long()
+# pred_aln_trg = torch.zeros(input_lengths, pred_dur.sum().item())
+# c_frame = 0
+# for i in range(pred_aln_trg.size(0)):
+# pred_aln_trg[i, c_frame : c_frame + pred_dur[0, i].item()] = 1
+# c_frame += pred_dur[0, i].item()
+# en = d.transpose(-1, -2) @ pred_aln_trg.unsqueeze(0).to(device)
+# F0_pred, N_pred = model.predictor.F0Ntrain(en, s)
+# t_en = model.text_encoder(tokens, input_lengths, text_mask)
+# asr = t_en @ pred_aln_trg.unsqueeze(0).to(device)
+# return model.decoder(asr, F0_pred, N_pred, ref_s[:, :128]).squeeze().cpu().numpy()
@torch.no_grad()
def forward(model, tokens, ref_s, speed):
- """Forward pass through the model"""
+ """Forward pass through the model with light optimizations that preserve output quality"""
device = ref_s.device
+
+ # Keep original token handling but optimize device placement
tokens = torch.LongTensor([[0, *tokens, 0]]).to(device)
input_lengths = torch.LongTensor([tokens.shape[-1]]).to(device)
text_mask = length_to_mask(input_lengths).to(device)
+
+ # BERT and encoder pass
bert_dur = model.bert(tokens, attention_mask=(~text_mask).int())
d_en = model.bert_encoder(bert_dur).transpose(-1, -2)
- s = ref_s[:, 128:]
- d = model.predictor.text_encoder(d_en, s, input_lengths, text_mask)
+
+ # Split reference signal once for efficiency
+ s_content = ref_s[:, 128:]
+ s_ref = ref_s[:, :128]
+
+ # Predictor forward pass
+ d = model.predictor.text_encoder(d_en, s_content, input_lengths, text_mask)
x, _ = model.predictor.lstm(d)
+
+ # Duration prediction - keeping original logic
duration = model.predictor.duration_proj(x)
duration = torch.sigmoid(duration).sum(axis=-1) / speed
pred_dur = torch.round(duration).clamp(min=1).long()
- pred_aln_trg = torch.zeros(input_lengths, pred_dur.sum().item())
+
+ # Alignment matrix construction - keeping original approach for quality
+ pred_aln_trg = torch.zeros(input_lengths, pred_dur.sum().item(), device=device)
c_frame = 0
for i in range(pred_aln_trg.size(0)):
- pred_aln_trg[i, c_frame : c_frame + pred_dur[0, i].item()] = 1
+ pred_aln_trg[i, c_frame:c_frame + pred_dur[0, i].item()] = 1
c_frame += pred_dur[0, i].item()
- en = d.transpose(-1, -2) @ pred_aln_trg.unsqueeze(0).to(device)
- F0_pred, N_pred = model.predictor.F0Ntrain(en, s)
+
+ # Matrix multiplications - reuse unsqueezed tensor
+ pred_aln_trg = pred_aln_trg.unsqueeze(0) # Do unsqueeze once
+ en = d.transpose(-1, -2) @ pred_aln_trg
+ F0_pred, N_pred = model.predictor.F0Ntrain(en, s_content)
+
+ # Text encoding and final decoding
t_en = model.text_encoder(tokens, input_lengths, text_mask)
- asr = t_en @ pred_aln_trg.unsqueeze(0).to(device)
- return model.decoder(asr, F0_pred, N_pred, ref_s[:, :128]).squeeze().cpu().numpy()
+ asr = t_en @ pred_aln_trg
+
+ return model.decoder(asr, F0_pred, N_pred, s_ref).squeeze().cpu().numpy()
+
+# def length_to_mask(lengths):
+# """Create attention mask from lengths"""
+# mask = (
+# torch.arange(lengths.max())
+# .unsqueeze(0)
+# .expand(lengths.shape[0], -1)
+# .type_as(lengths)
+# )
+# mask = torch.gt(mask + 1, lengths.unsqueeze(1))
+# return mask
def length_to_mask(lengths):
- """Create attention mask from lengths"""
- mask = (
- torch.arange(lengths.max())
- .unsqueeze(0)
- .expand(lengths.shape[0], -1)
- .type_as(lengths)
- )
- mask = torch.gt(mask + 1, lengths.unsqueeze(1))
- return mask
+    """Create attention mask from lengths, building the mask directly on the lengths' device"""
+ max_len = lengths.max()
+ # Create mask directly on the same device as lengths
+ mask = torch.arange(max_len, device=lengths.device)[None, :].expand(lengths.shape[0], -1)
+ # Avoid type_as by using the correct dtype from the start
+ if lengths.dtype != mask.dtype:
+ mask = mask.to(dtype=lengths.dtype)
+ # Fuse operations using broadcasting
+ return mask + 1 > lengths[:, None]
class TTSGPUModel(TTSBaseModel):
_instance = None
diff --git a/api/src/services/tts_service.py b/api/src/services/tts_service.py
index 6d763fe..8e2aed6 100644
--- a/api/src/services/tts_service.py
+++ b/api/src/services/tts_service.py
@@ -3,26 +3,29 @@ import os
import re
import time
from typing import List, Tuple, Optional
+from functools import lru_cache
import numpy as np
import torch
import scipy.io.wavfile as wavfile
-from .text_processing import normalize_text
+from .text_processing import normalize_text, chunker
from loguru import logger
from ..core.config import settings
from .tts_model import TTSModel
+from .audio import AudioService, AudioNormalizer
class TTSService:
def __init__(self, output_dir: str = None):
self.output_dir = output_dir
- def _split_text(self, text: str) -> List[str]:
- """Split text into sentences"""
- if not isinstance(text, str):
- text = str(text) if text is not None else ""
- return [s.strip() for s in re.split(r"(?<=[.!?])\s+", text) if s.strip()]
+
+ @staticmethod
+    @lru_cache(maxsize=20)  # Cache up to 20 most recently used voices
+ def _load_voice(voice_path: str) -> torch.Tensor:
+ """Load and cache a voice model"""
+ return torch.load(voice_path, map_location=TTSModel.get_device(), weights_only=True)
def _get_voice_path(self, voice_name: str) -> Optional[str]:
"""Get the path to a voice file"""
@@ -31,6 +34,13 @@ class TTSService:
def _generate_audio(
self, text: str, voice: str, speed: float, stitch_long_output: bool = True
+ ) -> Tuple[torch.Tensor, float]:
+ """Generate complete audio and return with processing time"""
+ audio, processing_time = self._generate_audio_internal(text, voice, speed, stitch_long_output)
+ return audio, processing_time
+
+ def _generate_audio_internal(
+ self, text: str, voice: str, speed: float, stitch_long_output: bool = True
) -> Tuple[torch.Tensor, float]:
"""Generate audio and measure processing time"""
start_time = time.time()
@@ -49,42 +59,42 @@ class TTSService:
if not voice_path:
raise ValueError(f"Voice not found: {voice}")
- # Load voice
- voicepack = torch.load(
- voice_path, map_location=TTSModel.get_device(), weights_only=True
- )
+ # Load voice using cached loader
+ voicepack = self._load_voice(voice_path)
- # Generate audio with or without stitching
+ # For non-streaming, preprocess all chunks first
if stitch_long_output:
- chunks = self._split_text(text)
- audio_chunks = []
-
- # Process all chunks
- for i, chunk in enumerate(chunks):
+ # Preprocess all chunks to phonemes/tokens
+ chunks_data = []
+ for chunk in chunker.split_text(text):
try:
- # Process text and generate audio
phonemes, tokens = TTSModel.process_text(chunk, voice[0])
+ chunks_data.append((chunk, tokens))
+ except Exception as e:
+ logger.error(f"Failed to process chunk: '{chunk}'. Error: {str(e)}")
+ continue
+
+ if not chunks_data:
+ raise ValueError("No chunks were processed successfully")
+
+ # Generate audio for all chunks
+ audio_chunks = []
+ for chunk, tokens in chunks_data:
+ try:
chunk_audio = TTSModel.generate_from_tokens(tokens, voicepack, speed)
-
if chunk_audio is not None:
audio_chunks.append(chunk_audio)
else:
- logger.error(f"No audio generated for chunk {i + 1}/{len(chunks)}")
-
+ logger.error(f"No audio generated for chunk: '{chunk}'")
except Exception as e:
- logger.error(
- f"Failed to generate audio for chunk {i + 1}/{len(chunks)}: '{chunk}'. Error: {str(e)}"
- )
+ logger.error(f"Failed to generate audio for chunk: '{chunk}'. Error: {str(e)}")
continue
if not audio_chunks:
raise ValueError("No audio chunks were generated successfully")
- audio = (
- np.concatenate(audio_chunks)
- if len(audio_chunks) > 1
- else audio_chunks[0]
- )
+ # Concatenate all chunks
+ audio = np.concatenate(audio_chunks) if len(audio_chunks) > 1 else audio_chunks[0]
else:
# Process single chunk
phonemes, tokens = TTSModel.process_text(text, voice[0])
@@ -97,6 +107,99 @@ class TTSService:
logger.error(f"Error in audio generation: {str(e)}")
raise
+ async def generate_audio_stream(
+ self, text: str, voice: str, speed: float, output_format: str = "wav", silent=False
+ ):
+ """Generate and yield audio chunks as they're generated for real-time streaming"""
+ try:
+ stream_start = time.time()
+ # Create normalizer for consistent audio levels
+ stream_normalizer = AudioNormalizer()
+
+ # Input validation and preprocessing
+ if not text:
+ raise ValueError("Text is empty")
+ preprocess_start = time.time()
+ normalized = normalize_text(text)
+ if not normalized:
+ raise ValueError("Text is empty after preprocessing")
+ text = str(normalized)
+ logger.debug(f"Text preprocessing took: {(time.time() - preprocess_start)*1000:.1f}ms")
+
+ # Voice validation and loading
+ voice_start = time.time()
+ voice_path = self._get_voice_path(voice)
+ if not voice_path:
+ raise ValueError(f"Voice not found: {voice}")
+ voicepack = self._load_voice(voice_path)
+ logger.debug(f"Voice loading took: {(time.time() - voice_start)*1000:.1f}ms")
+
+ # Process chunks as they're generated
+ is_first = True
+ chunks_processed = 0
+ # last_chunk_end = time.time()
+
+ # Process chunks as they come from generator
+ chunk_gen = chunker.split_text(text)
+ current_chunk = next(chunk_gen, None)
+
+ while current_chunk is not None:
+ next_chunk = next(chunk_gen, None) # Peek at next chunk
+ # chunk_start = time.time()
+ chunks_processed += 1
+ try:
+ # Process text and generate audio
+ # text_process_start = time.time()
+ phonemes, tokens = TTSModel.process_text(current_chunk, voice[0])
+ # text_process_time = time.time() - text_process_start
+
+ # audio_gen_start = time.time()
+ chunk_audio = TTSModel.generate_from_tokens(tokens, voicepack, speed)
+ # audio_gen_time = time.time() - audio_gen_start
+
+ if chunk_audio is not None:
+ # Convert chunk with proper header handling
+ convert_start = time.time()
+ chunk_bytes = AudioService.convert_audio(
+ chunk_audio,
+ 24000,
+ output_format,
+ is_first_chunk=is_first,
+ normalizer=stream_normalizer,
+ is_last_chunk=(next_chunk is None) # Last if no next chunk
+ )
+ # convert_time = time.time() - convert_start
+
+ # Calculate gap from last chunk
+ # gap_time = chunk_start - last_chunk_end
+
+ # Log timing details if not silent
+ # if not silent:
+ # logger.debug(
+ # f"\nChunk {chunks_processed} timing:"
+ # f"\n Gap from last chunk: {gap_time*1000:.1f}ms"
+ # f"\n Text processing: {text_process_time*1000:.1f}ms"
+ # f"\n Audio generation: {audio_gen_time*1000:.1f}ms"
+ # f"\n Audio conversion: {convert_time*1000:.1f}ms"
+ # f"\n Total chunk time: {(time.time() - chunk_start)*1000:.1f}ms"
+ # )
+
+ yield chunk_bytes
+ is_first = False
+ # last_chunk_end = time.time()
+ else:
+ logger.error(f"No audio generated for chunk: '{current_chunk}'")
+
+ except Exception as e:
+ logger.error(f"Failed to generate audio for chunk: '{current_chunk}'. Error: {str(e)}")
+
+ current_chunk = next_chunk # Move to next chunk
+
+ except Exception as e:
+ logger.error(f"Error in audio generation stream: {str(e)}")
+ raise
+
+
def _save_audio(self, audio: torch.Tensor, filepath: str):
"""Save audio to file"""
os.makedirs(os.path.dirname(filepath), exist_ok=True)
diff --git a/api/src/services/warmup.py b/api/src/services/warmup.py
new file mode 100644
index 0000000..67937dd
--- /dev/null
+++ b/api/src/services/warmup.py
@@ -0,0 +1,52 @@
+import os
+from typing import List, Tuple
+import torch
+from loguru import logger
+
+from .tts_service import TTSService
+from .tts_model import TTSModel
+
+
+class WarmupService:
+ """Service for warming up TTS models and voice caches"""
+
+ def __init__(self):
+ self.tts_service = TTSService()
+
+ def load_voices(self) -> List[Tuple[str, torch.Tensor]]:
+ """Load and cache voices up to LRU limit"""
+ # Get all voices sorted by filename length (shorter names first, usually base voices)
+ voice_files = sorted(
+ [f for f in os.listdir(TTSModel.VOICES_DIR) if f.endswith(".pt")],
+ key=len
+ )
+
+ # Load up to LRU cache limit (20)
+ loaded_voices = []
+ for voice_file in voice_files[:20]:
+ try:
+ voice_path = os.path.join(TTSModel.VOICES_DIR, voice_file)
+ voicepack = torch.load(voice_path, map_location=TTSModel.get_device(), weights_only=True)
+ loaded_voices.append((voice_file[:-3], voicepack)) # Store name and tensor
+ # logger.info(f"Loaded voice {voice_file[:-3]} into cache")
+ except Exception as e:
+ logger.error(f"Failed to load voice {voice_file}: {e}")
+ logger.info(f"Pre-loaded {len(loaded_voices)} voices into cache")
+ return loaded_voices
+
+ async def warmup_voices(self, warmup_text: str, loaded_voices: List[Tuple[str, torch.Tensor]]):
+ """Warm up voice inference and streaming"""
+ n_warmups = 1
+ for voice_name, _ in loaded_voices[:n_warmups]:
+ try:
+ logger.info(f"Running warmup inference on voice {voice_name}")
+ async for _ in self.tts_service.generate_audio_stream(
+ warmup_text,
+ voice_name,
+ 1.0,
+ "pcm"
+ ):
+ pass # Process all chunks to properly warm up
+ logger.info(f"Completed warmup for voice {voice_name}")
+ except Exception as e:
+ logger.warning(f"Warmup failed for voice {voice_name}: {e}")
diff --git a/api/src/structures/schemas.py b/api/src/structures/schemas.py
index bc778bb..66db7ad 100644
--- a/api/src/structures/schemas.py
+++ b/api/src/structures/schemas.py
@@ -22,7 +22,7 @@ class OpenAISpeechRequest(BaseModel):
)
response_format: Literal["mp3", "opus", "aac", "flac", "wav", "pcm"] = Field(
default="mp3",
- description="The format to return audio in. Supported formats: mp3, opus, flac, wav. AAC and PCM are not currently supported.",
+ description="The format to return audio in. Supported formats: mp3, opus, flac, wav, pcm. PCM format returns raw 16-bit samples without headers. AAC is not currently supported.",
)
speed: float = Field(
default=1.0,
@@ -30,3 +30,7 @@ class OpenAISpeechRequest(BaseModel):
le=4.0,
description="The speed of the generated audio. Select a value from 0.25 to 4.0.",
)
+ stream: bool = Field(
+ default=True, # Default to streaming for OpenAI compatibility
+        description="If true (default), audio will be streamed as it's generated. Each chunk corresponds to roughly one sentence; long sentences are split on punctuation.",
+ )
diff --git a/api/tests/test_chunker.py b/api/tests/test_chunker.py
new file mode 100644
index 0000000..ed598c0
--- /dev/null
+++ b/api/tests/test_chunker.py
@@ -0,0 +1,35 @@
+"""Tests for text chunking service"""
+
+import pytest
+from api.src.services.text_processing import chunker
+
+
+def test_split_text():
+ """Test text splitting into sentences"""
+ text = "First sentence. Second sentence! Third sentence?"
+ sentences = list(chunker.split_text(text))
+ assert len(sentences) == 3
+ assert sentences[0] == "First sentence."
+ assert sentences[1] == "Second sentence!"
+ assert sentences[2] == "Third sentence?"
+
+
+def test_split_text_empty():
+ """Test splitting empty text"""
+ assert list(chunker.split_text("")) == []
+
+
+def test_split_text_single_sentence():
+ """Test splitting single sentence"""
+ text = "Just one sentence."
+ assert list(chunker.split_text(text)) == ["Just one sentence."]
+
+
+def test_split_text_with_custom_chunk_size():
+ """Test splitting with custom max chunk size"""
+ text = "First part, second part, third part."
+ chunks = list(chunker.split_text(text, max_chunk=15))
+ assert len(chunks) == 3
+ assert chunks[0] == "First part,"
+ assert chunks[1] == "second part,"
+ assert chunks[2] == "third part."
diff --git a/api/tests/test_endpoints.py b/api/tests/test_endpoints.py
index 80fe733..4827adc 100644
--- a/api/tests/test_endpoints.py
+++ b/api/tests/test_endpoints.py
@@ -1,19 +1,34 @@
-from unittest.mock import Mock
+from unittest.mock import Mock, AsyncMock
import pytest
+import pytest_asyncio
+import asyncio
from fastapi.testclient import TestClient
+from httpx import AsyncClient
from ..src.main import app
# Create test client
client = TestClient(app)
+# Create async client fixture
+@pytest_asyncio.fixture
+async def async_client():
+ async with AsyncClient(app=app, base_url="http://test") as ac:
+ yield ac
+
# Mock services
@pytest.fixture
def mock_tts_service(monkeypatch):
mock_service = Mock()
mock_service._generate_audio.return_value = (bytes([0, 1, 2, 3]), 1.0)
+
+ # Create proper async generator mock
+ async def mock_stream(*args, **kwargs):
+ for chunk in [b"chunk1", b"chunk2"]:
+ yield chunk
+ mock_service.generate_audio_stream = mock_stream
mock_service.list_voices.return_value = [
"af",
"bm_lewis",
@@ -34,12 +49,12 @@ def mock_tts_service(monkeypatch):
@pytest.fixture
def mock_audio_service(monkeypatch):
- def mock_convert(*args):
- return b"converted mock audio data"
-
+ mock_service = Mock()
+ mock_service.convert_audio.return_value = b"converted mock audio data"
monkeypatch.setattr(
- "api.src.routers.openai_compatible.AudioService.convert_audio", mock_convert
+ "api.src.routers.openai_compatible.AudioService", mock_service
)
+ return mock_service
def test_health_check():
@@ -57,6 +72,7 @@ def test_openai_speech_endpoint(mock_tts_service, mock_audio_service):
"voice": "bm_lewis",
"response_format": "wav",
"speed": 1.0,
+ "stream": False # Explicitly disable streaming
}
response = client.post("/v1/audio/speech", json=test_request)
assert response.status_code == 200
@@ -76,6 +92,7 @@ def test_openai_speech_invalid_voice(mock_tts_service):
"voice": "invalid_voice",
"response_format": "wav",
"speed": 1.0,
+ "stream": False # Explicitly disable streaming
}
response = client.post("/v1/audio/speech", json=test_request)
assert response.status_code == 400 # Bad request
@@ -90,6 +107,7 @@ def test_openai_speech_invalid_speed(mock_tts_service):
"voice": "af",
"response_format": "wav",
"speed": -1.0, # Invalid speed
+ "stream": False # Explicitly disable streaming
}
response = client.post("/v1/audio/speech", json=test_request)
assert response.status_code == 422 # Validation error
@@ -104,6 +122,7 @@ def test_openai_speech_generation_error(mock_tts_service):
"voice": "af",
"response_format": "wav",
"speed": 1.0,
+ "stream": False # Explicitly disable streaming
}
response = client.post("/v1/audio/speech", json=test_request)
assert response.status_code == 500
@@ -153,3 +172,89 @@ def test_combine_voices_error(mock_tts_service):
assert response.status_code == 500
assert "Combination failed" in response.json()["detail"]["message"]
+
+
+@pytest.mark.asyncio
+async def test_openai_speech_pcm_streaming(mock_tts_service, async_client):
+ """Test streaming PCM audio for real-time playback"""
+ test_request = {
+ "model": "kokoro",
+ "input": "Hello world",
+ "voice": "af",
+ "response_format": "pcm",
+ "stream": True
+ }
+
+ # Create streaming mock for this test
+ async def mock_stream(*args, **kwargs):
+ for chunk in [b"chunk1", b"chunk2"]:
+ yield chunk
+ mock_tts_service.generate_audio_stream = mock_stream
+
+ # Add streaming header
+ headers = {"x-raw-response": "stream"}
+ response = await async_client.post("/v1/audio/speech", json=test_request, headers=headers)
+
+ assert response.status_code == 200
+ assert response.headers["content-type"] == "audio/pcm"
+
+
+@pytest.mark.asyncio
+async def test_openai_speech_streaming_mp3(mock_tts_service, async_client):
+ """Test streaming MP3 audio to file"""
+ test_request = {
+ "model": "kokoro",
+ "input": "Hello world",
+ "voice": "af",
+ "response_format": "mp3",
+ "stream": True
+ }
+
+ # Create streaming mock for this test
+ async def mock_stream(*args, **kwargs):
+ for chunk in [b"mp3header", b"mp3data"]:
+ yield chunk
+ mock_tts_service.generate_audio_stream = mock_stream
+
+ # Add streaming header
+ headers = {"x-raw-response": "stream"}
+ response = await async_client.post("/v1/audio/speech", json=test_request, headers=headers)
+
+ assert response.status_code == 200
+ assert response.headers["content-type"] == "audio/mpeg"
+ assert response.headers["content-disposition"] == "attachment; filename=speech.mp3"
+
+
+@pytest.mark.asyncio
+async def test_openai_speech_streaming_generator(mock_tts_service, async_client):
+ """Test streaming with async generator"""
+ test_request = {
+ "model": "kokoro",
+ "input": "Hello world",
+ "voice": "af",
+ "response_format": "pcm",
+ "stream": True
+ }
+
+ # Create streaming mock for this test
+ async def mock_stream(*args, **kwargs):
+ for chunk in [b"chunk1", b"chunk2"]:
+ yield chunk
+ mock_tts_service.generate_audio_stream = mock_stream
+
+ # Add streaming header
+ headers = {"x-raw-response": "stream"}
+ response = await async_client.post("/v1/audio/speech", json=test_request, headers=headers)
+
+ assert response.status_code == 200
+ assert response.headers["content-type"] == "audio/pcm"
diff --git a/api/tests/test_main.py b/api/tests/test_main.py
index c6a972e..cb7aa8b 100644
--- a/api/tests/test_main.py
+++ b/api/tests/test_main.py
@@ -1,6 +1,6 @@
"""Tests for FastAPI application"""
-from unittest.mock import MagicMock, patch
+from unittest.mock import MagicMock, patch, call
import pytest
from fastapi.testclient import TestClient
@@ -28,25 +28,34 @@ async def test_lifespan_successful_warmup(mock_logger, mock_tts_model):
"""Test successful model warmup in lifespan"""
# Mock file system for voice counting
mock_tts_model.VOICES_DIR = "/mock/voices"
+
+ # Create async mock
+ async def async_setup():
+ return 3
+ mock_tts_model.setup = MagicMock()
+ mock_tts_model.setup.side_effect = async_setup
+ mock_tts_model.get_device.return_value = "cuda"
+
with patch("os.listdir", return_value=["voice1.pt", "voice2.pt", "voice3.pt"]):
- mock_tts_model.setup.return_value = 3 # 3 voice files
- mock_tts_model.get_device.return_value = "cuda"
+ # Create an async generator from the lifespan context manager
+ async_gen = lifespan(MagicMock())
+ # Start the context manager
+ await async_gen.__aenter__()
- # Create an async generator from the lifespan context manager
- async_gen = lifespan(MagicMock())
- # Start the context manager
- await async_gen.__aenter__()
+ # Verify the expected logging sequence
+ mock_logger.info.assert_any_call("Loading TTS model and voice packs...")
+
+ # Check for the startup message containing the required info
+ startup_calls = [call[0][0] for call in mock_logger.info.call_args_list]
+ startup_msg = next(msg for msg in startup_calls if "Model warmed up on" in msg)
+ assert "Model warmed up on" in startup_msg
+ assert "3 voice packs loaded" in startup_msg
- # Verify the expected logging sequence
- mock_logger.info.assert_any_call("Loading TTS model and voice packs...")
- mock_logger.info.assert_any_call("Model loaded and warmed up on cuda")
- mock_logger.info.assert_any_call("3 voice packs loaded successfully")
+ # Verify model setup was called
+ mock_tts_model.setup.assert_called_once()
- # Verify model setup was called
- mock_tts_model.setup.assert_called_once()
-
- # Clean up
- await async_gen.__aexit__(None, None, None)
+ # Clean up
+ await async_gen.__aexit__(None, None, None)
@pytest.mark.asyncio
@@ -77,39 +86,21 @@ async def test_lifespan_cuda_warmup(mock_tts_model):
"""Test model warmup specifically on CUDA"""
# Mock file system for voice counting
mock_tts_model.VOICES_DIR = "/mock/voices"
+
+ # Create async mock
+ async def async_setup():
+ return 2
+ mock_tts_model.setup = MagicMock()
+ mock_tts_model.setup.side_effect = async_setup
+ mock_tts_model.get_device.return_value = "cuda"
+
with patch("os.listdir", return_value=["voice1.pt", "voice2.pt"]):
- mock_tts_model.setup.return_value = 2 # 2 voice files
- mock_tts_model.get_device.return_value = "cuda"
+ # Create an async generator from the lifespan context manager
+ async_gen = lifespan(MagicMock())
+ await async_gen.__aenter__()
- # Create an async generator from the lifespan context manager
- async_gen = lifespan(MagicMock())
- await async_gen.__aenter__()
+ # Verify model setup was called
+ mock_tts_model.setup.assert_called_once()
- # Verify model setup was called
- mock_tts_model.setup.assert_called_once()
-
- # Clean up
- await async_gen.__aexit__(None, None, None)
-
-
-@pytest.mark.asyncio
-@patch("api.src.main.TTSModel")
-async def test_lifespan_cpu_fallback(mock_tts_model):
- """Test model warmup falling back to CPU"""
- # Mock file system for voice counting
- mock_tts_model.VOICES_DIR = "/mock/voices"
- with patch(
- "os.listdir", return_value=["voice1.pt", "voice2.pt", "voice3.pt", "voice4.pt"]
- ):
- mock_tts_model.setup.return_value = 4 # 4 voice files
- mock_tts_model.get_device.return_value = "cpu"
-
- # Create an async generator from the lifespan context manager
- async_gen = lifespan(MagicMock())
- await async_gen.__aenter__()
-
- # Verify model setup was called
- mock_tts_model.setup.assert_called_once()
-
- # Clean up
- await async_gen.__aexit__(None, None, None)
+ # Clean up
+ await async_gen.__aexit__(None, None, None)
diff --git a/api/tests/test_tts_implementations.py b/api/tests/test_tts_implementations.py
index 3f10c17..9e92392 100644
--- a/api/tests/test_tts_implementations.py
+++ b/api/tests/test_tts_implementations.py
@@ -16,13 +16,14 @@ def test_get_device_error():
with pytest.raises(RuntimeError, match="Model not initialized"):
TTSBaseModel.get_device()
+@pytest.mark.asyncio
@patch('torch.cuda.is_available')
@patch('os.path.exists')
@patch('os.path.join')
@patch('os.listdir')
@patch('torch.load')
@patch('torch.save')
-def test_setup_cuda_available(mock_save, mock_load, mock_listdir, mock_join, mock_exists, mock_cuda_available):
+async def test_setup_cuda_available(mock_save, mock_load, mock_listdir, mock_join, mock_exists, mock_cuda_available):
"""Test setup with CUDA available"""
TTSBaseModel._device = None
mock_cuda_available.return_value = True
@@ -36,17 +37,18 @@ def test_setup_cuda_available(mock_save, mock_load, mock_listdir, mock_join, moc
TTSBaseModel.process_text = MagicMock(return_value=("dummy", [1,2,3]))
TTSBaseModel.generate_from_tokens = MagicMock(return_value=np.zeros(1000))
- voice_count = TTSBaseModel.setup()
+ voice_count = await TTSBaseModel.setup()
assert TTSBaseModel._device == "cuda"
assert voice_count == 2
+@pytest.mark.asyncio
@patch('torch.cuda.is_available')
@patch('os.path.exists')
@patch('os.path.join')
@patch('os.listdir')
@patch('torch.load')
@patch('torch.save')
-def test_setup_cuda_unavailable(mock_save, mock_load, mock_listdir, mock_join, mock_exists, mock_cuda_available):
+async def test_setup_cuda_unavailable(mock_save, mock_load, mock_listdir, mock_join, mock_exists, mock_cuda_available):
"""Test setup with CUDA unavailable"""
TTSBaseModel._device = None
mock_cuda_available.return_value = False
@@ -60,7 +62,7 @@ def test_setup_cuda_unavailable(mock_save, mock_load, mock_listdir, mock_join, m
TTSBaseModel.process_text = MagicMock(return_value=("dummy", [1,2,3]))
TTSBaseModel.generate_from_tokens = MagicMock(return_value=np.zeros(1000))
- voice_count = TTSBaseModel.setup()
+ voice_count = await TTSBaseModel.setup()
assert TTSBaseModel._device == "cpu"
assert voice_count == 2
diff --git a/api/tests/test_tts_service.py b/api/tests/test_tts_service.py
index 4e63ff1..4fa2c52 100644
--- a/api/tests/test_tts_service.py
+++ b/api/tests/test_tts_service.py
@@ -31,27 +31,6 @@ def sample_audio():
return np.sin(2 * np.pi * frequency * t).astype(np.float32)
-def test_split_text(tts_service):
- """Test text splitting into sentences"""
- text = "First sentence. Second sentence! Third sentence?"
- sentences = tts_service._split_text(text)
- assert len(sentences) == 3
- assert sentences[0] == "First sentence."
- assert sentences[1] == "Second sentence!"
- assert sentences[2] == "Third sentence?"
-
-
-def test_split_text_empty(tts_service):
- """Test splitting empty text"""
- assert tts_service._split_text("") == []
-
-
-def test_split_text_single_sentence(tts_service):
- """Test splitting single sentence"""
- text = "Just one sentence."
- assert tts_service._split_text(text) == ["Just one sentence."]
-
-
def test_audio_to_bytes(tts_service, sample_audio):
"""Test converting audio tensor to bytes"""
audio_bytes = tts_service._audio_to_bytes(sample_audio)
@@ -152,7 +131,7 @@ def test_generate_audio_phonemize_error(
mock_torch_load.return_value = torch.zeros((10, 24000))
mock_generate.return_value = (None, None)
- with pytest.raises(ValueError, match="No audio chunks were generated successfully"):
+ with pytest.raises(ValueError, match="No chunks were processed successfully"):
tts_service._generate_audio("Test text", "af", 1.0)
@@ -185,7 +164,7 @@ def test_generate_audio_error(
mock_exists.return_value = True
mock_torch_load.return_value = torch.zeros((10, 24000))
- with pytest.raises(ValueError, match="No audio chunks were generated successfully"):
+ with pytest.raises(ValueError, match="No chunks were processed successfully"):
tts_service._generate_audio("Test text", "af", 1.0)
diff --git a/assets/cpu_first_token_timeline_stream_openai.png b/assets/cpu_first_token_timeline_stream_openai.png
new file mode 100644
index 0000000..c06f7c5
Binary files /dev/null and b/assets/cpu_first_token_timeline_stream_openai.png differ
diff --git a/assets/format_comparison.png b/assets/format_comparison.png
new file mode 100644
index 0000000..f1669bc
Binary files /dev/null and b/assets/format_comparison.png differ
diff --git a/assets/gpu_first_token_latency_direct.png b/assets/gpu_first_token_latency_direct.png
new file mode 100644
index 0000000..769bbbc
Binary files /dev/null and b/assets/gpu_first_token_latency_direct.png differ
diff --git a/assets/gpu_first_token_latency_openai.png b/assets/gpu_first_token_latency_openai.png
new file mode 100644
index 0000000..06f1b13
Binary files /dev/null and b/assets/gpu_first_token_latency_openai.png differ
diff --git a/assets/gpu_first_token_timeline_direct.png b/assets/gpu_first_token_timeline_direct.png
new file mode 100644
index 0000000..37ae620
Binary files /dev/null and b/assets/gpu_first_token_timeline_direct.png differ
diff --git a/assets/gpu_first_token_timeline_openai.png b/assets/gpu_first_token_timeline_openai.png
new file mode 100644
index 0000000..fa566cc
Binary files /dev/null and b/assets/gpu_first_token_timeline_openai.png differ
diff --git a/assets/gpu_processing_time.png b/assets/gpu_processing_time.png
new file mode 100644
index 0000000..334e37a
Binary files /dev/null and b/assets/gpu_processing_time.png differ
diff --git a/assets/gpu_realtime_factor.png b/assets/gpu_realtime_factor.png
new file mode 100644
index 0000000..1e55996
Binary files /dev/null and b/assets/gpu_realtime_factor.png differ
diff --git a/assets/gpu_total_time_latency_direct.png b/assets/gpu_total_time_latency_direct.png
new file mode 100644
index 0000000..a76c467
Binary files /dev/null and b/assets/gpu_total_time_latency_direct.png differ
diff --git a/assets/gpu_total_time_latency_openai.png b/assets/gpu_total_time_latency_openai.png
new file mode 100644
index 0000000..0acec6a
Binary files /dev/null and b/assets/gpu_total_time_latency_openai.png differ
diff --git a/assets/voice_analysis.png b/assets/voice_analysis.png
new file mode 100644
index 0000000..401c718
Binary files /dev/null and b/assets/voice_analysis.png differ
diff --git a/docker-compose.cpu.yml b/docker-compose.cpu.yml
index d73a247..5bccbe2 100644
--- a/docker-compose.cpu.yml
+++ b/docker-compose.cpu.yml
@@ -45,6 +45,7 @@ services:
- ONNX_OPTIMIZATION_LEVEL=all
- ONNX_MEMORY_PATTERN=true
- ONNX_ARENA_EXTEND_STRATEGY=kNextPowerOfTwo
+
depends_on:
model-fetcher:
condition: service_healthy
diff --git a/docker-compose.yml b/docker-compose.yml
index cc3744b..1958f72 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -1,20 +1,26 @@
services:
model-fetcher:
image: datamachines/git-lfs:latest
+ environment:
+ - SKIP_MODEL_FETCH=${SKIP_MODEL_FETCH:-false}
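+      # Set SKIP_MODEL_FETCH=true (e.g. `SKIP_MODEL_FETCH=true docker compose up`) to reuse an
+      # already-downloaded Kokoro-82M checkout and skip the clone/pull step below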
volumes:
- ./Kokoro-82M:/app/Kokoro-82M
working_dir: /app/Kokoro-82M
command: >
sh -c "
- rm -f .git/index.lock;
- if [ -z \"$(ls -A .)\" ]; then
- git clone https://huggingface.co/hexgrad/Kokoro-82M .
- touch .cloned;
+ if [ \"$$SKIP_MODEL_FETCH\" = \"true\" ]; then
+ echo 'Skipping model fetch...' && touch .cloned;
else
- rm -f .git/index.lock && \
- git checkout main && \
- git pull origin main && \
- touch .cloned;
+ rm -f .git/index.lock;
+ if [ -z \"$(ls -A .)\" ]; then
+ git clone https://huggingface.co/hexgrad/Kokoro-82M .
+ touch .cloned;
+ else
+ rm -f .git/index.lock && \
+ git checkout main && \
+ git pull origin main && \
+ touch .cloned;
+ fi;
fi;
tail -f /dev/null
"
@@ -26,10 +32,10 @@ services:
start_period: 1s
kokoro-tts:
- image: ghcr.io/remsky/kokoro-fastapi:latest
+ # image: ghcr.io/remsky/kokoro-fastapi:latest
# Uncomment below to build from source instead of using the released image
- # build:
- # context: .
+ build:
+ context: .
volumes:
- ./api/src:/app/api/src
- ./Kokoro-82M:/app/Kokoro-82M
diff --git a/examples/assorted_checks/benchmarks/benchmark_first_token.py b/examples/assorted_checks/benchmarks/benchmark_first_token.py
new file mode 100644
index 0000000..a9e47bb
--- /dev/null
+++ b/examples/assorted_checks/benchmarks/benchmark_first_token.py
@@ -0,0 +1,172 @@
+#!/usr/bin/env python3
+import os
+import json
+import time
+
+import numpy as np
+import pandas as pd
+import requests
+from lib.shared_utils import save_json_results
+from lib.shared_plotting import plot_timeline, plot_correlation
+from lib.shared_benchmark_utils import enc, get_text_for_tokens
+
+
+def measure_first_token(
+ text: str, output_dir: str, tokens: int, run_number: int
+) -> dict:
+ """Measure time to audio via API calls and save the audio output"""
+ results = {
+ "text_length": len(text),
+ "token_count": len(enc.encode(text)),
+ "total_time": None,
+ "time_to_first_chunk": None,
+ "error": None,
+ "audio_path": None,
+ "audio_length": None, # Length of output audio in seconds
+ }
+
+ try:
+ start_time = time.time()
+
+ # Make request without streaming
+ response = requests.post(
+ "http://localhost:8880/v1/audio/speech",
+ json={
+ "model": "kokoro",
+ "input": text,
+ "voice": "af",
+ "response_format": "wav",
+ "stream": False,
+ },
+ timeout=1800,
+ )
+ response.raise_for_status()
+
+ # Save complete audio
+ audio_filename = f"benchmark_tokens{tokens}_run{run_number}.wav"
+ audio_path = os.path.join(output_dir, audio_filename)
+ results["audio_path"] = audio_path
+
+ content = response.content
+ with open(audio_path, "wb") as f:
+ f.write(content)
+
+ # Calculate audio length using scipy
+ import scipy.io.wavfile as wavfile
+
+ sample_rate, audio_data = wavfile.read(audio_path)
+ results["audio_length"] = len(audio_data) / sample_rate # Length in seconds
+ results["time_to_first_chunk"] = time.time() - start_time
+
+ results["total_time"] = time.time() - start_time
+ return results
+
+ except Exception as e:
+ results["error"] = str(e)
+ return results
+
+
+def main():
+ # Set up paths
+ script_dir = os.path.dirname(os.path.abspath(__file__))
+ output_dir = os.path.join(script_dir, "output_audio")
+ output_data_dir = os.path.join(script_dir, "output_data")
+
+ # Create output directories
+ os.makedirs(output_dir, exist_ok=True)
+ os.makedirs(output_data_dir, exist_ok=True)
+
+ # Load sample text
+ with open(
+ os.path.join(script_dir, "the_time_machine_hg_wells.txt"), "r", encoding="utf-8"
+ ) as f:
+ text = f.read()
+
+ # Test specific token counts
+ token_sizes = [10, 25, 50, 100, 200, 500]
+ all_results = []
+
+ for tokens in token_sizes:
+ print(f"\nTesting {tokens} tokens")
+ test_text = get_text_for_tokens(text, tokens)
+ actual_tokens = len(enc.encode(test_text))
+ print(f"Text preview: {test_text[:50]}...")
+
+        # Run test 5 times for each size to get average
+        for i in range(5):
+            print(f"Run {i+1}/5...")
+ result = measure_first_token(test_text, output_dir, tokens, i + 1)
+ result["target_tokens"] = tokens
+ result["actual_tokens"] = actual_tokens
+ result["run_number"] = i + 1
+
+ print(f"Time to Audio: {result.get('time_to_first_chunk', 'N/A'):.3f}s")
+ print(f"Total time: {result.get('total_time', 'N/A'):.3f}s")
+
+ if result["error"]:
+ print(f"Error: {result['error']}")
+
+ all_results.append(result)
+
+ # Calculate averages per token size
+ summary = {}
+ for tokens in token_sizes:
+ matching_results = [
+ r for r in all_results if r["target_tokens"] == tokens and not r["error"]
+ ]
+ if matching_results:
+ avg_first_chunk = sum(
+ r["time_to_first_chunk"] for r in matching_results
+ ) / len(matching_results)
+ avg_total = sum(r["total_time"] for r in matching_results) / len(
+ matching_results
+ )
+ avg_audio_length = sum(r["audio_length"] for r in matching_results) / len(
+ matching_results
+ )
+ summary[tokens] = {
+ "avg_time_to_first_chunk": round(avg_first_chunk, 3),
+ "avg_total_time": round(avg_total, 3),
+ "avg_audio_length": round(avg_audio_length, 3),
+ "num_successful_runs": len(matching_results),
+ }
+
+ # Save results
+ results_data = {
+ "individual_runs": all_results,
+ "summary": summary,
+ "timestamp": time.strftime("%Y-%m-%d %H:%M:%S"),
+ }
+ save_json_results(
+ results_data, os.path.join(output_data_dir, "first_token_benchmark.json")
+ )
+
+ # Create plot directory if it doesn't exist
+ output_plots_dir = os.path.join(script_dir, "output_plots")
+ os.makedirs(output_plots_dir, exist_ok=True)
+
+ # Create DataFrame for plotting
+ df = pd.DataFrame(all_results)
+
+ # Create both plots
+ plot_correlation(
+ df,
+ "target_tokens",
+ "time_to_first_chunk",
+ "Time to Audio vs Input Size",
+ "Number of Input Tokens",
+ "Time to Audio (seconds)",
+ os.path.join(output_plots_dir, "first_token_latency.png"),
+ )
+
+ plot_timeline(df, os.path.join(output_plots_dir, "first_token_timeline.png"))
+
+ print("\nResults and plots saved to:")
+ print(f"- {os.path.join(output_data_dir, 'first_token_benchmark.json')}")
+ print(f"- {os.path.join(output_plots_dir, 'first_token_latency.png')}")
+ print(f"- {os.path.join(output_plots_dir, 'first_token_timeline.png')}")
+
+
+if __name__ == "__main__":
+ main()
diff --git a/examples/assorted_checks/benchmarks/benchmark_first_token_stream_unified.py b/examples/assorted_checks/benchmarks/benchmark_first_token_stream_unified.py
new file mode 100644
index 0000000..0b673ae
--- /dev/null
+++ b/examples/assorted_checks/benchmarks/benchmark_first_token_stream_unified.py
@@ -0,0 +1,195 @@
+#!/usr/bin/env python3
+import os
+import time
+
+import requests
+from openai import OpenAI
+from lib.stream_utils import run_benchmark
+
+OPENAI_CLIENT = OpenAI(
+ base_url="http://localhost:8880/v1", api_key="not-needed-for-local"
+)
+
+
+def measure_first_token_requests(
+ text: str, output_dir: str, tokens: int, run_number: int
+) -> dict:
+ """Measure time to audio via direct API calls and save the audio output"""
+ results = {
+ "text_length": len(text),
+ "token_count": None, # Will be set by run_benchmark
+ "total_time": None,
+ "time_to_first_chunk": None,
+ "error": None,
+ "audio_path": None,
+ "audio_length": None,
+ }
+
+ try:
+ start_time = time.time()
+
+ # Make request with streaming enabled
+ response = requests.post(
+ "http://localhost:8880/v1/audio/speech",
+ json={
+ "model": "kokoro",
+ "input": text,
+ "voice": "af",
+ "response_format": "pcm",
+ "stream": True,
+ },
+ stream=True,
+ timeout=1800,
+ )
+ response.raise_for_status()
+
+ # Save complete audio
+ audio_filename = f"benchmark_tokens{tokens}_run{run_number}_stream.wav"
+ audio_path = os.path.join(output_dir, audio_filename)
+ results["audio_path"] = audio_path
+
+ first_chunk_time = None
+ chunks = []
+ for chunk in response.iter_content(chunk_size=1024):
+ if chunk:
+ if first_chunk_time is None:
+ first_chunk_time = time.time()
+ results["time_to_first_chunk"] = first_chunk_time - start_time
+ chunks.append(chunk)
+
+ # Concatenate all PCM chunks
+ if not chunks:
+ raise ValueError("No audio chunks received")
+
+ all_audio_data = b"".join(chunks)
+
+ # Write as WAV file
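+        # (raw PCM from the streaming response has no header; wrapping it in a
+        #  16-bit mono 24 kHz WAV container makes the saved file readable later)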
+ import wave
+
+ with wave.open(audio_path, "wb") as wav_file:
+ wav_file.setnchannels(1) # Mono
+ wav_file.setsampwidth(2) # 2 bytes per sample (16-bit)
+ wav_file.setframerate(24000) # Known sample rate for Kokoro
+ wav_file.writeframes(all_audio_data)
+
+ # Calculate audio length using scipy
+ import scipy.io.wavfile as wavfile
+
+ sample_rate, audio_data = wavfile.read(audio_path)
+ results["audio_length"] = len(audio_data) / sample_rate # Length in seconds
+
+ results["total_time"] = time.time() - start_time
+
+ # Print debug info
+ print(f"Complete audio size: {len(all_audio_data)} bytes")
+ print(f"Number of chunks received: {len(chunks)}")
+ print(f"Audio length: {results['audio_length']:.3f}s")
+
+ return results
+
+ except Exception as e:
+ results["error"] = str(e)
+ return results
+
+
+def measure_first_token_openai(
+ text: str, output_dir: str, tokens: int, run_number: int
+) -> dict:
+ """Measure time to audio via OpenAI API calls and save the audio output"""
+ results = {
+ "text_length": len(text),
+ "token_count": None, # Will be set by run_benchmark
+ "total_time": None,
+ "time_to_first_chunk": None,
+ "error": None,
+ "audio_path": None,
+ "audio_length": None,
+ }
+
+ try:
+ start_time = time.time()
+
+ # Save complete audio
+ audio_filename = f"benchmark_tokens{tokens}_run{run_number}_stream_openai.wav"
+ audio_path = os.path.join(output_dir, audio_filename)
+ results["audio_path"] = audio_path
+
+ first_chunk_time = None
+ all_audio_data = bytearray()
+ chunk_count = 0
+
+ # Make streaming request using OpenAI client
+ with OPENAI_CLIENT.audio.speech.with_streaming_response.create(
+ model="kokoro",
+ voice="af",
+ response_format="pcm",
+ input=text,
+ ) as response:
+ for chunk in response.iter_bytes(chunk_size=1024):
+ if chunk:
+ chunk_count += 1
+ if first_chunk_time is None:
+ first_chunk_time = time.time()
+ results["time_to_first_chunk"] = first_chunk_time - start_time
+ all_audio_data.extend(chunk)
+
+ # Write as WAV file
+ import wave
+
+ with wave.open(audio_path, "wb") as wav_file:
+ wav_file.setnchannels(1) # Mono
+ wav_file.setsampwidth(2) # 2 bytes per sample (16-bit)
+ wav_file.setframerate(24000) # Known sample rate for Kokoro
+ wav_file.writeframes(all_audio_data)
+
+ # Calculate audio length using scipy
+ import scipy.io.wavfile as wavfile
+
+ sample_rate, audio_data = wavfile.read(audio_path)
+ results["audio_length"] = len(audio_data) / sample_rate # Length in seconds
+
+ results["total_time"] = time.time() - start_time
+
+ # Print debug info
+ print(f"Complete audio size: {len(all_audio_data)} bytes")
+ print(f"Number of chunks received: {chunk_count}")
+ print(f"Audio length: {results['audio_length']:.3f}s")
+
+ return results
+
+ except Exception as e:
+ results["error"] = str(e)
+ return results
+
+
+def main():
+ script_dir = os.path.dirname(os.path.abspath(__file__))
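+    # Output file prefix used to tag results by hardware target (e.g. "cpu" or "gpu")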
+    prefix = "cpu"
+ # Run requests benchmark
+ print("\n=== Running Direct Requests Benchmark ===")
+ run_benchmark(
+ measure_first_token_requests,
+ output_dir=os.path.join(script_dir, "output_audio_stream"),
+ output_data_dir=os.path.join(script_dir, "output_data"),
+ output_plots_dir=os.path.join(script_dir, "output_plots"),
+ suffix="_stream",
+ plot_title_suffix="(Streaming)",
+        prefix=prefix,
+ )
+ # Run OpenAI benchmark
+ print("\n=== Running OpenAI Library Benchmark ===")
+ run_benchmark(
+ measure_first_token_openai,
+ output_dir=os.path.join(script_dir, "output_audio_stream_openai"),
+ output_data_dir=os.path.join(script_dir, "output_data"),
+ output_plots_dir=os.path.join(script_dir, "output_plots"),
+ suffix="_stream_openai",
+ plot_title_suffix="(OpenAI Streaming)",
+        prefix=prefix,
+ )
+
+
+if __name__ == "__main__":
+ main()
diff --git a/examples/assorted_checks/benchmarks/benchmark_tts_rtf.py b/examples/assorted_checks/benchmarks/benchmark_tts_rtf.py
index 385abb0..7e4940d 100644
--- a/examples/assorted_checks/benchmarks/benchmark_tts_rtf.py
+++ b/examples/assorted_checks/benchmarks/benchmark_tts_rtf.py
@@ -1,30 +1,37 @@
#!/usr/bin/env python3
import os
+import sys
import json
import time
-import threading
import queue
-import pandas as pd
-import sys
+import threading
from datetime import datetime
-from lib.shared_plotting import plot_system_metrics, plot_correlation
+import pandas as pd
from lib.shared_utils import (
- get_system_metrics, save_json_results, write_benchmark_stats,
- real_time_factor
+ real_time_factor,
+ save_json_results,
+ get_system_metrics,
+ write_benchmark_stats,
)
+from lib.shared_plotting import plot_correlation, plot_system_metrics
from lib.shared_benchmark_utils import (
- get_text_for_tokens, make_tts_request, generate_token_sizes, enc
+ enc,
+ make_tts_request,
+ get_text_for_tokens,
+ generate_token_sizes,
)
+
class SystemMonitor:
def __init__(self, interval=1.0):
+ """Rough system tracker: Not always accurate"""
self.interval = interval
self.metrics_queue = queue.Queue()
self.stop_event = threading.Event()
self.metrics_timeline = []
self.start_time = None
-
+
def _monitor_loop(self):
"""Background thread function to collect system metrics."""
while not self.stop_event.is_set():
@@ -32,20 +39,20 @@ class SystemMonitor:
metrics["relative_time"] = time.time() - self.start_time
self.metrics_queue.put(metrics)
time.sleep(self.interval)
-
+
def start(self):
"""Start the monitoring thread."""
self.start_time = time.time()
self.monitor_thread = threading.Thread(target=self._monitor_loop)
self.monitor_thread.daemon = True
self.monitor_thread.start()
-
+
def stop(self):
"""Stop the monitoring thread and collect final metrics."""
self.stop_event.set()
- if hasattr(self, 'monitor_thread'):
+ if hasattr(self, "monitor_thread"):
self.monitor_thread.join(timeout=2)
-
+
# Collect all metrics from queue
while True:
try:
@@ -53,23 +60,24 @@ class SystemMonitor:
self.metrics_timeline.append(metrics)
except queue.Empty:
break
-
+
return self.metrics_timeline
+
def main():
# Initialize system monitor
monitor = SystemMonitor(interval=1.0) # 1 second interval
# Set prefix for output files (e.g. "gpu", "cpu", "onnx", etc.)
- prefix = "gpu"
+ prefix = "cpu"
# Generate token sizes
- if 'gpu' in prefix:
+ if "gpu" in prefix:
token_sizes = generate_token_sizes(
- max_tokens=5000, dense_step=150,
- dense_max=1000, sparse_step=1000)
- elif 'cpu' in prefix:
+ max_tokens=1000, dense_step=150, dense_max=1000, sparse_step=1000
+ )
+ elif "cpu" in prefix:
token_sizes = generate_token_sizes(
- max_tokens=1000, dense_step=300,
- dense_max=1000, sparse_step=0)
+ max_tokens=1000, dense_step=100, dense_max=500, sparse_step=250
+ )
else:
token_sizes = generate_token_sizes(max_tokens=3000)
@@ -78,7 +86,7 @@ def main():
output_dir = os.path.join(script_dir, "output_audio")
output_data_dir = os.path.join(script_dir, "output_data")
output_plots_dir = os.path.join(script_dir, "output_plots")
-
+
# Create output directories
os.makedirs(output_dir, exist_ok=True)
os.makedirs(output_data_dir, exist_ok=True)
@@ -90,7 +98,9 @@ def main():
filename = f"{prefix}_{filename}"
return os.path.join(path, filename)
- with open(os.path.join(script_dir, "the_time_machine_hg_wells.txt"), "r", encoding="utf-8") as f:
+ with open(
+ os.path.join(script_dir, "the_time_machine_hg_wells.txt"), "r", encoding="utf-8"
+ ) as f:
text = f.read()
total_tokens = len(enc.encode(text))
@@ -100,7 +110,7 @@ def main():
results = []
test_start_time = time.time()
-
+
# Start system monitoring
monitor.start()
@@ -114,7 +124,8 @@ def main():
processing_time, audio_length = make_tts_request(
chunk,
output_dir=output_dir,
- prefix=prefix
+ prefix=prefix,
+ stream=False, # Use non-streaming mode for RTF benchmarking
)
if processing_time is None or audio_length is None:
print("Breaking loop due to error")
@@ -123,14 +134,16 @@ def main():
# Calculate RTF using the correct formula
rtf = real_time_factor(processing_time, audio_length)
print(f"Real-Time Factor: {rtf:.5f}")
-
- results.append({
- "tokens": actual_tokens,
- "processing_time": processing_time,
- "output_length": audio_length,
- "rtf": rtf,
- "elapsed_time": round(time.time() - test_start_time, 2),
- })
+
+ results.append(
+ {
+ "tokens": actual_tokens,
+ "processing_time": processing_time,
+ "output_length": audio_length,
+ "rtf": rtf,
+ "elapsed_time": round(time.time() - test_start_time, 5),
+ }
+ )
df = pd.DataFrame(results)
if df.empty:
@@ -144,89 +157,101 @@ def main():
{
"title": "Benchmark Statistics (with correct RTF)",
"stats": {
- "Total tokens processed": df['tokens'].sum(),
- "Total audio generated (s)": df['output_length'].sum(),
- "Total test duration (s)": df['elapsed_time'].max(),
- "Average processing rate (tokens/s)": df['tokens_per_second'].mean(),
- "Average RTF": df['rtf'].mean(),
- "Average Real Time Speed": 1/df['rtf'].mean()
- }
+ "Total tokens processed": df["tokens"].sum(),
+ "Total audio generated (s)": df["output_length"].sum(),
+ "Total test duration (s)": df["elapsed_time"].max(),
+ "Average processing rate (tokens/s)": df["tokens_per_second"].mean(),
+ "Average RTF": df["rtf"].mean(),
+ "Average Real Time Speed": 1 / df["rtf"].mean(),
+ },
},
{
"title": "Per-chunk Stats",
"stats": {
- "Average chunk size (tokens)": df['tokens'].mean(),
- "Min chunk size (tokens)": df['tokens'].min(),
- "Max chunk size (tokens)": df['tokens'].max(),
- "Average processing time (s)": df['processing_time'].mean(),
- "Average output length (s)": df['output_length'].mean()
- }
+ "Average chunk size (tokens)": df["tokens"].mean(),
+ "Min chunk size (tokens)": df["tokens"].min(),
+ "Max chunk size (tokens)": df["tokens"].max(),
+ "Average processing time (s)": df["processing_time"].mean(),
+ "Average output length (s)": df["output_length"].mean(),
+ },
},
{
"title": "Performance Ranges",
"stats": {
"Processing rate range (tokens/s)": f"{df['tokens_per_second'].min():.2f} - {df['tokens_per_second'].max():.2f}",
"RTF range": f"{df['rtf'].min():.2f}x - {df['rtf'].max():.2f}x",
- "Real Time Speed range": f"{1/df['rtf'].max():.2f}x - {1/df['rtf'].min():.2f}x"
- }
- }
+ "Real Time Speed range": f"{1/df['rtf'].max():.2f}x - {1/df['rtf'].min():.2f}x",
+ },
+ },
]
- write_benchmark_stats(stats, prefix_path(output_data_dir, "benchmark_stats_rtf.txt"))
+ write_benchmark_stats(
+ stats, prefix_path(output_data_dir, "benchmark_stats_rtf.txt")
+ )
# Plot Processing Time vs Token Count
plot_correlation(
- df, "tokens", "processing_time",
+ df,
+ "tokens",
+ "processing_time",
"Processing Time vs Input Size",
"Number of Input Tokens",
"Processing Time (seconds)",
- prefix_path(output_plots_dir, "processing_time_rtf.png")
+ prefix_path(output_plots_dir, "processing_time_rtf.png"),
)
# Plot RTF vs Token Count
plot_correlation(
- df, "tokens", "rtf",
+ df,
+ "tokens",
+ "rtf",
"Real-Time Factor vs Input Size",
"Number of Input Tokens",
"Real-Time Factor (processing time / audio length)",
- prefix_path(output_plots_dir, "realtime_factor_rtf.png")
+ prefix_path(output_plots_dir, "realtime_factor_rtf.png"),
)
# Stop monitoring and get final metrics
final_metrics = monitor.stop()
-
+
# Convert metrics timeline to DataFrame for stats
metrics_df = pd.DataFrame(final_metrics)
-
+
# Add system usage stats
if not metrics_df.empty:
- stats.append({
- "title": "System Usage Statistics",
- "stats": {
- "Peak CPU Usage (%)": metrics_df['cpu_percent'].max(),
- "Avg CPU Usage (%)": metrics_df['cpu_percent'].mean(),
- "Peak RAM Usage (%)": metrics_df['ram_percent'].max(),
- "Avg RAM Usage (%)": metrics_df['ram_percent'].mean(),
- "Peak RAM Used (GB)": metrics_df['ram_used_gb'].max(),
- "Avg RAM Used (GB)": metrics_df['ram_used_gb'].mean(),
+ stats.append(
+ {
+ "title": "System Usage Statistics",
+ "stats": {
+ "Peak CPU Usage (%)": metrics_df["cpu_percent"].max(),
+ "Avg CPU Usage (%)": metrics_df["cpu_percent"].mean(),
+ "Peak RAM Usage (%)": metrics_df["ram_percent"].max(),
+ "Avg RAM Usage (%)": metrics_df["ram_percent"].mean(),
+ "Peak RAM Used (GB)": metrics_df["ram_used_gb"].max(),
+ "Avg RAM Used (GB)": metrics_df["ram_used_gb"].mean(),
+ },
}
- })
- if 'gpu_memory_used' in metrics_df:
- stats[-1]["stats"].update({
- "Peak GPU Memory (MB)": metrics_df['gpu_memory_used'].max(),
- "Avg GPU Memory (MB)": metrics_df['gpu_memory_used'].mean(),
- })
-
+ )
+ if "gpu_memory_used" in metrics_df:
+ stats[-1]["stats"].update(
+ {
+ "Peak GPU Memory (MB)": metrics_df["gpu_memory_used"].max(),
+ "Avg GPU Memory (MB)": metrics_df["gpu_memory_used"].mean(),
+ }
+ )
+
# Plot system metrics
- plot_system_metrics(final_metrics, prefix_path(output_plots_dir, "system_usage_rtf.png"))
+ plot_system_metrics(
+ final_metrics, prefix_path(output_plots_dir, "system_usage_rtf.png")
+ )
# Save final results
save_json_results(
{
"results": results,
"system_metrics": final_metrics,
- "test_duration": time.time() - test_start_time
+ "test_duration": time.time() - test_start_time,
},
- prefix_path(output_data_dir, "benchmark_results_rtf.json")
+ prefix_path(output_data_dir, "benchmark_results_rtf.json"),
)
print("\nResults saved to:")
diff --git a/examples/assorted_checks/benchmarks/depr_benchmark_tts.py b/examples/assorted_checks/benchmarks/depr_benchmark_tts.py
index 989c177..6fd600e 100644
--- a/examples/assorted_checks/benchmarks/depr_benchmark_tts.py
+++ b/examples/assorted_checks/benchmarks/depr_benchmark_tts.py
@@ -1,19 +1,30 @@
import os
import json
import time
+
import pandas as pd
-from examples.assorted_checks.lib.shared_plotting import plot_system_metrics, plot_correlation
+
from examples.assorted_checks.lib.shared_utils import (
- get_system_metrics, save_json_results, write_benchmark_stats
+ save_json_results,
+ get_system_metrics,
+ write_benchmark_stats,
+)
+from examples.assorted_checks.lib.shared_plotting import (
+ plot_correlation,
+ plot_system_metrics,
)
from examples.assorted_checks.lib.shared_benchmark_utils import (
- get_text_for_tokens, make_tts_request, generate_token_sizes, enc
+ enc,
+ make_tts_request,
+ get_text_for_tokens,
+ generate_token_sizes,
)
def main():
# Get optional prefix from first command line argument
import sys
+
prefix = sys.argv[1] if len(sys.argv) > 1 else ""
# Set up paths relative to this file
@@ -21,7 +32,7 @@ def main():
output_dir = os.path.join(script_dir, "output_audio")
output_data_dir = os.path.join(script_dir, "output_data")
output_plots_dir = os.path.join(script_dir, "output_plots")
-
+
# Create output directories
os.makedirs(output_dir, exist_ok=True)
os.makedirs(output_data_dir, exist_ok=True)
@@ -43,7 +54,6 @@ def main():
total_tokens = len(enc.encode(text))
print(f"Total tokens in file: {total_tokens}")
-
token_sizes = generate_token_sizes(total_tokens)
print(f"Testing sizes: {token_sizes}")
@@ -85,7 +95,7 @@ def main():
# Save intermediate results
save_json_results(
{"results": results, "system_metrics": system_metrics},
- prefix_path(output_data_dir, "benchmark_results.json")
+ prefix_path(output_data_dir, "benchmark_results.json"),
)
# Create DataFrame and calculate stats
@@ -102,53 +112,59 @@ def main():
{
"title": "Benchmark Statistics",
"stats": {
- "Total tokens processed": df['tokens'].sum(),
- "Total audio generated (s)": df['output_length'].sum(),
- "Total test duration (s)": df['elapsed_time'].max(),
- "Average processing rate (tokens/s)": df['tokens_per_second'].mean(),
- "Average realtime factor": df['realtime_factor'].mean()
- }
+ "Total tokens processed": df["tokens"].sum(),
+ "Total audio generated (s)": df["output_length"].sum(),
+ "Total test duration (s)": df["elapsed_time"].max(),
+ "Average processing rate (tokens/s)": df["tokens_per_second"].mean(),
+ "Average realtime factor": df["realtime_factor"].mean(),
+ },
},
{
"title": "Per-chunk Stats",
"stats": {
- "Average chunk size (tokens)": df['tokens'].mean(),
- "Min chunk size (tokens)": df['tokens'].min(),
- "Max chunk size (tokens)": df['tokens'].max(),
- "Average processing time (s)": df['processing_time'].mean(),
- "Average output length (s)": df['output_length'].mean()
- }
+ "Average chunk size (tokens)": df["tokens"].mean(),
+ "Min chunk size (tokens)": df["tokens"].min(),
+ "Max chunk size (tokens)": df["tokens"].max(),
+ "Average processing time (s)": df["processing_time"].mean(),
+ "Average output length (s)": df["output_length"].mean(),
+ },
},
{
"title": "Performance Ranges",
"stats": {
"Processing rate range (tokens/s)": f"{df['tokens_per_second'].min():.2f} - {df['tokens_per_second'].max():.2f}",
- "Realtime factor range": f"{df['realtime_factor'].min():.2f}x - {df['realtime_factor'].max():.2f}x"
- }
- }
+ "Realtime factor range": f"{df['realtime_factor'].min():.2f}x - {df['realtime_factor'].max():.2f}x",
+ },
+ },
]
write_benchmark_stats(stats, prefix_path(output_data_dir, "benchmark_stats.txt"))
# Plot Processing Time vs Token Count
plot_correlation(
- df, "tokens", "processing_time",
+ df,
+ "tokens",
+ "processing_time",
"Processing Time vs Input Size",
"Number of Input Tokens",
"Processing Time (seconds)",
- prefix_path(output_plots_dir, "processing_time.png")
+ prefix_path(output_plots_dir, "processing_time.png"),
)
# Plot Realtime Factor vs Token Count
plot_correlation(
- df, "tokens", "realtime_factor",
+ df,
+ "tokens",
+ "realtime_factor",
"Realtime Factor vs Input Size",
"Number of Input Tokens",
"Realtime Factor (output length / processing time)",
- prefix_path(output_plots_dir, "realtime_factor.png")
+ prefix_path(output_plots_dir, "realtime_factor.png"),
)
# Plot system metrics
- plot_system_metrics(system_metrics, prefix_path(output_plots_dir, "system_usage.png"))
+ plot_system_metrics(
+ system_metrics, prefix_path(output_plots_dir, "system_usage.png")
+ )
print("\nResults saved to:")
print(f"- {prefix_path(output_data_dir, 'benchmark_results.json')}")
diff --git a/examples/assorted_checks/benchmarks/lib/shared_benchmark_utils.py b/examples/assorted_checks/benchmarks/lib/shared_benchmark_utils.py
index c2fd1c4..f44f7eb 100644
--- a/examples/assorted_checks/benchmarks/lib/shared_benchmark_utils.py
+++ b/examples/assorted_checks/benchmarks/lib/shared_benchmark_utils.py
@@ -1,11 +1,12 @@
"""Shared utilities specific to TTS benchmarking."""
+
import time
-from typing import List, Optional, Tuple
+from typing import List, Tuple, Optional
import requests
import tiktoken
-from .shared_utils import get_audio_length, save_audio_file
+from .shared_utils import save_audio_file, get_audio_length
# Global tokenizer instance
enc = tiktoken.get_encoding("cl100k_base")
@@ -13,11 +14,11 @@ enc = tiktoken.get_encoding("cl100k_base")
def get_text_for_tokens(text: str, num_tokens: int) -> str:
"""Get a slice of text that contains exactly num_tokens tokens.
-
+
Args:
text: Input text to slice
num_tokens: Desired number of tokens
-
+
Returns:
str: Text slice containing exactly num_tokens tokens
"""
@@ -31,44 +32,69 @@ def make_tts_request(
text: str,
output_dir: str = None,
timeout: int = 1800,
- prefix: str = ""
+ prefix: str = "",
+ stream: bool = True,
) -> Tuple[Optional[float], Optional[float]]:
"""Make TTS request using OpenAI-compatible endpoint.
-
+
Args:
text: Input text to convert to speech
output_dir: Directory to save audio files. If None, audio won't be saved.
timeout: Request timeout in seconds
prefix: Optional prefix for output filenames
-
+
Returns:
tuple: (processing_time, audio_length) in seconds, or (None, None) on error
"""
try:
start_time = time.time()
- response = requests.post(
- "http://localhost:8880/v1/audio/speech",
- json={
- "model": "kokoro",
- "input": text,
- "voice": "af",
- "response_format": "wav",
- },
- timeout=timeout,
- )
- response.raise_for_status()
+ if stream:
+ # For streaming, we need to collect all chunks
+ audio_chunks = []
+ response = requests.post(
+ "http://localhost:8880/v1/audio/speech",
+ json={
+ "model": "kokoro",
+ "input": text,
+ "voice": "af",
+ "response_format": "wav",
+ "stream": True,
+ },
+ timeout=timeout,
+ stream=True,
+ )
+ response.raise_for_status()
+
+ for chunk in response.iter_content(chunk_size=8192):
+ if chunk:
+ audio_chunks.append(chunk)
+
+ # Combine all chunks
+ audio_data = b"".join(audio_chunks)
+ else:
+ response = requests.post(
+ "http://localhost:8880/v1/audio/speech",
+ json={
+ "model": "kokoro",
+ "input": text,
+ "voice": "af",
+ "response_format": "wav",
+ "stream": False,
+ },
+ timeout=timeout,
+ )
+ response.raise_for_status()
+ audio_data = response.content
processing_time = round(time.time() - start_time, 2)
- # Calculate audio length from response content
- audio_length = get_audio_length(response.content)
-
+ # Calculate audio length from audio data
+ audio_length = get_audio_length(audio_data)
+
# Save the audio file if output_dir is provided
if output_dir:
token_count = len(enc.encode(text))
output_file = save_audio_file(
- response.content,
- f"chunk_{token_count}_tokens",
- output_dir
+ audio_data, f"chunk_{token_count}_tokens", output_dir
)
print(f"Saved audio to {output_file}")
@@ -86,26 +112,26 @@ def generate_token_sizes(
max_tokens: int,
dense_step: int = 100,
dense_max: int = 1000,
- sparse_step: int = 1000
+ sparse_step: int = 1000,
) -> List[int]:
"""Generate token size ranges with dense sampling at start.
-
+
Args:
max_tokens: Maximum number of tokens to generate sizes up to
dense_step: Step size for dense sampling range
dense_max: Maximum value for dense sampling
sparse_step: Step size for sparse sampling range
-
+
Returns:
list: Sorted list of token sizes
"""
# Dense sampling at start
dense_range = list(range(dense_step, dense_max + 1, dense_step))
-
+
if max_tokens <= dense_max or sparse_step < dense_max:
return sorted(dense_range)
# Sparse sampling for larger sizes
sparse_range = list(range(dense_max + sparse_step, max_tokens + 1, sparse_step))
-
+
# Combine and deduplicate
return sorted(list(set(dense_range + sparse_range)))
diff --git a/examples/assorted_checks/benchmarks/lib/shared_plotting.py b/examples/assorted_checks/benchmarks/lib/shared_plotting.py
index b1679df..6ca4872 100644
--- a/examples/assorted_checks/benchmarks/lib/shared_plotting.py
+++ b/examples/assorted_checks/benchmarks/lib/shared_plotting.py
@@ -1,7 +1,10 @@
"""Shared plotting utilities for benchmarks and tests."""
+
+import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
+import matplotlib.patches as patches
# Common style configurations
STYLE_CONFIG = {
@@ -10,66 +13,71 @@ STYLE_CONFIG = {
"secondary_color": "#05d9e8",
"grid_color": "#ffffff",
"text_color": "#ffffff",
- "font_sizes": {
- "title": 16,
- "label": 14,
- "tick": 12,
- "text": 10
- }
+ "font_sizes": {"title": 16, "label": 14, "tick": 12, "text": 10},
}
+
def setup_plot(fig, ax, title, xlabel=None, ylabel=None):
"""Configure plot styling with consistent theme.
-
+
Args:
fig: matplotlib figure object
ax: matplotlib axis object
title: str, plot title
xlabel: str, optional x-axis label
ylabel: str, optional y-axis label
-
+
Returns:
tuple: (fig, ax) with applied styling
"""
# Grid styling
ax.grid(True, linestyle="--", alpha=0.3, color=STYLE_CONFIG["grid_color"])
-
+
# Title and labels
- ax.set_title(title, pad=20,
- fontsize=STYLE_CONFIG["font_sizes"]["title"],
- fontweight="bold",
- color=STYLE_CONFIG["text_color"])
-
+ ax.set_title(
+ title,
+ pad=20,
+ fontsize=STYLE_CONFIG["font_sizes"]["title"],
+ fontweight="bold",
+ color=STYLE_CONFIG["text_color"],
+ )
+
if xlabel:
- ax.set_xlabel(xlabel,
- fontsize=STYLE_CONFIG["font_sizes"]["label"],
- fontweight="medium",
- color=STYLE_CONFIG["text_color"])
+ ax.set_xlabel(
+ xlabel,
+ fontsize=STYLE_CONFIG["font_sizes"]["label"],
+ fontweight="medium",
+ color=STYLE_CONFIG["text_color"],
+ )
if ylabel:
- ax.set_ylabel(ylabel,
- fontsize=STYLE_CONFIG["font_sizes"]["label"],
- fontweight="medium",
- color=STYLE_CONFIG["text_color"])
-
+ ax.set_ylabel(
+ ylabel,
+ fontsize=STYLE_CONFIG["font_sizes"]["label"],
+ fontweight="medium",
+ color=STYLE_CONFIG["text_color"],
+ )
+
# Tick styling
- ax.tick_params(labelsize=STYLE_CONFIG["font_sizes"]["tick"],
- colors=STYLE_CONFIG["text_color"])
-
+ ax.tick_params(
+ labelsize=STYLE_CONFIG["font_sizes"]["tick"], colors=STYLE_CONFIG["text_color"]
+ )
+
# Spine styling
for spine in ax.spines.values():
spine.set_color(STYLE_CONFIG["text_color"])
spine.set_alpha(0.3)
spine.set_linewidth(0.5)
-
+
# Background colors
ax.set_facecolor(STYLE_CONFIG["background_color"])
fig.patch.set_facecolor(STYLE_CONFIG["background_color"])
-
+
return fig, ax
+
def plot_system_metrics(metrics_data, output_path):
"""Create plots for system metrics over time.
-
+
Args:
metrics_data: list of dicts containing system metrics
output_path: str, path to save the output plot
@@ -77,68 +85,281 @@ def plot_system_metrics(metrics_data, output_path):
df = pd.DataFrame(metrics_data)
df["timestamp"] = pd.to_datetime(df["timestamp"])
elapsed_time = (df["timestamp"] - df["timestamp"].iloc[0]).dt.total_seconds()
-
+
# Get baseline values
baseline_cpu = df["cpu_percent"].iloc[0]
baseline_ram = df["ram_used_gb"].iloc[0]
- baseline_gpu = df["gpu_memory_used"].iloc[0] / 1024 if "gpu_memory_used" in df.columns else None
-
+ baseline_gpu = (
+ df["gpu_memory_used"].iloc[0] / 1024
+ if "gpu_memory_used" in df.columns
+ else None
+ )
+
# Convert GPU memory to GB if present
if "gpu_memory_used" in df.columns:
df["gpu_memory_gb"] = df["gpu_memory_used"] / 1024
-
+
plt.style.use("dark_background")
-
+
# Create subplots based on available metrics
has_gpu = "gpu_memory_used" in df.columns
num_plots = 3 if has_gpu else 2
fig, axes = plt.subplots(num_plots, 1, figsize=(15, 5 * num_plots))
fig.patch.set_facecolor(STYLE_CONFIG["background_color"])
-
+
# Smoothing window
window = min(5, len(df) // 2)
-
+
# Plot CPU Usage
smoothed_cpu = df["cpu_percent"].rolling(window=window, center=True).mean()
- sns.lineplot(x=elapsed_time, y=smoothed_cpu, ax=axes[0],
- color=STYLE_CONFIG["primary_color"], linewidth=2)
- axes[0].axhline(y=baseline_cpu, color=STYLE_CONFIG["secondary_color"],
- linestyle="--", alpha=0.5, label="Baseline")
- setup_plot(fig, axes[0], "CPU Usage Over Time",
- xlabel="Time (seconds)", ylabel="CPU Usage (%)")
+ sns.lineplot(
+ x=elapsed_time,
+ y=smoothed_cpu,
+ ax=axes[0],
+ color=STYLE_CONFIG["primary_color"],
+ linewidth=2,
+ )
+ axes[0].axhline(
+ y=baseline_cpu,
+ color=STYLE_CONFIG["secondary_color"],
+ linestyle="--",
+ alpha=0.5,
+ label="Baseline",
+ )
+ setup_plot(
+ fig,
+ axes[0],
+ "CPU Usage Over Time",
+ xlabel="Time (seconds)",
+ ylabel="CPU Usage (%)",
+ )
axes[0].set_ylim(0, max(df["cpu_percent"]) * 1.1)
axes[0].legend()
-
+
# Plot RAM Usage
smoothed_ram = df["ram_used_gb"].rolling(window=window, center=True).mean()
- sns.lineplot(x=elapsed_time, y=smoothed_ram, ax=axes[1],
- color=STYLE_CONFIG["secondary_color"], linewidth=2)
- axes[1].axhline(y=baseline_ram, color=STYLE_CONFIG["primary_color"],
- linestyle="--", alpha=0.5, label="Baseline")
- setup_plot(fig, axes[1], "RAM Usage Over Time",
- xlabel="Time (seconds)", ylabel="RAM Usage (GB)")
+ sns.lineplot(
+ x=elapsed_time,
+ y=smoothed_ram,
+ ax=axes[1],
+ color=STYLE_CONFIG["secondary_color"],
+ linewidth=2,
+ )
+ axes[1].axhline(
+ y=baseline_ram,
+ color=STYLE_CONFIG["primary_color"],
+ linestyle="--",
+ alpha=0.5,
+ label="Baseline",
+ )
+ setup_plot(
+ fig,
+ axes[1],
+ "RAM Usage Over Time",
+ xlabel="Time (seconds)",
+ ylabel="RAM Usage (GB)",
+ )
axes[1].set_ylim(0, max(df["ram_used_gb"]) * 1.1)
axes[1].legend()
-
+
# Plot GPU Memory if available
if has_gpu:
smoothed_gpu = df["gpu_memory_gb"].rolling(window=window, center=True).mean()
- sns.lineplot(x=elapsed_time, y=smoothed_gpu, ax=axes[2],
- color=STYLE_CONFIG["primary_color"], linewidth=2)
- axes[2].axhline(y=baseline_gpu, color=STYLE_CONFIG["secondary_color"],
- linestyle="--", alpha=0.5, label="Baseline")
- setup_plot(fig, axes[2], "GPU Memory Usage Over Time",
- xlabel="Time (seconds)", ylabel="GPU Memory (GB)")
+ sns.lineplot(
+ x=elapsed_time,
+ y=smoothed_gpu,
+ ax=axes[2],
+ color=STYLE_CONFIG["primary_color"],
+ linewidth=2,
+ )
+ axes[2].axhline(
+ y=baseline_gpu,
+ color=STYLE_CONFIG["secondary_color"],
+ linestyle="--",
+ alpha=0.5,
+ label="Baseline",
+ )
+ setup_plot(
+ fig,
+ axes[2],
+ "GPU Memory Usage Over Time",
+ xlabel="Time (seconds)",
+ ylabel="GPU Memory (GB)",
+ )
axes[2].set_ylim(0, max(df["gpu_memory_gb"]) * 1.1)
axes[2].legend()
-
+
plt.tight_layout()
plt.savefig(output_path, dpi=300, bbox_inches="tight")
plt.close()
+
+def plot_timeline(df, output_path, suffix="", prefix=""):
+ """Create timeline plot showing latency for each run.
+
+    Args:
+        df: pandas DataFrame containing run data with columns:
+            - target_tokens: number of tokens
+            - run_number: run iteration
+            - time_to_first_chunk: latency to first audio chunk
+            - audio_length: length of the generated audio in seconds
+        output_path: str, path to save the output plot
+        suffix: str, optional suffix appended to the plot title
+        prefix: str, optional prefix (e.g. hardware tag) prepended to the plot title
+    """
+ plt.style.use("dark_background")
+
+ # Sort by tokens and run number
+ df = df.sort_values(["target_tokens", "run_number"])
+
+ # Create figure and axis
+ fig, ax = plt.subplots(figsize=(12, 6))
+
+ # Calculate y positions for each run with tighter grouping
+ unique_tokens = sorted(df["target_tokens"].unique())
+ y_positions = {}
+ current_y = 0
+ group_spacing = 0.8 # Space between groups
+ run_spacing = 0.2 # Space between runs in a group
+
+ for tokens in unique_tokens:
+ runs = df[df["target_tokens"] == tokens]
+ base_y = current_y
+ for i, (_, run) in enumerate(runs.iterrows()):
+ y_positions[(tokens, run["run_number"])] = base_y + (i * run_spacing)
+ current_y = base_y + (len(runs) * run_spacing) + group_spacing
+
+ # Plot bars and points with more transparency
+ bar_height = 0.15
+ for _, row in df.iterrows():
+ y = y_positions[(row["target_tokens"], row["run_number"])]
+ latency = row["time_to_first_chunk"]
+
+ # Latency bar
+ ax.add_patch(
+ patches.Rectangle(
+ (0, y - bar_height / 2),
+ latency,
+ bar_height,
+ facecolor=STYLE_CONFIG["primary_color"],
+ alpha=0.3,
+ )
+ )
+
+ # End point
+ ax.plot(
+ latency,
+ y,
+ "o",
+ color=STYLE_CONFIG["secondary_color"],
+ markersize=4,
+ alpha=0.5,
+ )
+
+ # Add mean lines and values for each token group
+ for tokens in unique_tokens:
+ token_runs = df[df["target_tokens"] == tokens]
+ mean_latency = token_runs["time_to_first_chunk"].mean()
+ y_positions_for_token = [
+ y_positions[(tokens, run["run_number"])] for _, run in token_runs.iterrows()
+ ]
+ min_y = min(y_positions_for_token)
+ max_y = max(y_positions_for_token)
+ group_center = (min_y + max_y) / 2
+
+ # Plot mean line with gradient alpha
+ gradient = np.linspace(0.2, 0.8, 100)
+ for i in range(len(gradient) - 1):
+ y1 = (
+ min_y
+ - bar_height
+ + (max_y - min_y + 2 * bar_height) * (i / len(gradient))
+ )
+ y2 = (
+ min_y
+ - bar_height
+ + (max_y - min_y + 2 * bar_height) * ((i + 1) / len(gradient))
+ )
+ ax.plot(
+ [mean_latency, mean_latency],
+ [y1, y2],
+ "-",
+ color=STYLE_CONFIG["secondary_color"],
+ linewidth=3,
+ alpha=gradient[i],
+ )
+
+ # Add mean value label with background
+ label_text = f"Mean: {mean_latency:.3f}s"
+ bbox_props = dict(
+ facecolor=STYLE_CONFIG["background_color"],
+ edgecolor=STYLE_CONFIG["secondary_color"],
+ alpha=0.8,
+ pad=3,
+ linewidth=1,
+ )
+ ax.text(
+ mean_latency + 0.02,
+ group_center,
+ label_text,
+ color=STYLE_CONFIG["secondary_color"],
+ va="center",
+ fontsize=10,
+ fontweight="bold",
+ bbox=bbox_props,
+ )
+
+ # Customize plot
+ ax.set_ylim(-1, current_y)
+ ax.set_xlim(0, df["time_to_first_chunk"].max() * 1.3) # Extra space for labels
+
+ # Add labels for token groups with tighter spacing
+ group_positions = {}
+ for tokens in unique_tokens:
+ runs = df[df["target_tokens"] == tokens]
+ y_positions_for_token = [
+ y_positions[(tokens, run["run_number"])] for _, run in runs.iterrows()
+ ]
+ group_positions[tokens] = sum(y_positions_for_token) / len(
+ y_positions_for_token
+ )
+ plt.axhline(
+ y=min(y_positions_for_token) - bar_height,
+ color="white",
+ alpha=0.1,
+ linestyle="-",
+ )
+
+ # Calculate mean audio length for each token group
+ audio_lengths = {}
+ for tokens in unique_tokens:
+ token_runs = df[df["target_tokens"] == tokens]
+ audio_lengths[tokens] = token_runs["audio_length"].mean()
+
+ # Set y-ticks at group centers with token counts and audio lengths
+ plt.yticks(
+ list(group_positions.values()),
+ [
+ f"{tokens} tokens\n({audio_lengths[tokens]:.1f}s)"
+ for tokens in group_positions.keys()
+ ],
+ fontsize=10,
+ )
+
+ # Customize appearance
+ setup_plot(
+ fig,
+ ax,
+ prefix.upper() + " Time-To-Audio Latency " + suffix,
+ xlabel="Time (seconds)",
+ ylabel="Input Size",
+ )
+
+ plt.tight_layout()
+ plt.savefig(output_path, dpi=300, bbox_inches="tight")
+ plt.close()
+
+
def plot_correlation(df, x, y, title, xlabel, ylabel, output_path):
"""Create correlation plot with regression line and correlation coefficient.
-
+
Args:
df: pandas DataFrame containing the data
x: str, column name for x-axis
@@ -149,28 +370,40 @@ def plot_correlation(df, x, y, title, xlabel, ylabel, output_path):
output_path: str, path to save the output plot
"""
plt.style.use("dark_background")
-
+
fig, ax = plt.subplots(figsize=(12, 8))
-
+
# Scatter plot
- sns.scatterplot(data=df, x=x, y=y, s=100, alpha=0.6,
- color=STYLE_CONFIG["primary_color"])
-
+ sns.scatterplot(
+ data=df, x=x, y=y, s=100, alpha=0.6, color=STYLE_CONFIG["primary_color"]
+ )
+
# Regression line
- sns.regplot(data=df, x=x, y=y, scatter=False,
- color=STYLE_CONFIG["secondary_color"],
- line_kws={"linewidth": 2})
-
+ sns.regplot(
+ data=df,
+ x=x,
+ y=y,
+ scatter=False,
+ color=STYLE_CONFIG["secondary_color"],
+ line_kws={"linewidth": 2},
+ )
+
# Add correlation coefficient
corr = df[x].corr(df[y])
- plt.text(0.05, 0.95, f"Correlation: {corr:.2f}",
- transform=ax.transAxes,
- fontsize=STYLE_CONFIG["font_sizes"]["text"],
- color=STYLE_CONFIG["text_color"],
- bbox=dict(facecolor=STYLE_CONFIG["background_color"],
- edgecolor=STYLE_CONFIG["text_color"],
- alpha=0.7))
-
+ plt.text(
+ 0.05,
+ 0.95,
+ f"Correlation: {corr:.2f}",
+ transform=ax.transAxes,
+ fontsize=STYLE_CONFIG["font_sizes"]["text"],
+ color=STYLE_CONFIG["text_color"],
+ bbox=dict(
+ facecolor=STYLE_CONFIG["background_color"],
+ edgecolor=STYLE_CONFIG["text_color"],
+ alpha=0.7,
+ ),
+ )
+
setup_plot(fig, ax, title, xlabel=xlabel, ylabel=ylabel)
plt.savefig(output_path, dpi=300, bbox_inches="tight")
plt.close()
diff --git a/examples/assorted_checks/benchmarks/lib/shared_utils.py b/examples/assorted_checks/benchmarks/lib/shared_utils.py
index a9c872e..1e3fbdb 100644
--- a/examples/assorted_checks/benchmarks/lib/shared_utils.py
+++ b/examples/assorted_checks/benchmarks/lib/shared_utils.py
@@ -1,9 +1,10 @@
"""Shared utilities for benchmarks and tests."""
+
import os
import json
import subprocess
+from typing import Any, Dict, List, Union, Optional
from datetime import datetime
-from typing import Any, Dict, List, Optional, Union
import psutil
import scipy.io.wavfile as wavfile
@@ -12,28 +13,46 @@ import scipy.io.wavfile as wavfile
TORCH_AVAILABLE = False
try:
import torch
+
TORCH_AVAILABLE = torch.cuda.is_available()
except ImportError:
pass
+def check_audio_file_is_silent(audio_path: str, threshold: float = 0.01) -> bool:
+ """Check if an audio file is silent by comparing peak amplitude to a threshold.
+
+ Args:
+ audio_path: Path to the audio file
+ threshold: Peak amplitude threshold for silence
+
+ Returns:
+ bool: True if audio is silent, False otherwise
+ """
+ rate, data = wavfile.read(audio_path)
+ peak_amplitude = max(abs(data.min()), abs(data.max())) / 32768.0 # 16-bit audio
+
+ return peak_amplitude < threshold
+
+
def get_audio_length(audio_data: bytes, temp_dir: str = None) -> float:
"""Get audio length in seconds from bytes data.
-
+
Args:
audio_data: Raw audio bytes
temp_dir: Directory for temporary file. If None, uses system temp directory.
-
+
Returns:
float: Audio length in seconds
"""
if temp_dir is None:
import tempfile
+
temp_dir = tempfile.gettempdir()
-
+
temp_path = os.path.join(temp_dir, "temp.wav")
os.makedirs(temp_dir, exist_ok=True)
-
+
with open(temp_path, "wb") as f:
f.write(audio_data)
@@ -47,11 +66,11 @@ def get_audio_length(audio_data: bytes, temp_dir: str = None) -> float:
def get_gpu_memory(average: bool = True) -> Optional[Union[float, List[float]]]:
"""Get GPU memory usage using PyTorch if available, falling back to nvidia-smi.
-
+
Args:
average: If True and multiple GPUs present, returns average memory usage.
If False, returns list of memory usage per GPU.
-
+
Returns:
float or List[float] or None: GPU memory usage in MB. Returns None if no GPU available.
If average=False and multiple GPUs present, returns list of values.
@@ -60,19 +79,23 @@ def get_gpu_memory(average: bool = True) -> Optional[Union[float, List[float]]]:
n_gpus = torch.cuda.device_count()
memory_used = []
for i in range(n_gpus):
- memory_used.append(torch.cuda.memory_allocated(i) / 1024**2) # Convert to MB
-
+ memory_used.append(
+ torch.cuda.memory_allocated(i) / 1024**2
+ ) # Convert to MB
+
if average and len(memory_used) > 0:
return sum(memory_used) / len(memory_used)
return memory_used if len(memory_used) > 1 else memory_used[0]
-
+
# Fall back to nvidia-smi
try:
result = subprocess.check_output(
["nvidia-smi", "--query-gpu=memory.used", "--format=csv,nounits,noheader"]
)
- memory_values = [float(x.strip()) for x in result.decode("utf-8").split("\n") if x.strip()]
-
+ memory_values = [
+ float(x.strip()) for x in result.decode("utf-8").split("\n") if x.strip()
+ ]
+
if average and len(memory_values) > 0:
return sum(memory_values) / len(memory_values)
return memory_values if len(memory_values) > 1 else memory_values[0]
@@ -82,14 +105,14 @@ def get_gpu_memory(average: bool = True) -> Optional[Union[float, List[float]]]:
def get_system_metrics() -> Dict[str, Union[str, float]]:
"""Get current system metrics including CPU, RAM, and GPU if available.
-
+
Returns:
dict: System metrics including timestamp, CPU%, RAM%, RAM GB, and GPU MB if available
"""
# Get per-CPU percentages and calculate average
cpu_percentages = psutil.cpu_percent(percpu=True)
avg_cpu = sum(cpu_percentages) / len(cpu_percentages)
-
+
metrics = {
"timestamp": datetime.now().isoformat(),
"cpu_percent": round(avg_cpu, 2),
@@ -106,40 +129,40 @@ def get_system_metrics() -> Dict[str, Union[str, float]]:
def save_audio_file(audio_data: bytes, identifier: str, output_dir: str) -> str:
"""Save audio data to a file with proper naming and directory creation.
-
+
Args:
audio_data: Raw audio bytes
identifier: String to identify this audio file (e.g. token count, test name)
output_dir: Directory to save the file
-
+
Returns:
str: Path to the saved audio file
"""
os.makedirs(output_dir, exist_ok=True)
output_file = os.path.join(output_dir, f"{identifier}.wav")
-
+
with open(output_file, "wb") as f:
f.write(audio_data)
-
+
return output_file
def write_benchmark_stats(stats: List[Dict[str, Any]], output_file: str) -> None:
"""Write benchmark statistics to a file in a clean, organized format.
-
+
Args:
stats: List of dictionaries containing stat name/value pairs
output_file: Path to output file
"""
os.makedirs(os.path.dirname(output_file), exist_ok=True)
-
+
with open(output_file, "w") as f:
for section in stats:
# Write section header
f.write(f"=== {section['title']} ===\n\n")
-
+
# Write stats
- for label, value in section['stats'].items():
+ for label, value in section["stats"].items():
if isinstance(value, float):
f.write(f"{label}: {value:.2f}\n")
else:
@@ -149,7 +172,7 @@ def write_benchmark_stats(stats: List[Dict[str, Any]], output_file: str) -> None
def save_json_results(results: Dict[str, Any], output_file: str) -> None:
"""Save benchmark results to a JSON file with proper formatting.
-
+
Args:
results: Dictionary of results to save
output_file: Path to output file
@@ -159,14 +182,16 @@ def save_json_results(results: Dict[str, Any], output_file: str) -> None:
json.dump(results, f, indent=2)
-def real_time_factor(processing_time: float, audio_length: float, decimals: int = 2) -> float:
+def real_time_factor(
+ processing_time: float, audio_length: float, decimals: int = 2
+) -> float:
"""Calculate Real-Time Factor (RTF) as processing-time / length-of-audio.
-
+
Args:
processing_time: Time taken to process/generate audio
audio_length: Length of the generated audio
decimals: Number of decimal places to round to
-
+
Returns:
float: RTF value
"""
diff --git a/examples/assorted_checks/benchmarks/lib/stream_utils.py b/examples/assorted_checks/benchmarks/lib/stream_utils.py
new file mode 100644
index 0000000..623b18a
--- /dev/null
+++ b/examples/assorted_checks/benchmarks/lib/stream_utils.py
@@ -0,0 +1,205 @@
+#!/usr/bin/env python3
+import os
+import time
+import wave
+from typing import Any, Dict, List, Callable, Optional
+
+import pandas as pd
+import scipy.io.wavfile as wavfile
+
+from .shared_utils import save_json_results
+from .shared_plotting import plot_timeline, plot_correlation
+from .shared_benchmark_utils import enc, get_text_for_tokens
+
+
+def check_audio_silence(audio_path: str) -> bool:
+ """Check if audio file contains only silence"""
+ sample_rate, audio_data = wavfile.read(audio_path)
+ # Convert to float for RMS calculation
+ audio_float = audio_data.astype(float)
+ # Calculate RMS value
+ rms = (audio_float**2).mean() ** 0.5
+ # Define silence threshold (adjust if needed)
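+    # (an RMS of 50 is roughly 0.15% of the 16-bit full-scale value of 32767)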
+ SILENCE_THRESHOLD = 50.0
+ return rms < SILENCE_THRESHOLD
+
+
+def process_benchmark_results(
+ all_results: List[Dict[str, Any]], token_sizes: List[int]
+) -> Dict[str, Any]:
+ """Process benchmark results and generate summary"""
+ summary = {}
+ for tokens in token_sizes:
+ matching_results = [
+ r for r in all_results if r["target_tokens"] == tokens and not r["error"]
+ ]
+ if matching_results:
+ avg_first_chunk = sum(
+ r["time_to_first_chunk"] for r in matching_results
+ ) / len(matching_results)
+ avg_total = sum(r["total_time"] for r in matching_results) / len(
+ matching_results
+ )
+ avg_audio_length = sum(r["audio_length"] for r in matching_results) / len(
+ matching_results
+ )
+ summary[tokens] = {
+ "avg_time_to_first_chunk": round(avg_first_chunk, 3),
+ "avg_total_time": round(avg_total, 3),
+ "avg_audio_length": round(avg_audio_length, 3),
+ "num_successful_runs": len(matching_results),
+ }
+ return summary
+
+
+def save_benchmark_results(
+ all_results: List[Dict[str, Any]],
+ summary: Dict[str, Any],
+ output_data_dir: str,
+ output_plots_dir: str,
+ suffix: str,
+ plot_title_suffix: str,
+ prefix: str = "",
+):
+ """Save benchmark results and generate plots"""
+ # Save results
+ results_data = {
+ "individual_runs": all_results,
+ "summary": summary,
+ "timestamp": time.strftime("%Y-%m-%d %H:%M:%S"),
+ }
+ save_json_results(
+ results_data,
+ os.path.join(output_data_dir, f"{prefix}first_token_benchmark{suffix}.json"),
+ )
+
+ # Create DataFrame for plotting
+ df = pd.DataFrame(all_results)
+
+ # Create plots
+ plot_correlation(
+ df,
+ "target_tokens",
+ "time_to_first_chunk",
+ f"Time to First Audio vs Input Size {plot_title_suffix}",
+ "Number of Input Tokens",
+ "Time to First Audio (seconds)",
+ os.path.join(output_plots_dir, f"{prefix}first_token_latency{suffix}.png"),
+ )
+
+ plot_correlation(
+ df,
+ "target_tokens",
+ "total_time",
+ f"Total Time vs Input Size {plot_title_suffix}",
+ "Number of Input Tokens",
+ "Total Time (seconds)",
+ os.path.join(output_plots_dir, f"{prefix}total_time_latency{suffix}.png"),
+ )
+
+ plot_timeline(
+ df,
+ os.path.join(output_plots_dir, f"{prefix}first_token_timeline{suffix}.png"),
+ suffix=plot_title_suffix,
+ )
+
+
+def run_benchmark(
+ measure_func: Callable,
+ output_dir: str,
+ output_data_dir: str,
+ output_plots_dir: str,
+ suffix: str = "",
+ plot_title_suffix: str = "",
+ num_runs: int = 5,
+ client=None,
+ prefix="",
+):
+ """Run benchmark with the given measurement function"""
+ # Create output directories
+ os.makedirs(output_dir, exist_ok=True)
+ os.makedirs(output_data_dir, exist_ok=True)
+ os.makedirs(output_plots_dir, exist_ok=True)
+
+ # Load sample text
+ script_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
+ with open(
+ os.path.join(script_dir, "the_time_machine_hg_wells.txt"), "r", encoding="utf-8"
+ ) as f:
+ text = f.read()
+
+ # Test specific token counts
+ token_sizes = [10, 50, 100, 250, 500]
+ all_results = []
+ silent_files = []
+
+ for tokens in token_sizes:
+ print(
+ f"\nTesting {tokens} tokens{' ' + plot_title_suffix if plot_title_suffix else ''}"
+ )
+ test_text = get_text_for_tokens(text, tokens)
+ actual_tokens = len(enc.encode(test_text))
+ print(f"Text preview: {test_text[:50]}...")
+
+ for i in range(num_runs):
+ print(f"Run {i+1}/{num_runs}...")
+ result = measure_func(test_text, output_dir, tokens, i + 1)
+ result["target_tokens"] = tokens
+ result["actual_tokens"] = actual_tokens
+ result["run_number"] = i + 1
+
+ # Handle time to first audio
+            first_chunk = result.get("time_to_first_chunk")
+ print(
+ f"Time to First Audio: {f'{first_chunk:.3f}s' if first_chunk is not None else 'N/A'}"
+ )
+
+ # Handle total time
+            total_time = result.get("total_time")
+ print(
+ f"Time to Save Complete: {f'{total_time:.3f}s' if total_time is not None else 'N/A'}"
+ )
+
+ # Handle audio length
+            audio_length = result.get("audio_length")
+ print(
+ f"Audio length: {f'{audio_length:.3f}s' if audio_length is not None else 'N/A'}"
+ )
+ # Calculate streaming overhead only if both values exist
+ if total_time is not None and first_chunk is not None:
+ print(f"Streaming overhead: {(total_time - first_chunk):.3f}s")
+ else:
+ print("Streaming overhead: N/A")
+
+ if result["error"]:
+ print(f"Error: {result['error']}")
+ elif result["audio_path"] and check_audio_silence(result["audio_path"]):
+ silent_files.append(result["audio_path"])
+
+ all_results.append(result)
+
+ # Process and save results
+ summary = process_benchmark_results(all_results, token_sizes)
+ save_benchmark_results(
+ all_results,
+ summary,
+ output_data_dir,
+ output_plots_dir,
+ suffix,
+        plot_title_suffix,
+        prefix=prefix,
+    )
+
+ # Print paths
+ print("\nResults and plots saved to:")
+ print(f"- {os.path.join(output_data_dir, f'{prefix}first_token_benchmark{suffix}.json')}")
+ print(f"- {os.path.join(output_plots_dir, f'{prefix}first_token_latency{suffix}.png')}")
+ print(f"- {os.path.join(output_plots_dir, f'{prefix}total_time_latency{suffix}.png')}")
+ print(f"- {os.path.join(output_plots_dir, f'{prefix}first_token_timeline{suffix}.png')}")
+
+ # Print silence check summary
+ if silent_files:
+ print("\nWARNING: The following files contain only silence:")
+ for file in silent_files:
+ print(f"- {file}")
+ else:
+ print("\nAll generated audio files contain valid audio content.")
diff --git a/examples/assorted_checks/benchmarks/output_data/benchmark_results.json b/examples/assorted_checks/benchmarks/output_data/benchmark_results.json
deleted file mode 100644
index 5c60933..0000000
--- a/examples/assorted_checks/benchmarks/output_data/benchmark_results.json
+++ /dev/null
@@ -1,111 +0,0 @@
-{
- "results": [
- {
- "tokens": 100,
- "processing_time": 18.833295583724976,
- "output_length": 31.15,
- "realtime_factor": 1.6539856161403135,
- "elapsed_time": 19.024322748184204
- },
- {
- "tokens": 200,
- "processing_time": 38.95506024360657,
- "output_length": 62.6,
- "realtime_factor": 1.6069799304257042,
- "elapsed_time": 58.21527123451233
- },
- {
- "tokens": 300,
- "processing_time": 49.74252939224243,
- "output_length": 96.325,
- "realtime_factor": 1.9364716908630366,
- "elapsed_time": 108.19673728942871
- },
- {
- "tokens": 400,
- "processing_time": 61.349056243896484,
- "output_length": 128.575,
- "realtime_factor": 2.095794261102292,
- "elapsed_time": 169.733656167984
- },
- {
- "tokens": 500,
- "processing_time": 82.86568236351013,
- "output_length": 158.575,
- "realtime_factor": 1.9136389815071193,
- "elapsed_time": 252.7968451976776
- }
- ],
- "system_metrics": [
- {
- "timestamp": "2025-01-03T00:13:49.865330",
- "cpu_percent": 8.0,
- "ram_percent": 39.4,
- "ram_used_gb": 25.03811264038086,
- "gpu_memory_used": 1204.0
- },
- {
- "timestamp": "2025-01-03T00:14:08.781551",
- "cpu_percent": 26.8,
- "ram_percent": 42.6,
- "ram_used_gb": 27.090862274169922,
- "gpu_memory_used": 1225.0
- },
- {
- "timestamp": "2025-01-03T00:14:08.916973",
- "cpu_percent": 16.1,
- "ram_percent": 42.6,
- "ram_used_gb": 27.089553833007812,
- "gpu_memory_used": 1225.0
- },
- {
- "timestamp": "2025-01-03T00:14:47.979053",
- "cpu_percent": 31.5,
- "ram_percent": 43.6,
- "ram_used_gb": 27.714427947998047,
- "gpu_memory_used": 1225.0
- },
- {
- "timestamp": "2025-01-03T00:14:48.098976",
- "cpu_percent": 20.0,
- "ram_percent": 43.6,
- "ram_used_gb": 27.704315185546875,
- "gpu_memory_used": 1211.0
- },
- {
- "timestamp": "2025-01-03T00:15:37.944729",
- "cpu_percent": 29.7,
- "ram_percent": 38.6,
- "ram_used_gb": 24.53925323486328,
- "gpu_memory_used": 1217.0
- },
- {
- "timestamp": "2025-01-03T00:15:38.071915",
- "cpu_percent": 8.6,
- "ram_percent": 38.5,
- "ram_used_gb": 24.51690673828125,
- "gpu_memory_used": 1208.0
- },
- {
- "timestamp": "2025-01-03T00:16:39.525449",
- "cpu_percent": 23.4,
- "ram_percent": 38.8,
- "ram_used_gb": 24.71230697631836,
- "gpu_memory_used": 1221.0
- },
- {
- "timestamp": "2025-01-03T00:16:39.612442",
- "cpu_percent": 5.5,
- "ram_percent": 38.9,
- "ram_used_gb": 24.72066879272461,
- "gpu_memory_used": 1221.0
- },
- {
- "timestamp": "2025-01-03T00:18:02.569076",
- "cpu_percent": 27.4,
- "ram_percent": 39.1,
- "ram_used_gb": 24.868202209472656,
- "gpu_memory_used": 1264.0
- }
- ]
-}
\ No newline at end of file
diff --git a/examples/assorted_checks/benchmarks/output_data/benchmark_results_cpu.json b/examples/assorted_checks/benchmarks/output_data/benchmark_results_cpu.json
deleted file mode 100644
index 52f8f04..0000000
--- a/examples/assorted_checks/benchmarks/output_data/benchmark_results_cpu.json
+++ /dev/null
@@ -1,216 +0,0 @@
-{
- "results": [
- {
- "tokens": 100,
- "processing_time": 14.349808931350708,
- "output_length": 31.15,
- "rtf": 0.46,
- "elapsed_time": 14.716031074523926
- },
- {
- "tokens": 200,
- "processing_time": 28.341803312301636,
- "output_length": 62.6,
- "rtf": 0.45,
- "elapsed_time": 43.44207406044006
- },
- {
- "tokens": 300,
- "processing_time": 43.352553606033325,
- "output_length": 96.325,
- "rtf": 0.45,
- "elapsed_time": 87.26906609535217
- },
- {
- "tokens": 400,
- "processing_time": 71.02449822425842,
- "output_length": 128.575,
- "rtf": 0.55,
- "elapsed_time": 158.7198133468628
- },
- {
- "tokens": 500,
- "processing_time": 70.92521691322327,
- "output_length": 158.575,
- "rtf": 0.45,
- "elapsed_time": 230.01379895210266
- },
- {
- "tokens": 600,
- "processing_time": 83.6328592300415,
- "output_length": 189.25,
- "rtf": 0.44,
- "elapsed_time": 314.02610969543457
- },
- {
- "tokens": 700,
- "processing_time": 103.0810194015503,
- "output_length": 222.075,
- "rtf": 0.46,
- "elapsed_time": 417.5678551197052
- },
- {
- "tokens": 800,
- "processing_time": 127.02162909507751,
- "output_length": 253.85,
- "rtf": 0.5,
- "elapsed_time": 545.0128681659698
- },
- {
- "tokens": 900,
- "processing_time": 130.49781227111816,
- "output_length": 283.775,
- "rtf": 0.46,
- "elapsed_time": 675.8943417072296
- },
- {
- "tokens": 1000,
- "processing_time": 154.76425909996033,
- "output_length": 315.475,
- "rtf": 0.49,
- "elapsed_time": 831.0677945613861
- }
- ],
- "system_metrics": [
- {
- "timestamp": "2025-01-03T00:23:52.896889",
- "cpu_percent": 4.5,
- "ram_percent": 39.1,
- "ram_used_gb": 24.86032485961914,
- "gpu_memory_used": 1281.0
- },
- {
- "timestamp": "2025-01-03T00:24:07.429461",
- "cpu_percent": 4.5,
- "ram_percent": 39.1,
- "ram_used_gb": 24.847564697265625,
- "gpu_memory_used": 1285.0
- },
- {
- "timestamp": "2025-01-03T00:24:07.620587",
- "cpu_percent": 2.7,
- "ram_percent": 39.1,
- "ram_used_gb": 24.846607208251953,
- "gpu_memory_used": 1275.0
- },
- {
- "timestamp": "2025-01-03T00:24:36.140754",
- "cpu_percent": 5.4,
- "ram_percent": 39.1,
- "ram_used_gb": 24.857810974121094,
- "gpu_memory_used": 1267.0
- },
- {
- "timestamp": "2025-01-03T00:24:36.340675",
- "cpu_percent": 6.2,
- "ram_percent": 39.1,
- "ram_used_gb": 24.85773468017578,
- "gpu_memory_used": 1267.0
- },
- {
- "timestamp": "2025-01-03T00:25:19.905634",
- "cpu_percent": 29.1,
- "ram_percent": 39.2,
- "ram_used_gb": 24.920318603515625,
- "gpu_memory_used": 1256.0
- },
- {
- "timestamp": "2025-01-03T00:25:20.182219",
- "cpu_percent": 20.0,
- "ram_percent": 39.2,
- "ram_used_gb": 24.930198669433594,
- "gpu_memory_used": 1256.0
- },
- {
- "timestamp": "2025-01-03T00:26:31.414760",
- "cpu_percent": 5.3,
- "ram_percent": 39.5,
- "ram_used_gb": 25.127891540527344,
- "gpu_memory_used": 1259.0
- },
- {
- "timestamp": "2025-01-03T00:26:31.617256",
- "cpu_percent": 3.6,
- "ram_percent": 39.5,
- "ram_used_gb": 25.126346588134766,
- "gpu_memory_used": 1252.0
- },
- {
- "timestamp": "2025-01-03T00:27:42.736097",
- "cpu_percent": 10.5,
- "ram_percent": 39.5,
- "ram_used_gb": 25.100231170654297,
- "gpu_memory_used": 1249.0
- },
- {
- "timestamp": "2025-01-03T00:27:42.912870",
- "cpu_percent": 5.3,
- "ram_percent": 39.5,
- "ram_used_gb": 25.098285675048828,
- "gpu_memory_used": 1249.0
- },
- {
- "timestamp": "2025-01-03T00:29:06.725264",
- "cpu_percent": 8.9,
- "ram_percent": 39.5,
- "ram_used_gb": 25.123123168945312,
- "gpu_memory_used": 1239.0
- },
- {
- "timestamp": "2025-01-03T00:29:06.928826",
- "cpu_percent": 5.5,
- "ram_percent": 39.5,
- "ram_used_gb": 25.128646850585938,
- "gpu_memory_used": 1239.0
- },
- {
- "timestamp": "2025-01-03T00:30:50.206349",
- "cpu_percent": 49.6,
- "ram_percent": 39.6,
- "ram_used_gb": 25.162948608398438,
- "gpu_memory_used": 1245.0
- },
- {
- "timestamp": "2025-01-03T00:30:50.491837",
- "cpu_percent": 14.8,
- "ram_percent": 39.5,
- "ram_used_gb": 25.13379669189453,
- "gpu_memory_used": 1245.0
- },
- {
- "timestamp": "2025-01-03T00:32:57.721467",
- "cpu_percent": 6.2,
- "ram_percent": 39.6,
- "ram_used_gb": 25.187721252441406,
- "gpu_memory_used": 1384.0
- },
- {
- "timestamp": "2025-01-03T00:32:57.913350",
- "cpu_percent": 3.6,
- "ram_percent": 39.6,
- "ram_used_gb": 25.199390411376953,
- "gpu_memory_used": 1384.0
- },
- {
- "timestamp": "2025-01-03T00:35:08.608730",
- "cpu_percent": 6.3,
- "ram_percent": 39.8,
- "ram_used_gb": 25.311710357666016,
- "gpu_memory_used": 1330.0
- },
- {
- "timestamp": "2025-01-03T00:35:08.791851",
- "cpu_percent": 5.3,
- "ram_percent": 39.8,
- "ram_used_gb": 25.326683044433594,
- "gpu_memory_used": 1333.0
- },
- {
- "timestamp": "2025-01-03T00:37:43.782406",
- "cpu_percent": 6.8,
- "ram_percent": 40.6,
- "ram_used_gb": 25.803058624267578,
- "gpu_memory_used": 1409.0
- }
- ]
-}
\ No newline at end of file
diff --git a/examples/assorted_checks/benchmarks/output_data/benchmark_results_rtf.json b/examples/assorted_checks/benchmarks/output_data/benchmark_results_rtf.json
deleted file mode 100644
index 59ad009..0000000
--- a/examples/assorted_checks/benchmarks/output_data/benchmark_results_rtf.json
+++ /dev/null
@@ -1,300 +0,0 @@
-{
- "results": [
- {
- "tokens": 100,
- "processing_time": 0.96,
- "output_length": 31.1,
- "rtf": 0.03,
- "elapsed_time": 1.11
- },
- {
- "tokens": 250,
- "processing_time": 2.23,
- "output_length": 77.17,
- "rtf": 0.03,
- "elapsed_time": 3.49
- },
- {
- "tokens": 400,
- "processing_time": 4.05,
- "output_length": 128.05,
- "rtf": 0.03,
- "elapsed_time": 7.77
- },
- {
- "tokens": 550,
- "processing_time": 4.06,
- "output_length": 171.45,
- "rtf": 0.02,
- "elapsed_time": 12.0
- },
- {
- "tokens": 700,
- "processing_time": 6.01,
- "output_length": 221.6,
- "rtf": 0.03,
- "elapsed_time": 18.16
- },
- {
- "tokens": 850,
- "processing_time": 6.9,
- "output_length": 269.1,
- "rtf": 0.03,
- "elapsed_time": 25.21
- },
- {
- "tokens": 1000,
- "processing_time": 7.65,
- "output_length": 315.05,
- "rtf": 0.02,
- "elapsed_time": 33.03
- },
- {
- "tokens": 6000,
- "processing_time": 48.7,
- "output_length": 1837.1,
- "rtf": 0.03,
- "elapsed_time": 82.21
- },
- {
- "tokens": 11000,
- "processing_time": 92.44,
- "output_length": 3388.57,
- "rtf": 0.03,
- "elapsed_time": 175.46
- },
- {
- "tokens": 16000,
- "processing_time": 163.61,
- "output_length": 4977.32,
- "rtf": 0.03,
- "elapsed_time": 340.46
- },
- {
- "tokens": 21000,
- "processing_time": 209.72,
- "output_length": 6533.3,
- "rtf": 0.03,
- "elapsed_time": 551.92
- },
- {
- "tokens": 26000,
- "processing_time": 329.35,
- "output_length": 8068.15,
- "rtf": 0.04,
- "elapsed_time": 883.37
- },
- {
- "tokens": 31000,
- "processing_time": 473.52,
- "output_length": 9611.48,
- "rtf": 0.05,
- "elapsed_time": 1359.28
- },
- {
- "tokens": 36000,
- "processing_time": 650.98,
- "output_length": 11157.15,
- "rtf": 0.06,
- "elapsed_time": 2012.9
- }
- ],
- "system_metrics": [
- {
- "timestamp": "2025-01-03T14:41:01.331735",
- "cpu_percent": 7.5,
- "ram_percent": 50.2,
- "ram_used_gb": 31.960269927978516,
- "gpu_memory_used": 3191.0
- },
- {
- "timestamp": "2025-01-03T14:41:02.357116",
- "cpu_percent": 17.01,
- "ram_percent": 50.2,
- "ram_used_gb": 31.96163558959961,
- "gpu_memory_used": 3426.0
- },
- {
- "timestamp": "2025-01-03T14:41:02.445009",
- "cpu_percent": 9.5,
- "ram_percent": 50.3,
- "ram_used_gb": 31.966781616210938,
- "gpu_memory_used": 3426.0
- },
- {
- "timestamp": "2025-01-03T14:41:04.742152",
- "cpu_percent": 18.27,
- "ram_percent": 50.4,
- "ram_used_gb": 32.08788299560547,
- "gpu_memory_used": 3642.0
- },
- {
- "timestamp": "2025-01-03T14:41:04.847795",
- "cpu_percent": 16.27,
- "ram_percent": 50.5,
- "ram_used_gb": 32.094364166259766,
- "gpu_memory_used": 3640.0
- },
- {
- "timestamp": "2025-01-03T14:41:09.019590",
- "cpu_percent": 15.97,
- "ram_percent": 50.7,
- "ram_used_gb": 32.23244094848633,
- "gpu_memory_used": 3640.0
- },
- {
- "timestamp": "2025-01-03T14:41:09.110324",
- "cpu_percent": 3.54,
- "ram_percent": 50.7,
- "ram_used_gb": 32.234458923339844,
- "gpu_memory_used": 3640.0
- },
- {
- "timestamp": "2025-01-03T14:41:13.252607",
- "cpu_percent": 13.4,
- "ram_percent": 50.6,
- "ram_used_gb": 32.194271087646484,
- "gpu_memory_used": 3935.0
- },
- {
- "timestamp": "2025-01-03T14:41:13.327557",
- "cpu_percent": 4.69,
- "ram_percent": 50.6,
- "ram_used_gb": 32.191776275634766,
- "gpu_memory_used": 3935.0
- },
- {
- "timestamp": "2025-01-03T14:41:19.413633",
- "cpu_percent": 12.92,
- "ram_percent": 50.9,
- "ram_used_gb": 32.3467903137207,
- "gpu_memory_used": 4250.0
- },
- {
- "timestamp": "2025-01-03T14:41:19.492758",
- "cpu_percent": 7.5,
- "ram_percent": 50.8,
- "ram_used_gb": 32.34375,
- "gpu_memory_used": 4250.0
- },
- {
- "timestamp": "2025-01-03T14:41:26.467284",
- "cpu_percent": 13.09,
- "ram_percent": 51.2,
- "ram_used_gb": 32.56281280517578,
- "gpu_memory_used": 4249.0
- },
- {
- "timestamp": "2025-01-03T14:41:26.553559",
- "cpu_percent": 8.39,
- "ram_percent": 51.2,
- "ram_used_gb": 32.56183624267578,
- "gpu_memory_used": 4249.0
- },
- {
- "timestamp": "2025-01-03T14:41:34.284362",
- "cpu_percent": 12.61,
- "ram_percent": 51.7,
- "ram_used_gb": 32.874778747558594,
- "gpu_memory_used": 4250.0
- },
- {
- "timestamp": "2025-01-03T14:41:34.362353",
- "cpu_percent": 1.25,
- "ram_percent": 51.7,
- "ram_used_gb": 32.87461471557617,
- "gpu_memory_used": 4250.0
- },
- {
- "timestamp": "2025-01-03T14:42:23.471312",
- "cpu_percent": 11.64,
- "ram_percent": 54.9,
- "ram_used_gb": 34.90264129638672,
- "gpu_memory_used": 4647.0
- },
- {
- "timestamp": "2025-01-03T14:42:23.547203",
- "cpu_percent": 5.31,
- "ram_percent": 54.9,
- "ram_used_gb": 34.91563415527344,
- "gpu_memory_used": 4647.0
- },
- {
- "timestamp": "2025-01-03T14:43:56.724933",
- "cpu_percent": 12.97,
- "ram_percent": 59.5,
- "ram_used_gb": 37.84241485595703,
- "gpu_memory_used": 4655.0
- },
- {
- "timestamp": "2025-01-03T14:43:56.815453",
- "cpu_percent": 11.75,
- "ram_percent": 59.5,
- "ram_used_gb": 37.832679748535156,
- "gpu_memory_used": 4655.0
- },
- {
- "timestamp": "2025-01-03T14:46:41.705155",
- "cpu_percent": 12.94,
- "ram_percent": 66.3,
- "ram_used_gb": 42.1534538269043,
- "gpu_memory_used": 4729.0
- },
- {
- "timestamp": "2025-01-03T14:46:41.835177",
- "cpu_percent": 7.73,
- "ram_percent": 66.2,
- "ram_used_gb": 42.13554000854492,
- "gpu_memory_used": 4729.0
- },
- {
- "timestamp": "2025-01-03T14:50:13.166236",
- "cpu_percent": 11.62,
- "ram_percent": 73.4,
- "ram_used_gb": 46.71288299560547,
- "gpu_memory_used": 4676.0
- },
- {
- "timestamp": "2025-01-03T14:50:13.261611",
- "cpu_percent": 8.16,
- "ram_percent": 73.4,
- "ram_used_gb": 46.71356201171875,
- "gpu_memory_used": 4676.0
- },
- {
- "timestamp": "2025-01-03T14:55:44.623607",
- "cpu_percent": 12.92,
- "ram_percent": 82.8,
- "ram_used_gb": 52.65533447265625,
- "gpu_memory_used": 4636.0
- },
- {
- "timestamp": "2025-01-03T14:55:44.735410",
- "cpu_percent": 15.29,
- "ram_percent": 82.7,
- "ram_used_gb": 52.63290786743164,
- "gpu_memory_used": 4636.0
- },
- {
- "timestamp": "2025-01-03T15:03:40.534449",
- "cpu_percent": 13.88,
- "ram_percent": 85.0,
- "ram_used_gb": 54.050071716308594,
- "gpu_memory_used": 4771.0
- },
- {
- "timestamp": "2025-01-03T15:03:40.638708",
- "cpu_percent": 12.21,
- "ram_percent": 85.0,
- "ram_used_gb": 54.053733825683594,
- "gpu_memory_used": 4771.0
- },
- {
- "timestamp": "2025-01-03T15:14:34.159142",
- "cpu_percent": 14.51,
- "ram_percent": 78.1,
- "ram_used_gb": 49.70396423339844,
- "gpu_memory_used": 4739.0
- }
- ]
-}
\ No newline at end of file
diff --git a/examples/assorted_checks/benchmarks/output_data/benchmark_stats_cpu.txt b/examples/assorted_checks/benchmarks/output_data/benchmark_stats_cpu.txt
deleted file mode 100644
index 010d116..0000000
--- a/examples/assorted_checks/benchmarks/output_data/benchmark_stats_cpu.txt
+++ /dev/null
@@ -1,19 +0,0 @@
-=== Benchmark Statistics (with correct RTF) ===
-
-Overall Stats:
-Total tokens processed: 5500
-Total audio generated: 1741.65s
-Total test duration: 831.07s
-Average processing rate: 6.72 tokens/second
-Average RTF: 0.47x
-
-Per-chunk Stats:
-Average chunk size: 550.00 tokens
-Min chunk size: 100.00 tokens
-Max chunk size: 1000.00 tokens
-Average processing time: 82.70s
-Average output length: 174.17s
-
-Performance Ranges:
-Processing rate range: 5.63 - 7.17 tokens/second
-RTF range: 0.44x - 0.55x
diff --git a/examples/assorted_checks/benchmarks/output_data/benchmark_stats_rtf.txt b/examples/assorted_checks/benchmarks/output_data/benchmark_stats_rtf.txt
deleted file mode 100644
index e7bed5f..0000000
--- a/examples/assorted_checks/benchmarks/output_data/benchmark_stats_rtf.txt
+++ /dev/null
@@ -1,9 +0,0 @@
-=== Benchmark Statistics (with correct RTF) ===
-
-Overall Stats:
-Total tokens processed: 150850
-Total audio generated: 46786.59s
-Total test duration: 2012.90s
-Average processing rate: 104.34 tokens/second
-Average RTF: 0.03x
-
diff --git a/examples/assorted_checks/benchmarks/output_data/cpu_benchmark_results_rtf.json b/examples/assorted_checks/benchmarks/output_data/cpu_benchmark_results_rtf.json
index edcb334..447b2d0 100644
--- a/examples/assorted_checks/benchmarks/output_data/cpu_benchmark_results_rtf.json
+++ b/examples/assorted_checks/benchmarks/output_data/cpu_benchmark_results_rtf.json
@@ -2,1803 +2,2219 @@
"results": [
{
"tokens": 300,
- "processing_time": 41.62,
+ "processing_time": 56.61,
"output_length": 96.425,
- "rtf": 0.43,
- "elapsed_time": 41.68
+ "rtf": 0.59,
+ "elapsed_time": 56.70357
},
{
"tokens": 600,
- "processing_time": 81.72,
+ "processing_time": 105.39,
"output_length": 188.675,
- "rtf": 0.43,
- "elapsed_time": 123.49
+ "rtf": 0.56,
+ "elapsed_time": 162.20408
},
{
"tokens": 900,
- "processing_time": 120.55,
+ "processing_time": 143.67,
"output_length": 283.425,
- "rtf": 0.43,
- "elapsed_time": 244.1
+ "rtf": 0.51,
+ "elapsed_time": 306.01951
}
],
"system_metrics": [
{
- "timestamp": "2025-01-04T01:30:26.991154",
- "cpu_percent": 7.83,
- "ram_percent": 48.2,
- "ram_used_gb": 30.669906616210938,
- "gpu_memory_used": 1243.0,
- "relative_time": 0.07800030708312988
+ "timestamp": "2025-01-06T01:59:21.480266",
+ "cpu_percent": 18.78,
+ "ram_percent": 47.7,
+ "ram_used_gb": 30.34305191040039,
+ "gpu_memory_used": 2069.0,
+ "relative_time": 0.09699058532714844
},
{
- "timestamp": "2025-01-04T01:30:28.079669",
- "cpu_percent": 59.43,
- "ram_percent": 48.2,
- "ram_used_gb": 30.675106048583984,
- "gpu_memory_used": 1244.0,
- "relative_time": 1.1842052936553955
+ "timestamp": "2025-01-06T01:59:22.587112",
+ "cpu_percent": 45.13,
+ "ram_percent": 47.7,
+ "ram_used_gb": 30.35516357421875,
+ "gpu_memory_used": 2068.0,
+ "relative_time": 1.2145774364471436
},
{
- "timestamp": "2025-01-04T01:30:29.185881",
- "cpu_percent": 57.14,
+ "timestamp": "2025-01-06T01:59:23.704498",
+ "cpu_percent": 72.19,
+ "ram_percent": 47.7,
+ "ram_used_gb": 30.345977783203125,
+ "gpu_memory_used": 2064.0,
+ "relative_time": 2.308037042617798
+ },
+ {
+ "timestamp": "2025-01-06T01:59:24.792248",
+ "cpu_percent": 74.04,
+ "ram_percent": 47.7,
+ "ram_used_gb": 30.324016571044922,
+ "gpu_memory_used": 2063.0,
+ "relative_time": 3.423358201980591
+ },
+ {
+ "timestamp": "2025-01-06T01:59:25.912823",
+ "cpu_percent": 72.79,
+ "ram_percent": 47.7,
+ "ram_used_gb": 30.340232849121094,
+ "gpu_memory_used": 2060.0,
+ "relative_time": 4.514510154724121
+ },
+ {
+ "timestamp": "2025-01-06T01:59:27.003820",
+ "cpu_percent": 65.58,
+ "ram_percent": 47.7,
+ "ram_used_gb": 30.333145141601562,
+ "gpu_memory_used": 2060.0,
+ "relative_time": 5.624518871307373
+ },
+ {
+ "timestamp": "2025-01-06T01:59:28.108050",
+ "cpu_percent": 73.56,
+ "ram_percent": 47.7,
+ "ram_used_gb": 30.33941650390625,
+ "gpu_memory_used": 2060.0,
+ "relative_time": 6.728093385696411
+ },
+ {
+ "timestamp": "2025-01-06T01:59:29.209197",
+ "cpu_percent": 74.51,
+ "ram_percent": 47.7,
+ "ram_used_gb": 30.35086441040039,
+ "gpu_memory_used": 2060.0,
+ "relative_time": 7.804106950759888
+ },
+ {
+ "timestamp": "2025-01-06T01:59:30.285102",
+ "cpu_percent": 62.56,
+ "ram_percent": 47.7,
+ "ram_used_gb": 30.335765838623047,
+ "gpu_memory_used": 2060.0,
+ "relative_time": 8.90535569190979
+ },
+ {
+ "timestamp": "2025-01-06T01:59:31.388382",
+ "cpu_percent": 70.13,
+ "ram_percent": 47.7,
+ "ram_used_gb": 30.331851959228516,
+ "gpu_memory_used": 2060.0,
+ "relative_time": 10.00930905342102
+ },
+ {
+ "timestamp": "2025-01-06T01:59:32.493094",
+ "cpu_percent": 73.96,
+ "ram_percent": 47.7,
+ "ram_used_gb": 30.34304428100586,
+ "gpu_memory_used": 2060.0,
+ "relative_time": 11.135397672653198
+ },
+ {
+ "timestamp": "2025-01-06T01:59:33.626989",
+ "cpu_percent": 76.87,
+ "ram_percent": 47.7,
+ "ram_used_gb": 30.352691650390625,
+ "gpu_memory_used": 2060.0,
+ "relative_time": 12.232722282409668
+ },
+ {
+ "timestamp": "2025-01-06T01:59:34.720747",
+ "cpu_percent": 60.67,
+ "ram_percent": 47.7,
+ "ram_used_gb": 30.355037689208984,
+ "gpu_memory_used": 2060.0,
+ "relative_time": 13.359250783920288
+ },
+ {
+ "timestamp": "2025-01-06T01:59:35.840613",
+ "cpu_percent": 72.88,
+ "ram_percent": 47.7,
+ "ram_used_gb": 30.32884979248047,
+ "gpu_memory_used": 2060.0,
+ "relative_time": 14.481508016586304
+ },
+ {
+ "timestamp": "2025-01-06T01:59:36.967478",
+ "cpu_percent": 66.28,
+ "ram_percent": 47.7,
+ "ram_used_gb": 30.325313568115234,
+ "gpu_memory_used": 2060.0,
+ "relative_time": 15.61866569519043
+ },
+ {
+ "timestamp": "2025-01-06T01:59:38.100358",
+ "cpu_percent": 81.44,
+ "ram_percent": 47.7,
+ "ram_used_gb": 30.32506561279297,
+ "gpu_memory_used": 2055.0,
+ "relative_time": 16.75141143798828
+ },
+ {
+ "timestamp": "2025-01-06T01:59:39.234215",
+ "cpu_percent": 78.94,
+ "ram_percent": 47.7,
+ "ram_used_gb": 30.32697296142578,
+ "gpu_memory_used": 2064.0,
+ "relative_time": 17.868074655532837
+ },
+ {
+ "timestamp": "2025-01-06T01:59:40.353820",
+ "cpu_percent": 90.03,
+ "ram_percent": 47.6,
+ "ram_used_gb": 30.304851531982422,
+ "gpu_memory_used": 2064.0,
+ "relative_time": 18.969531536102295
+ },
+ {
+ "timestamp": "2025-01-06T01:59:41.459510",
+ "cpu_percent": 69.47,
+ "ram_percent": 47.6,
+ "ram_used_gb": 30.290199279785156,
+ "gpu_memory_used": 2064.0,
+ "relative_time": 20.0858416557312
+ },
+ {
+ "timestamp": "2025-01-06T01:59:42.576804",
+ "cpu_percent": 74.6,
+ "ram_percent": 47.7,
+ "ram_used_gb": 30.310714721679688,
+ "gpu_memory_used": 2064.0,
+ "relative_time": 21.232748985290527
+ },
+ {
+ "timestamp": "2025-01-06T01:59:43.714609",
+ "cpu_percent": 71.29,
+ "ram_percent": 47.6,
+ "ram_used_gb": 30.309490203857422,
+ "gpu_memory_used": 2064.0,
+ "relative_time": 22.3442165851593
+ },
+ {
+ "timestamp": "2025-01-06T01:59:44.828524",
+ "cpu_percent": 72.72,
+ "ram_percent": 47.6,
+ "ram_used_gb": 30.286724090576172,
+ "gpu_memory_used": 2064.0,
+ "relative_time": 23.47381353378296
+ },
+ {
+ "timestamp": "2025-01-06T01:59:45.965351",
+ "cpu_percent": 75.83,
+ "ram_percent": 47.6,
+ "ram_used_gb": 30.27065658569336,
+ "gpu_memory_used": 2064.0,
+ "relative_time": 24.611122608184814
+ },
+ {
+ "timestamp": "2025-01-06T01:59:47.101999",
+ "cpu_percent": 69.91,
+ "ram_percent": 47.6,
+ "ram_used_gb": 30.303489685058594,
+ "gpu_memory_used": 2064.0,
+ "relative_time": 25.740842819213867
+ },
+ {
+ "timestamp": "2025-01-06T01:59:48.224089",
+ "cpu_percent": 69.72,
+ "ram_percent": 47.6,
+ "ram_used_gb": 30.308269500732422,
+ "gpu_memory_used": 2064.0,
+ "relative_time": 26.859923839569092
+ },
+ {
+ "timestamp": "2025-01-06T01:59:49.344323",
+ "cpu_percent": 72.56,
+ "ram_percent": 47.7,
+ "ram_used_gb": 30.340702056884766,
+ "gpu_memory_used": 2077.0,
+ "relative_time": 27.983789205551147
+ },
+ {
+ "timestamp": "2025-01-06T01:59:50.476329",
+ "cpu_percent": 70.04,
+ "ram_percent": 47.6,
+ "ram_used_gb": 30.294044494628906,
+ "gpu_memory_used": 2077.0,
+ "relative_time": 29.103161811828613
+ },
+ {
+ "timestamp": "2025-01-06T01:59:51.584187",
+ "cpu_percent": 60.73,
+ "ram_percent": 47.6,
+ "ram_used_gb": 30.3031005859375,
+ "gpu_memory_used": 2077.0,
+ "relative_time": 30.226102352142334
+ },
+ {
+ "timestamp": "2025-01-06T01:59:52.714254",
+ "cpu_percent": 69.69,
+ "ram_percent": 47.6,
+ "ram_used_gb": 30.30960464477539,
+ "gpu_memory_used": 2076.0,
+ "relative_time": 31.348071575164795
+ },
+ {
+ "timestamp": "2025-01-06T01:59:53.833567",
+ "cpu_percent": 79.26,
+ "ram_percent": 47.7,
+ "ram_used_gb": 30.327259063720703,
+ "gpu_memory_used": 2076.0,
+ "relative_time": 32.47895312309265
+ },
+ {
+ "timestamp": "2025-01-06T01:59:54.969955",
+ "cpu_percent": 76.92,
+ "ram_percent": 47.6,
+ "ram_used_gb": 30.295856475830078,
+ "gpu_memory_used": 2075.0,
+ "relative_time": 33.593998670578
+ },
+ {
+ "timestamp": "2025-01-06T01:59:56.074960",
+ "cpu_percent": 81.26,
+ "ram_percent": 47.6,
+ "ram_used_gb": 30.309158325195312,
+ "gpu_memory_used": 2066.0,
+ "relative_time": 34.720394134521484
+ },
+ {
+ "timestamp": "2025-01-06T01:59:57.210513",
+ "cpu_percent": 82.42,
+ "ram_percent": 47.7,
+ "ram_used_gb": 30.345314025878906,
+ "gpu_memory_used": 2068.0,
+ "relative_time": 35.84532642364502
+ },
+ {
+ "timestamp": "2025-01-06T01:59:58.330832",
+ "cpu_percent": 95.36,
"ram_percent": 47.9,
- "ram_used_gb": 30.473060607910156,
- "gpu_memory_used": 1246.0,
- "relative_time": 2.31345796585083
+ "ram_used_gb": 30.43775177001953,
+ "gpu_memory_used": 2069.0,
+ "relative_time": 36.99730587005615
},
{
- "timestamp": "2025-01-04T01:30:30.312825",
- "cpu_percent": 49.54,
- "ram_percent": 47.9,
- "ram_used_gb": 30.49838638305664,
- "gpu_memory_used": 1248.0,
- "relative_time": 3.42720627784729
- },
- {
- "timestamp": "2025-01-04T01:30:31.421201",
- "cpu_percent": 47.16,
- "ram_percent": 47.9,
- "ram_used_gb": 30.44550323486328,
- "gpu_memory_used": 1251.0,
- "relative_time": 4.517812728881836
- },
- {
- "timestamp": "2025-01-04T01:30:32.514913",
- "cpu_percent": 47.98,
+ "timestamp": "2025-01-06T01:59:59.487468",
+ "cpu_percent": 100.0,
"ram_percent": 47.8,
- "ram_used_gb": 30.41952896118164,
- "gpu_memory_used": 1251.0,
- "relative_time": 5.647390604019165
+ "ram_used_gb": 30.407115936279297,
+ "gpu_memory_used": 2069.0,
+ "relative_time": 38.232134103775024
},
{
- "timestamp": "2025-01-04T01:30:33.649021",
- "cpu_percent": 48.55,
- "ram_percent": 47.8,
- "ram_used_gb": 30.400592803955078,
- "gpu_memory_used": 1249.0,
- "relative_time": 6.729969263076782
- },
- {
- "timestamp": "2025-01-04T01:30:34.723785",
- "cpu_percent": 43.88,
- "ram_percent": 47.8,
- "ram_used_gb": 30.390079498291016,
- "gpu_memory_used": 1253.0,
- "relative_time": 7.860571622848511
- },
- {
- "timestamp": "2025-01-04T01:30:35.864707",
- "cpu_percent": 50.01,
- "ram_percent": 47.8,
- "ram_used_gb": 30.380477905273438,
- "gpu_memory_used": 1253.0,
- "relative_time": 8.9869704246521
- },
- {
- "timestamp": "2025-01-04T01:30:36.982950",
- "cpu_percent": 49.29,
- "ram_percent": 47.8,
- "ram_used_gb": 30.41130828857422,
- "gpu_memory_used": 1255.0,
- "relative_time": 10.097310066223145
- },
- {
- "timestamp": "2025-01-04T01:30:38.099505",
- "cpu_percent": 52.99,
- "ram_percent": 47.8,
- "ram_used_gb": 30.410892486572266,
- "gpu_memory_used": 1252.0,
- "relative_time": 11.204046249389648
- },
- {
- "timestamp": "2025-01-04T01:30:39.205066",
- "cpu_percent": 42.98,
- "ram_percent": 47.8,
- "ram_used_gb": 30.40534210205078,
- "gpu_memory_used": 1253.0,
- "relative_time": 12.306914329528809
- },
- {
- "timestamp": "2025-01-04T01:30:40.305591",
- "cpu_percent": 47.11,
- "ram_percent": 47.8,
- "ram_used_gb": 30.40200424194336,
- "gpu_memory_used": 1253.0,
- "relative_time": 13.411193370819092
- },
- {
- "timestamp": "2025-01-04T01:30:41.410928",
- "cpu_percent": 50.09,
- "ram_percent": 47.8,
- "ram_used_gb": 30.39764404296875,
- "gpu_memory_used": 1260.0,
- "relative_time": 14.534100770950317
- },
- {
- "timestamp": "2025-01-04T01:30:42.530654",
- "cpu_percent": 57.82,
- "ram_percent": 47.8,
- "ram_used_gb": 30.39893341064453,
- "gpu_memory_used": 1256.0,
- "relative_time": 15.66111135482788
- },
- {
- "timestamp": "2025-01-04T01:30:43.666031",
- "cpu_percent": 52.61,
- "ram_percent": 47.8,
- "ram_used_gb": 30.37706756591797,
- "gpu_memory_used": 1256.0,
- "relative_time": 16.79327368736267
- },
- {
- "timestamp": "2025-01-04T01:30:44.794904",
- "cpu_percent": 57.14,
- "ram_percent": 47.7,
- "ram_used_gb": 30.36868667602539,
- "gpu_memory_used": 1256.0,
- "relative_time": 17.861677646636963
- },
- {
- "timestamp": "2025-01-04T01:30:45.865891",
- "cpu_percent": 66.7,
- "ram_percent": 47.7,
- "ram_used_gb": 30.371902465820312,
- "gpu_memory_used": 1257.0,
- "relative_time": 18.96451497077942
- },
- {
- "timestamp": "2025-01-04T01:30:46.971206",
- "cpu_percent": 53.61,
- "ram_percent": 47.7,
- "ram_used_gb": 30.352508544921875,
- "gpu_memory_used": 1254.0,
- "relative_time": 20.086195945739746
- },
- {
- "timestamp": "2025-01-04T01:30:48.089632",
- "cpu_percent": 50.26,
- "ram_percent": 47.7,
- "ram_used_gb": 30.349388122558594,
- "gpu_memory_used": 1248.0,
- "relative_time": 21.199003219604492
- },
- {
- "timestamp": "2025-01-04T01:30:49.191842",
- "cpu_percent": 48.22,
- "ram_percent": 47.7,
- "ram_used_gb": 30.344642639160156,
- "gpu_memory_used": 1251.0,
- "relative_time": 22.322958946228027
- },
- {
- "timestamp": "2025-01-04T01:30:50.324994",
- "cpu_percent": 55.64,
- "ram_percent": 47.7,
- "ram_used_gb": 30.35323715209961,
- "gpu_memory_used": 1251.0,
- "relative_time": 23.469967365264893
- },
- {
- "timestamp": "2025-01-04T01:30:51.477231",
- "cpu_percent": 45.68,
- "ram_percent": 47.7,
- "ram_used_gb": 30.35232162475586,
- "gpu_memory_used": 1251.0,
- "relative_time": 24.579415798187256
- },
- {
- "timestamp": "2025-01-04T01:30:52.585934",
- "cpu_percent": 46.07,
- "ram_percent": 47.7,
- "ram_used_gb": 30.32147216796875,
- "gpu_memory_used": 1244.0,
- "relative_time": 25.71301007270813
- },
- {
- "timestamp": "2025-01-04T01:30:53.707821",
- "cpu_percent": 47.54,
- "ram_percent": 47.6,
- "ram_used_gb": 30.296611785888672,
- "gpu_memory_used": 1244.0,
- "relative_time": 26.7750301361084
- },
- {
- "timestamp": "2025-01-04T01:30:54.766880",
- "cpu_percent": 44.8,
- "ram_percent": 47.6,
- "ram_used_gb": 30.28769302368164,
- "gpu_memory_used": 1237.0,
- "relative_time": 27.87526297569275
- },
- {
- "timestamp": "2025-01-04T01:30:55.873403",
- "cpu_percent": 48.82,
- "ram_percent": 47.6,
- "ram_used_gb": 30.285594940185547,
- "gpu_memory_used": 1237.0,
- "relative_time": 29.00292205810547
- },
- {
- "timestamp": "2025-01-04T01:30:57.003386",
- "cpu_percent": 55.54,
- "ram_percent": 47.6,
- "ram_used_gb": 30.30721664428711,
- "gpu_memory_used": 1237.0,
- "relative_time": 30.13248038291931
- },
- {
- "timestamp": "2025-01-04T01:30:58.135723",
- "cpu_percent": 46.97,
- "ram_percent": 47.7,
- "ram_used_gb": 30.319698333740234,
- "gpu_memory_used": 1237.0,
- "relative_time": 31.280652046203613
- },
- {
- "timestamp": "2025-01-04T01:30:59.274397",
- "cpu_percent": 46.94,
- "ram_percent": 47.7,
- "ram_used_gb": 30.31420135498047,
- "gpu_memory_used": 1239.0,
- "relative_time": 32.39983797073364
- },
- {
- "timestamp": "2025-01-04T01:31:00.405545",
- "cpu_percent": 53.81,
- "ram_percent": 47.7,
- "ram_used_gb": 30.335922241210938,
- "gpu_memory_used": 1243.0,
- "relative_time": 33.502938985824585
- },
- {
- "timestamp": "2025-01-04T01:31:01.497496",
- "cpu_percent": 51.0,
- "ram_percent": 47.7,
- "ram_used_gb": 30.325199127197266,
- "gpu_memory_used": 1243.0,
- "relative_time": 34.584938526153564
- },
- {
- "timestamp": "2025-01-04T01:31:02.583134",
- "cpu_percent": 49.26,
- "ram_percent": 47.6,
- "ram_used_gb": 30.30097198486328,
- "gpu_memory_used": 1243.0,
- "relative_time": 35.680947065353394
- },
- {
- "timestamp": "2025-01-04T01:31:03.686381",
- "cpu_percent": 48.91,
- "ram_percent": 47.6,
- "ram_used_gb": 30.300418853759766,
- "gpu_memory_used": 1243.0,
- "relative_time": 36.786722898483276
- },
- {
- "timestamp": "2025-01-04T01:31:04.786497",
- "cpu_percent": 48.69,
- "ram_percent": 47.6,
- "ram_used_gb": 30.29620361328125,
- "gpu_memory_used": 1243.0,
- "relative_time": 37.90794491767883
- },
- {
- "timestamp": "2025-01-04T01:31:05.908563",
- "cpu_percent": 50.43,
- "ram_percent": 47.6,
- "ram_used_gb": 30.29269027709961,
- "gpu_memory_used": 1243.0,
- "relative_time": 39.01517176628113
- },
- {
- "timestamp": "2025-01-04T01:31:07.014496",
- "cpu_percent": 48.22,
- "ram_percent": 47.6,
- "ram_used_gb": 30.298015594482422,
- "gpu_memory_used": 1243.0,
- "relative_time": 40.118446826934814
- },
- {
- "timestamp": "2025-01-04T01:31:08.120066",
- "cpu_percent": 47.47,
- "ram_percent": 47.7,
- "ram_used_gb": 30.312705993652344,
- "gpu_memory_used": 1243.0,
- "relative_time": 41.22802424430847
- },
- {
- "timestamp": "2025-01-04T01:31:09.225367",
- "cpu_percent": 41.09,
- "ram_percent": 47.7,
- "ram_used_gb": 30.34886932373047,
- "gpu_memory_used": 1244.0,
- "relative_time": 42.34174656867981
- },
- {
- "timestamp": "2025-01-04T01:31:10.339308",
- "cpu_percent": 44.12,
- "ram_percent": 47.7,
- "ram_used_gb": 30.353790283203125,
- "gpu_memory_used": 1245.0,
- "relative_time": 43.44456744194031
- },
- {
- "timestamp": "2025-01-04T01:31:11.443944",
- "cpu_percent": 48.99,
- "ram_percent": 47.7,
- "ram_used_gb": 30.34658432006836,
- "gpu_memory_used": 1245.0,
- "relative_time": 44.53658318519592
- },
- {
- "timestamp": "2025-01-04T01:31:12.533026",
- "cpu_percent": 47.62,
- "ram_percent": 47.7,
- "ram_used_gb": 30.318241119384766,
- "gpu_memory_used": 1245.0,
- "relative_time": 45.6171441078186
- },
- {
- "timestamp": "2025-01-04T01:31:13.617044",
- "cpu_percent": 49.3,
- "ram_percent": 47.7,
- "ram_used_gb": 30.318588256835938,
- "gpu_memory_used": 1245.0,
- "relative_time": 46.71653604507446
- },
- {
- "timestamp": "2025-01-04T01:31:14.718976",
- "cpu_percent": 48.42,
- "ram_percent": 47.7,
- "ram_used_gb": 30.316349029541016,
- "gpu_memory_used": 1239.0,
- "relative_time": 47.80844783782959
- },
- {
- "timestamp": "2025-01-04T01:31:15.805079",
- "cpu_percent": 47.56,
- "ram_percent": 47.6,
- "ram_used_gb": 30.30520248413086,
- "gpu_memory_used": 1239.0,
- "relative_time": 48.90499949455261
- },
- {
- "timestamp": "2025-01-04T01:31:16.902878",
- "cpu_percent": 49.11,
- "ram_percent": 47.6,
- "ram_used_gb": 30.306812286376953,
- "gpu_memory_used": 1232.0,
- "relative_time": 50.034260749816895
- },
- {
- "timestamp": "2025-01-04T01:31:18.035723",
- "cpu_percent": 45.81,
- "ram_percent": 47.7,
- "ram_used_gb": 30.32524871826172,
- "gpu_memory_used": 1237.0,
- "relative_time": 51.1371693611145
- },
- {
- "timestamp": "2025-01-04T01:31:19.143169",
- "cpu_percent": 49.94,
- "ram_percent": 47.7,
- "ram_used_gb": 30.323795318603516,
- "gpu_memory_used": 1237.0,
- "relative_time": 52.227344274520874
- },
- {
- "timestamp": "2025-01-04T01:31:20.230256",
- "cpu_percent": 39.57,
- "ram_percent": 47.7,
- "ram_used_gb": 30.330493927001953,
- "gpu_memory_used": 1237.0,
- "relative_time": 53.34033155441284
- },
- {
- "timestamp": "2025-01-04T01:31:21.331797",
- "cpu_percent": 44.34,
- "ram_percent": 47.7,
- "ram_used_gb": 30.330425262451172,
- "gpu_memory_used": 1237.0,
- "relative_time": 54.45246958732605
- },
- {
- "timestamp": "2025-01-04T01:31:22.450663",
- "cpu_percent": 46.87,
- "ram_percent": 47.6,
- "ram_used_gb": 30.3084716796875,
- "gpu_memory_used": 1237.0,
- "relative_time": 55.55728077888489
- },
- {
- "timestamp": "2025-01-04T01:31:23.550691",
- "cpu_percent": 49.88,
- "ram_percent": 47.6,
- "ram_used_gb": 30.309173583984375,
- "gpu_memory_used": 1243.0,
- "relative_time": 56.65515089035034
- },
- {
- "timestamp": "2025-01-04T01:31:24.650939",
- "cpu_percent": 51.21,
- "ram_percent": 47.6,
- "ram_used_gb": 30.30620574951172,
- "gpu_memory_used": 1243.0,
- "relative_time": 57.726617097854614
- },
- {
- "timestamp": "2025-01-04T01:31:25.728955",
- "cpu_percent": 45.22,
- "ram_percent": 47.6,
- "ram_used_gb": 30.291912078857422,
- "gpu_memory_used": 1243.0,
- "relative_time": 58.82792663574219
- },
- {
- "timestamp": "2025-01-04T01:31:26.829490",
- "cpu_percent": 48.86,
- "ram_percent": 47.6,
- "ram_used_gb": 30.289695739746094,
- "gpu_memory_used": 1243.0,
- "relative_time": 59.93786025047302
- },
- {
- "timestamp": "2025-01-04T01:31:27.937071",
- "cpu_percent": 45.69,
- "ram_percent": 47.6,
- "ram_used_gb": 30.302818298339844,
- "gpu_memory_used": 1243.0,
- "relative_time": 61.05047869682312
- },
- {
- "timestamp": "2025-01-04T01:31:29.044046",
- "cpu_percent": 51.09,
- "ram_percent": 47.6,
- "ram_used_gb": 30.307464599609375,
- "gpu_memory_used": 1243.0,
- "relative_time": 62.159112215042114
- },
- {
- "timestamp": "2025-01-04T01:31:30.162426",
- "cpu_percent": 47.04,
- "ram_percent": 47.7,
- "ram_used_gb": 30.32668685913086,
- "gpu_memory_used": 1243.0,
- "relative_time": 63.249592542648315
- },
- {
- "timestamp": "2025-01-04T01:31:31.251755",
- "cpu_percent": 45.32,
- "ram_percent": 47.7,
- "ram_used_gb": 30.330463409423828,
- "gpu_memory_used": 1243.0,
- "relative_time": 64.35896062850952
- },
- {
- "timestamp": "2025-01-04T01:31:32.362284",
- "cpu_percent": 47.2,
- "ram_percent": 47.7,
- "ram_used_gb": 30.314319610595703,
- "gpu_memory_used": 1239.0,
- "relative_time": 65.4672338962555
- },
- {
- "timestamp": "2025-01-04T01:31:33.468921",
- "cpu_percent": 48.94,
- "ram_percent": 47.6,
- "ram_used_gb": 30.308246612548828,
- "gpu_memory_used": 1243.0,
- "relative_time": 66.5955581665039
- },
- {
- "timestamp": "2025-01-04T01:31:34.594176",
- "cpu_percent": 47.88,
- "ram_percent": 47.6,
- "ram_used_gb": 30.29806137084961,
- "gpu_memory_used": 1243.0,
- "relative_time": 67.68029594421387
- },
- {
- "timestamp": "2025-01-04T01:31:35.682260",
- "cpu_percent": 45.92,
- "ram_percent": 47.6,
- "ram_used_gb": 30.299114227294922,
- "gpu_memory_used": 1243.0,
- "relative_time": 68.7970290184021
- },
- {
- "timestamp": "2025-01-04T01:31:36.802433",
- "cpu_percent": 51.07,
- "ram_percent": 47.6,
- "ram_used_gb": 30.29195785522461,
- "gpu_memory_used": 1243.0,
- "relative_time": 69.92168736457825
- },
- {
- "timestamp": "2025-01-04T01:31:37.926464",
- "cpu_percent": 47.29,
- "ram_percent": 47.7,
- "ram_used_gb": 30.324363708496094,
- "gpu_memory_used": 1243.0,
- "relative_time": 71.05467820167542
- },
- {
- "timestamp": "2025-01-04T01:31:39.059936",
- "cpu_percent": 48.91,
- "ram_percent": 47.7,
- "ram_used_gb": 30.32428741455078,
- "gpu_memory_used": 1243.0,
- "relative_time": 72.14405465126038
- },
- {
- "timestamp": "2025-01-04T01:31:40.142859",
- "cpu_percent": 44.66,
- "ram_percent": 47.7,
- "ram_used_gb": 30.33354949951172,
- "gpu_memory_used": 1243.0,
- "relative_time": 73.25559496879578
- },
- {
- "timestamp": "2025-01-04T01:31:41.254868",
- "cpu_percent": 48.98,
- "ram_percent": 47.7,
- "ram_used_gb": 30.344337463378906,
- "gpu_memory_used": 1237.0,
- "relative_time": 74.35676956176758
- },
- {
- "timestamp": "2025-01-04T01:31:42.354977",
- "cpu_percent": 50.79,
- "ram_percent": 47.7,
- "ram_used_gb": 30.322650909423828,
- "gpu_memory_used": 1237.0,
- "relative_time": 75.43929266929626
- },
- {
- "timestamp": "2025-01-04T01:31:43.432869",
- "cpu_percent": 45.86,
- "ram_percent": 47.7,
- "ram_used_gb": 30.316268920898438,
- "gpu_memory_used": 1237.0,
- "relative_time": 76.53794598579407
- },
- {
- "timestamp": "2025-01-04T01:31:44.535917",
- "cpu_percent": 47.22,
- "ram_percent": 47.6,
- "ram_used_gb": 30.308757781982422,
- "gpu_memory_used": 1237.0,
- "relative_time": 77.6620762348175
- },
- {
- "timestamp": "2025-01-04T01:31:45.666281",
- "cpu_percent": 51.06,
- "ram_percent": 47.6,
- "ram_used_gb": 30.307342529296875,
- "gpu_memory_used": 1237.0,
- "relative_time": 78.77155900001526
- },
- {
- "timestamp": "2025-01-04T01:31:46.771605",
- "cpu_percent": 47.82,
- "ram_percent": 47.6,
- "ram_used_gb": 30.298141479492188,
- "gpu_memory_used": 1237.0,
- "relative_time": 79.87201809883118
- },
- {
- "timestamp": "2025-01-04T01:31:47.874817",
- "cpu_percent": 44.51,
- "ram_percent": 47.7,
- "ram_used_gb": 30.322750091552734,
- "gpu_memory_used": 1243.0,
- "relative_time": 80.97521829605103
- },
- {
- "timestamp": "2025-01-04T01:31:48.983338",
- "cpu_percent": 47.69,
- "ram_percent": 47.7,
- "ram_used_gb": 30.3226318359375,
- "gpu_memory_used": 1243.0,
- "relative_time": 82.09707593917847
- },
- {
- "timestamp": "2025-01-04T01:31:50.102541",
- "cpu_percent": 42.36,
- "ram_percent": 47.7,
- "ram_used_gb": 30.32965087890625,
- "gpu_memory_used": 1243.0,
- "relative_time": 83.20944809913635
- },
- {
- "timestamp": "2025-01-04T01:31:51.204766",
- "cpu_percent": 45.87,
- "ram_percent": 47.7,
- "ram_used_gb": 30.32353973388672,
- "gpu_memory_used": 1243.0,
- "relative_time": 84.31531429290771
- },
- {
- "timestamp": "2025-01-04T01:31:52.310873",
- "cpu_percent": 50.01,
- "ram_percent": 47.6,
- "ram_used_gb": 30.296016693115234,
- "gpu_memory_used": 1247.0,
- "relative_time": 85.4254515171051
- },
- {
- "timestamp": "2025-01-04T01:31:53.429342",
- "cpu_percent": 49.65,
- "ram_percent": 47.6,
- "ram_used_gb": 30.306453704833984,
- "gpu_memory_used": 1246.0,
- "relative_time": 86.51991653442383
- },
- {
- "timestamp": "2025-01-04T01:31:54.517894",
- "cpu_percent": 47.29,
- "ram_percent": 47.6,
- "ram_used_gb": 30.30263900756836,
- "gpu_memory_used": 1245.0,
- "relative_time": 87.60364723205566
- },
- {
- "timestamp": "2025-01-04T01:31:55.602848",
- "cpu_percent": 47.48,
- "ram_percent": 47.6,
- "ram_used_gb": 30.303203582763672,
- "gpu_memory_used": 1245.0,
- "relative_time": 88.68531346321106
- },
- {
- "timestamp": "2025-01-04T01:31:56.677895",
- "cpu_percent": 46.74,
- "ram_percent": 47.6,
- "ram_used_gb": 30.29749298095703,
- "gpu_memory_used": 1245.0,
- "relative_time": 89.78639531135559
- },
- {
- "timestamp": "2025-01-04T01:31:57.794084",
- "cpu_percent": 43.92,
- "ram_percent": 47.7,
- "ram_used_gb": 30.313438415527344,
- "gpu_memory_used": 1244.0,
- "relative_time": 90.89922308921814
- },
- {
- "timestamp": "2025-01-04T01:31:58.901464",
- "cpu_percent": 48.88,
- "ram_percent": 47.7,
- "ram_used_gb": 30.32254409790039,
- "gpu_memory_used": 1244.0,
- "relative_time": 91.96823143959045
- },
- {
- "timestamp": "2025-01-04T01:31:59.972227",
- "cpu_percent": 38.89,
- "ram_percent": 47.7,
- "ram_used_gb": 30.32897186279297,
- "gpu_memory_used": 1245.0,
- "relative_time": 93.08689904212952
- },
- {
- "timestamp": "2025-01-04T01:32:01.089013",
- "cpu_percent": 49.22,
- "ram_percent": 47.7,
- "ram_used_gb": 30.328304290771484,
- "gpu_memory_used": 1250.0,
- "relative_time": 94.20951867103577
- },
- {
- "timestamp": "2025-01-04T01:32:02.202304",
- "cpu_percent": 46.56,
- "ram_percent": 47.6,
- "ram_used_gb": 30.29920196533203,
- "gpu_memory_used": 1250.0,
- "relative_time": 95.29210877418518
- },
- {
- "timestamp": "2025-01-04T01:32:03.292108",
- "cpu_percent": 46.39,
- "ram_percent": 47.6,
- "ram_used_gb": 30.308143615722656,
- "gpu_memory_used": 1250.0,
- "relative_time": 96.40629982948303
- },
- {
- "timestamp": "2025-01-04T01:32:04.402400",
- "cpu_percent": 49.88,
- "ram_percent": 47.6,
- "ram_used_gb": 30.310047149658203,
- "gpu_memory_used": 1250.0,
- "relative_time": 97.51973557472229
- },
- {
- "timestamp": "2025-01-04T01:32:05.513450",
- "cpu_percent": 53.28,
- "ram_percent": 47.6,
- "ram_used_gb": 30.30374526977539,
- "gpu_memory_used": 1249.0,
- "relative_time": 98.62612318992615
- },
- {
- "timestamp": "2025-01-04T01:32:06.631627",
- "cpu_percent": 44.65,
- "ram_percent": 47.6,
- "ram_used_gb": 30.30333709716797,
- "gpu_memory_used": 1242.0,
- "relative_time": 99.73457670211792
- },
- {
- "timestamp": "2025-01-04T01:32:07.736449",
- "cpu_percent": 50.93,
- "ram_percent": 47.7,
- "ram_used_gb": 30.33118438720703,
- "gpu_memory_used": 1242.0,
- "relative_time": 100.85807871818542
- },
- {
- "timestamp": "2025-01-04T01:32:08.860429",
- "cpu_percent": 62.71,
- "ram_percent": 47.8,
- "ram_used_gb": 30.41672134399414,
- "gpu_memory_used": 1244.0,
- "relative_time": 102.08941197395325
- },
- {
- "timestamp": "2025-01-04T01:32:10.080974",
- "cpu_percent": 96.29,
- "ram_percent": 47.9,
- "ram_used_gb": 30.45757293701172,
- "gpu_memory_used": 1245.0,
- "relative_time": 103.18154048919678
- },
- {
- "timestamp": "2025-01-04T01:32:11.187912",
- "cpu_percent": 49.09,
- "ram_percent": 47.9,
- "ram_used_gb": 30.445499420166016,
- "gpu_memory_used": 1245.0,
- "relative_time": 104.30198311805725
- },
- {
- "timestamp": "2025-01-04T01:32:12.306213",
- "cpu_percent": 51.15,
- "ram_percent": 47.8,
- "ram_used_gb": 30.42266845703125,
- "gpu_memory_used": 1240.0,
- "relative_time": 105.43745422363281
- },
- {
- "timestamp": "2025-01-04T01:32:13.437791",
- "cpu_percent": 47.79,
- "ram_percent": 47.8,
- "ram_used_gb": 30.40296173095703,
- "gpu_memory_used": 1236.0,
- "relative_time": 106.55609393119812
- },
- {
- "timestamp": "2025-01-04T01:32:14.548441",
- "cpu_percent": 39.41,
- "ram_percent": 47.8,
- "ram_used_gb": 30.406475067138672,
- "gpu_memory_used": 1244.0,
- "relative_time": 107.67082047462463
- },
- {
- "timestamp": "2025-01-04T01:32:15.666526",
- "cpu_percent": 77.07,
- "ram_percent": 47.8,
- "ram_used_gb": 30.424407958984375,
- "gpu_memory_used": 1247.0,
- "relative_time": 108.7851665019989
- },
- {
- "timestamp": "2025-01-04T01:32:16.780793",
- "cpu_percent": 49.13,
- "ram_percent": 47.8,
- "ram_used_gb": 30.429065704345703,
- "gpu_memory_used": 1246.0,
- "relative_time": 109.88107633590698
- },
- {
- "timestamp": "2025-01-04T01:32:17.879071",
- "cpu_percent": 82.96,
- "ram_percent": 47.8,
- "ram_used_gb": 30.428447723388672,
- "gpu_memory_used": 1281.0,
- "relative_time": 111.02328372001648
- },
- {
- "timestamp": "2025-01-04T01:32:19.026978",
- "cpu_percent": 74.64,
- "ram_percent": 47.8,
- "ram_used_gb": 30.430500030517578,
- "gpu_memory_used": 1281.0,
- "relative_time": 112.15347504615784
- },
- {
- "timestamp": "2025-01-04T01:32:20.156784",
- "cpu_percent": 76.94,
- "ram_percent": 47.8,
- "ram_used_gb": 30.40774917602539,
- "gpu_memory_used": 1274.0,
- "relative_time": 113.31317591667175
- },
- {
- "timestamp": "2025-01-04T01:32:21.310871",
- "cpu_percent": 69.52,
- "ram_percent": 47.8,
- "ram_used_gb": 30.429115295410156,
- "gpu_memory_used": 1273.0,
- "relative_time": 114.42301273345947
- },
- {
- "timestamp": "2025-01-04T01:32:22.424508",
- "cpu_percent": 74.47,
- "ram_percent": 47.8,
- "ram_used_gb": 30.403045654296875,
- "gpu_memory_used": 1274.0,
- "relative_time": 115.52539491653442
- },
- {
- "timestamp": "2025-01-04T01:32:23.525673",
- "cpu_percent": 67.2,
- "ram_percent": 47.9,
- "ram_used_gb": 30.484474182128906,
- "gpu_memory_used": 1273.0,
- "relative_time": 116.61319661140442
- },
- {
- "timestamp": "2025-01-04T01:32:24.613302",
- "cpu_percent": 57.41,
- "ram_percent": 47.7,
- "ram_used_gb": 30.35879135131836,
- "gpu_memory_used": 1270.0,
- "relative_time": 117.72619676589966
- },
- {
- "timestamp": "2025-01-04T01:32:25.730732",
- "cpu_percent": 45.97,
- "ram_percent": 47.7,
- "ram_used_gb": 30.347335815429688,
- "gpu_memory_used": 1270.0,
- "relative_time": 118.84320116043091
- },
- {
- "timestamp": "2025-01-04T01:32:26.845420",
- "cpu_percent": 47.74,
- "ram_percent": 47.7,
- "ram_used_gb": 30.354007720947266,
- "gpu_memory_used": 1265.0,
- "relative_time": 119.96074485778809
- },
- {
- "timestamp": "2025-01-04T01:32:27.964248",
- "cpu_percent": 60.0,
- "ram_percent": 47.7,
- "ram_used_gb": 30.3675537109375,
- "gpu_memory_used": 1258.0,
- "relative_time": 121.09439873695374
- },
- {
- "timestamp": "2025-01-04T01:32:29.094542",
- "cpu_percent": 54.46,
- "ram_percent": 47.7,
- "ram_used_gb": 30.367305755615234,
- "gpu_memory_used": 1230.0,
- "relative_time": 122.24102592468262
- },
- {
- "timestamp": "2025-01-04T01:32:30.244200",
- "cpu_percent": 56.21,
- "ram_percent": 47.7,
- "ram_used_gb": 30.364959716796875,
- "gpu_memory_used": 1230.0,
- "relative_time": 123.34450554847717
- },
- {
- "timestamp": "2025-01-04T01:32:31.346103",
- "cpu_percent": 40.66,
- "ram_percent": 47.8,
- "ram_used_gb": 30.420738220214844,
- "gpu_memory_used": 1235.0,
- "relative_time": 124.46777892112732
- },
- {
- "timestamp": "2025-01-04T01:32:32.463710",
- "cpu_percent": 51.66,
- "ram_percent": 47.8,
- "ram_used_gb": 30.396198272705078,
- "gpu_memory_used": 1235.0,
- "relative_time": 125.57916116714478
- },
- {
- "timestamp": "2025-01-04T01:32:33.580811",
- "cpu_percent": 49.68,
- "ram_percent": 47.8,
- "ram_used_gb": 30.40151596069336,
- "gpu_memory_used": 1236.0,
- "relative_time": 126.6768786907196
- },
- {
- "timestamp": "2025-01-04T01:32:34.668960",
- "cpu_percent": 49.09,
- "ram_percent": 47.8,
- "ram_used_gb": 30.380916595458984,
- "gpu_memory_used": 1236.0,
- "relative_time": 127.73568296432495
- },
- {
- "timestamp": "2025-01-04T01:32:35.729484",
- "cpu_percent": 48.53,
- "ram_percent": 47.8,
- "ram_used_gb": 30.385761260986328,
- "gpu_memory_used": 1236.0,
- "relative_time": 128.85891699790955
- },
- {
- "timestamp": "2025-01-04T01:32:36.849812",
- "cpu_percent": 52.39,
- "ram_percent": 47.8,
- "ram_used_gb": 30.414752960205078,
- "gpu_memory_used": 1235.0,
- "relative_time": 129.9150390625
- },
- {
- "timestamp": "2025-01-04T01:32:37.919974",
- "cpu_percent": 46.89,
- "ram_percent": 47.9,
- "ram_used_gb": 30.450218200683594,
- "gpu_memory_used": 1235.0,
- "relative_time": 131.00502228736877
- },
- {
- "timestamp": "2025-01-04T01:32:39.008115",
- "cpu_percent": 46.59,
- "ram_percent": 47.8,
- "ram_used_gb": 30.436458587646484,
- "gpu_memory_used": 1235.0,
- "relative_time": 132.10191130638123
- },
- {
- "timestamp": "2025-01-04T01:32:40.095463",
- "cpu_percent": 45.76,
- "ram_percent": 47.9,
- "ram_used_gb": 30.443893432617188,
- "gpu_memory_used": 1224.0,
- "relative_time": 133.26839780807495
- },
- {
- "timestamp": "2025-01-04T01:32:41.265737",
- "cpu_percent": 56.94,
- "ram_percent": 47.8,
- "ram_used_gb": 30.41216278076172,
- "gpu_memory_used": 1224.0,
- "relative_time": 134.32926607131958
- },
- {
- "timestamp": "2025-01-04T01:32:42.321015",
- "cpu_percent": 40.36,
- "ram_percent": 47.8,
- "ram_used_gb": 30.386669158935547,
- "gpu_memory_used": 1224.0,
- "relative_time": 135.40537309646606
- },
- {
- "timestamp": "2025-01-04T01:32:43.400382",
- "cpu_percent": 44.51,
- "ram_percent": 47.8,
- "ram_used_gb": 30.39049530029297,
- "gpu_memory_used": 1224.0,
- "relative_time": 136.52469301223755
- },
- {
- "timestamp": "2025-01-04T01:32:44.524119",
- "cpu_percent": 50.29,
- "ram_percent": 47.8,
- "ram_used_gb": 30.38903045654297,
- "gpu_memory_used": 1220.0,
- "relative_time": 137.60522270202637
- },
- {
- "timestamp": "2025-01-04T01:32:45.599869",
- "cpu_percent": 51.69,
- "ram_percent": 47.8,
- "ram_used_gb": 30.378681182861328,
- "gpu_memory_used": 1213.0,
- "relative_time": 138.7130560874939
- },
- {
- "timestamp": "2025-01-04T01:32:46.711674",
- "cpu_percent": 49.55,
- "ram_percent": 47.7,
- "ram_used_gb": 30.34076690673828,
- "gpu_memory_used": 1213.0,
- "relative_time": 139.8105547428131
- },
- {
- "timestamp": "2025-01-04T01:32:47.813091",
- "cpu_percent": 44.5,
- "ram_percent": 47.7,
- "ram_used_gb": 30.343746185302734,
- "gpu_memory_used": 1213.0,
- "relative_time": 140.91643166542053
- },
- {
- "timestamp": "2025-01-04T01:32:48.917679",
- "cpu_percent": 43.76,
- "ram_percent": 47.7,
- "ram_used_gb": 30.354793548583984,
- "gpu_memory_used": 1213.0,
- "relative_time": 142.04264283180237
- },
- {
- "timestamp": "2025-01-04T01:32:50.047653",
- "cpu_percent": 48.41,
- "ram_percent": 47.7,
- "ram_used_gb": 30.361080169677734,
- "gpu_memory_used": 1219.0,
- "relative_time": 143.14667677879333
- },
- {
- "timestamp": "2025-01-04T01:32:51.153490",
- "cpu_percent": 57.01,
- "ram_percent": 47.9,
- "ram_used_gb": 30.4390869140625,
- "gpu_memory_used": 1232.0,
- "relative_time": 144.2709481716156
- },
- {
- "timestamp": "2025-01-04T01:32:52.272196",
- "cpu_percent": 54.69,
- "ram_percent": 47.9,
- "ram_used_gb": 30.46664047241211,
- "gpu_memory_used": 1236.0,
- "relative_time": 145.36933588981628
- },
- {
- "timestamp": "2025-01-04T01:32:53.374563",
- "cpu_percent": 51.37,
- "ram_percent": 47.9,
- "ram_used_gb": 30.487388610839844,
- "gpu_memory_used": 1245.0,
- "relative_time": 146.4400930404663
- },
- {
- "timestamp": "2025-01-04T01:32:54.445178",
- "cpu_percent": 47.76,
- "ram_percent": 47.9,
- "ram_used_gb": 30.475635528564453,
- "gpu_memory_used": 1241.0,
- "relative_time": 147.5295627117157
- },
- {
- "timestamp": "2025-01-04T01:32:55.520495",
- "cpu_percent": 49.24,
- "ram_percent": 47.9,
- "ram_used_gb": 30.47634506225586,
- "gpu_memory_used": 1236.0,
- "relative_time": 148.5926468372345
- },
- {
- "timestamp": "2025-01-04T01:32:56.591995",
- "cpu_percent": 53.63,
- "ram_percent": 47.9,
- "ram_used_gb": 30.49687957763672,
- "gpu_memory_used": 1251.0,
- "relative_time": 149.72928547859192
- },
- {
- "timestamp": "2025-01-04T01:32:57.727346",
- "cpu_percent": 65.04,
- "ram_percent": 48.1,
- "ram_used_gb": 30.59111785888672,
- "gpu_memory_used": 1251.0,
- "relative_time": 150.86237502098083
- },
- {
- "timestamp": "2025-01-04T01:32:58.862812",
- "cpu_percent": 71.05,
- "ram_percent": 48.2,
- "ram_used_gb": 30.633731842041016,
- "gpu_memory_used": 1263.0,
- "relative_time": 152.03348207473755
- },
- {
- "timestamp": "2025-01-04T01:33:00.037915",
- "cpu_percent": 85.87,
- "ram_percent": 48.2,
- "ram_used_gb": 30.68001937866211,
- "gpu_memory_used": 1253.0,
- "relative_time": 153.1551034450531
- },
- {
- "timestamp": "2025-01-04T01:33:01.158119",
- "cpu_percent": 59.8,
- "ram_percent": 48.2,
- "ram_used_gb": 30.69198989868164,
- "gpu_memory_used": 1252.0,
- "relative_time": 154.2606840133667
- },
- {
- "timestamp": "2025-01-04T01:33:02.262390",
- "cpu_percent": 45.33,
+ "timestamp": "2025-01-06T02:00:00.714760",
+ "cpu_percent": 100.0,
"ram_percent": 48.3,
- "ram_used_gb": 30.743839263916016,
- "gpu_memory_used": 1252.0,
- "relative_time": 155.3663365840912
+ "ram_used_gb": 30.697307586669922,
+ "gpu_memory_used": 2069.0,
+ "relative_time": 39.4050030708313
},
{
- "timestamp": "2025-01-04T01:33:03.369936",
- "cpu_percent": 35.41,
- "ram_percent": 48.2,
- "ram_used_gb": 30.68472671508789,
- "gpu_memory_used": 1252.0,
- "relative_time": 156.4842345714569
+ "timestamp": "2025-01-06T02:00:01.888028",
+ "cpu_percent": 95.29,
+ "ram_percent": 48.0,
+ "ram_used_gb": 30.559246063232422,
+ "gpu_memory_used": 2069.0,
+ "relative_time": 40.55751347541809
},
{
- "timestamp": "2025-01-04T01:33:04.488089",
- "cpu_percent": 47.22,
- "ram_percent": 48.4,
- "ram_used_gb": 30.78485870361328,
- "gpu_memory_used": 1254.0,
- "relative_time": 157.58868670463562
- },
- {
- "timestamp": "2025-01-04T01:33:05.592303",
- "cpu_percent": 36.14,
- "ram_percent": 48.5,
- "ram_used_gb": 30.87320327758789,
- "gpu_memory_used": 1254.0,
- "relative_time": 158.71629786491394
- },
- {
- "timestamp": "2025-01-04T01:33:06.721317",
- "cpu_percent": 38.46,
- "ram_percent": 48.2,
- "ram_used_gb": 30.668170928955078,
- "gpu_memory_used": 1254.0,
- "relative_time": 159.82655477523804
- },
- {
- "timestamp": "2025-01-04T01:33:07.827187",
- "cpu_percent": 35.81,
- "ram_percent": 48.4,
- "ram_used_gb": 30.777912139892578,
- "gpu_memory_used": 1254.0,
- "relative_time": 160.94229197502136
- },
- {
- "timestamp": "2025-01-04T01:33:08.943035",
- "cpu_percent": 39.24,
- "ram_percent": 48.5,
- "ram_used_gb": 30.86941146850586,
- "gpu_memory_used": 1254.0,
- "relative_time": 162.06378889083862
- },
- {
- "timestamp": "2025-01-04T01:33:10.063208",
- "cpu_percent": 51.52,
+ "timestamp": "2025-01-06T02:00:03.044225",
+ "cpu_percent": 96.41,
"ram_percent": 48.1,
- "ram_used_gb": 30.624229431152344,
- "gpu_memory_used": 1254.0,
- "relative_time": 163.16198420524597
+ "ram_used_gb": 30.57846450805664,
+ "gpu_memory_used": 2070.0,
+ "relative_time": 41.67921280860901
},
{
- "timestamp": "2025-01-04T01:33:11.163067",
- "cpu_percent": 48.99,
+ "timestamp": "2025-01-06T02:00:04.173396",
+ "cpu_percent": 98.56,
"ram_percent": 48.1,
- "ram_used_gb": 30.612281799316406,
- "gpu_memory_used": 1254.0,
- "relative_time": 164.26579809188843
+ "ram_used_gb": 30.579666137695312,
+ "gpu_memory_used": 2070.0,
+ "relative_time": 42.861982583999634
},
{
- "timestamp": "2025-01-04T01:33:12.266417",
+ "timestamp": "2025-01-06T02:00:05.356206",
+ "cpu_percent": 98.14,
+ "ram_percent": 48.3,
+ "ram_used_gb": 30.72429656982422,
+ "gpu_memory_used": 2070.0,
+ "relative_time": 44.000171184539795
+ },
+ {
+ "timestamp": "2025-01-06T02:00:06.485051",
+ "cpu_percent": 95.54,
+ "ram_percent": 48.0,
+ "ram_used_gb": 30.531700134277344,
+ "gpu_memory_used": 2069.0,
+ "relative_time": 45.14318823814392
+ },
+ {
+ "timestamp": "2025-01-06T02:00:07.624181",
+ "cpu_percent": 96.77,
+ "ram_percent": 48.1,
+ "ram_used_gb": 30.568119049072266,
+ "gpu_memory_used": 2070.0,
+ "relative_time": 46.264710903167725
+ },
+ {
+ "timestamp": "2025-01-06T02:00:08.758279",
+ "cpu_percent": 99.74,
+ "ram_percent": 48.0,
+ "ram_used_gb": 30.530475616455078,
+ "gpu_memory_used": 2071.0,
+ "relative_time": 47.39181351661682
+ },
+ {
+ "timestamp": "2025-01-06T02:00:09.876762",
+ "cpu_percent": 98.61,
+ "ram_percent": 48.1,
+ "ram_used_gb": 30.617321014404297,
+ "gpu_memory_used": 2072.0,
+ "relative_time": 48.66594886779785
+ },
+ {
+ "timestamp": "2025-01-06T02:00:11.145450",
+ "cpu_percent": 100.0,
+ "ram_percent": 48.2,
+ "ram_used_gb": 30.632938385009766,
+ "gpu_memory_used": 2071.0,
+ "relative_time": 49.81949210166931
+ },
+ {
+ "timestamp": "2025-01-06T02:00:12.300318",
+ "cpu_percent": 100.0,
+ "ram_percent": 48.3,
+ "ram_used_gb": 30.732288360595703,
+ "gpu_memory_used": 2068.0,
+ "relative_time": 50.97749900817871
+ },
+ {
+ "timestamp": "2025-01-06T02:00:13.467022",
+ "cpu_percent": 98.76,
+ "ram_percent": 48.1,
+ "ram_used_gb": 30.61214828491211,
+ "gpu_memory_used": 2069.0,
+ "relative_time": 52.09201526641846
+ },
+ {
+ "timestamp": "2025-01-06T02:00:14.576042",
+ "cpu_percent": 100.0,
+ "ram_percent": 48.3,
+ "ram_used_gb": 30.70488739013672,
+ "gpu_memory_used": 2083.0,
+ "relative_time": 53.26009011268616
+ },
+ {
+ "timestamp": "2025-01-06T02:00:15.749641",
+ "cpu_percent": 99.92,
+ "ram_percent": 48.3,
+ "ram_used_gb": 30.736248016357422,
+ "gpu_memory_used": 2082.0,
+ "relative_time": 54.40339493751526
+ },
+ {
+ "timestamp": "2025-01-06T02:00:16.896314",
+ "cpu_percent": 85.65,
+ "ram_percent": 48.1,
+ "ram_used_gb": 30.594017028808594,
+ "gpu_memory_used": 2082.0,
+ "relative_time": 55.51317882537842
+ },
+ {
+ "timestamp": "2025-01-06T02:00:17.999548",
+ "cpu_percent": 89.3,
+ "ram_percent": 48.1,
+ "ram_used_gb": 30.596832275390625,
+ "gpu_memory_used": 2082.0,
+ "relative_time": 56.65741968154907
+ },
+ {
+ "timestamp": "2025-01-06T02:00:19.138009",
+ "cpu_percent": 29.01,
+ "ram_percent": 48.1,
+ "ram_used_gb": 30.623390197753906,
+ "gpu_memory_used": 2073.0,
+ "relative_time": 57.802518367767334
+ },
+ {
+ "timestamp": "2025-01-06T02:00:20.291026",
+ "cpu_percent": 67.97,
+ "ram_percent": 48.2,
+ "ram_used_gb": 30.647132873535156,
+ "gpu_memory_used": 2073.0,
+ "relative_time": 58.922008991241455
+ },
+ {
+ "timestamp": "2025-01-06T02:00:21.412675",
+ "cpu_percent": 82.7,
+ "ram_percent": 48.2,
+ "ram_used_gb": 30.6431884765625,
+ "gpu_memory_used": 2071.0,
+ "relative_time": 60.05585193634033
+ },
+ {
+ "timestamp": "2025-01-06T02:00:22.550916",
+ "cpu_percent": 77.03,
+ "ram_percent": 48.2,
+ "ram_used_gb": 30.654644012451172,
+ "gpu_memory_used": 2069.0,
+ "relative_time": 61.18237376213074
+ },
+ {
+ "timestamp": "2025-01-06T02:00:23.670003",
+ "cpu_percent": 85.95,
+ "ram_percent": 48.2,
+ "ram_used_gb": 30.648406982421875,
+ "gpu_memory_used": 2069.0,
+ "relative_time": 62.266594648361206
+ },
+ {
+ "timestamp": "2025-01-06T02:00:24.747817",
+ "cpu_percent": 78.11,
+ "ram_percent": 48.2,
+ "ram_used_gb": 30.651287078857422,
+ "gpu_memory_used": 2062.0,
+ "relative_time": 63.40120267868042
+ },
+ {
+ "timestamp": "2025-01-06T02:00:25.895696",
+ "cpu_percent": 83.28,
+ "ram_percent": 48.2,
+ "ram_used_gb": 30.665794372558594,
+ "gpu_memory_used": 2062.0,
+ "relative_time": 64.55349016189575
+ },
+ {
+ "timestamp": "2025-01-06T02:00:27.046675",
+ "cpu_percent": 84.96,
+ "ram_percent": 48.2,
+ "ram_used_gb": 30.64820098876953,
+ "gpu_memory_used": 2062.0,
+ "relative_time": 65.66735076904297
+ },
+ {
+ "timestamp": "2025-01-06T02:00:28.152257",
+ "cpu_percent": 84.77,
+ "ram_percent": 48.2,
+ "ram_used_gb": 30.66122817993164,
+ "gpu_memory_used": 2074.0,
+ "relative_time": 66.77637505531311
+ },
+ {
+ "timestamp": "2025-01-06T02:00:29.261199",
+ "cpu_percent": 99.39,
+ "ram_percent": 48.2,
+ "ram_used_gb": 30.667972564697266,
+ "gpu_memory_used": 2082.0,
+ "relative_time": 67.92725157737732
+ },
+ {
+ "timestamp": "2025-01-06T02:00:30.411510",
+ "cpu_percent": 85.6,
+ "ram_percent": 48.2,
+ "ram_used_gb": 30.687171936035156,
+ "gpu_memory_used": 2082.0,
+ "relative_time": 69.0316333770752
+ },
+ {
+ "timestamp": "2025-01-06T02:00:31.521991",
+ "cpu_percent": 94.26,
+ "ram_percent": 48.2,
+ "ram_used_gb": 30.66115951538086,
+ "gpu_memory_used": 2104.0,
+ "relative_time": 70.16249108314514
+ },
+ {
+ "timestamp": "2025-01-06T02:00:32.654440",
+ "cpu_percent": 91.09,
+ "ram_percent": 48.2,
+ "ram_used_gb": 30.67467498779297,
+ "gpu_memory_used": 2104.0,
+ "relative_time": 71.28759574890137
+ },
+ {
+ "timestamp": "2025-01-06T02:00:33.775019",
+ "cpu_percent": 78.39,
+ "ram_percent": 48.2,
+ "ram_used_gb": 30.673912048339844,
+ "gpu_memory_used": 2101.0,
+ "relative_time": 72.43123960494995
+ },
+ {
+ "timestamp": "2025-01-06T02:00:34.911532",
+ "cpu_percent": 65.79,
+ "ram_percent": 48.2,
+ "ram_used_gb": 30.675281524658203,
+ "gpu_memory_used": 2097.0,
+ "relative_time": 73.5528736114502
+ },
+ {
+ "timestamp": "2025-01-06T02:00:36.044747",
+ "cpu_percent": 84.46,
+ "ram_percent": 48.2,
+ "ram_used_gb": 30.673004150390625,
+ "gpu_memory_used": 2097.0,
+ "relative_time": 74.67303442955017
+ },
+ {
+ "timestamp": "2025-01-06T02:00:37.166882",
+ "cpu_percent": 80.36,
+ "ram_percent": 48.2,
+ "ram_used_gb": 30.672035217285156,
+ "gpu_memory_used": 2085.0,
+ "relative_time": 75.80277967453003
+ },
+ {
+ "timestamp": "2025-01-06T02:00:38.287897",
+ "cpu_percent": 82.75,
+ "ram_percent": 48.2,
+ "ram_used_gb": 30.677658081054688,
+ "gpu_memory_used": 2071.0,
+ "relative_time": 76.90284442901611
+ },
+ {
+ "timestamp": "2025-01-06T02:00:39.391528",
+ "cpu_percent": 81.51,
+ "ram_percent": 48.2,
+ "ram_used_gb": 30.668991088867188,
+ "gpu_memory_used": 2071.0,
+ "relative_time": 78.05355834960938
+ },
+ {
+ "timestamp": "2025-01-06T02:00:40.541574",
+ "cpu_percent": 89.08,
+ "ram_percent": 48.2,
+ "ram_used_gb": 30.680435180664062,
+ "gpu_memory_used": 2070.0,
+ "relative_time": 79.18938493728638
+ },
+ {
+ "timestamp": "2025-01-06T02:00:41.681131",
+ "cpu_percent": 77.08,
+ "ram_percent": 48.2,
+ "ram_used_gb": 30.680683135986328,
+ "gpu_memory_used": 2075.0,
+ "relative_time": 80.32201266288757
+ },
+ {
+ "timestamp": "2025-01-06T02:00:42.815968",
+ "cpu_percent": 92.74,
+ "ram_percent": 48.0,
+ "ram_used_gb": 30.51242446899414,
+ "gpu_memory_used": 2075.0,
+ "relative_time": 81.43525695800781
+ },
+ {
+ "timestamp": "2025-01-06T02:00:43.925903",
+ "cpu_percent": 69.67,
+ "ram_percent": 48.0,
+ "ram_used_gb": 30.508132934570312,
+ "gpu_memory_used": 2073.0,
+ "relative_time": 82.55602645874023
+ },
+ {
+ "timestamp": "2025-01-06T02:00:45.048108",
+ "cpu_percent": 76.51,
+ "ram_percent": 47.9,
+ "ram_used_gb": 30.498050689697266,
+ "gpu_memory_used": 2073.0,
+ "relative_time": 83.67183542251587
+ },
+ {
+ "timestamp": "2025-01-06T02:00:46.157342",
+ "cpu_percent": 71.96,
+ "ram_percent": 47.9,
+ "ram_used_gb": 30.499706268310547,
+ "gpu_memory_used": 2073.0,
+ "relative_time": 84.79261541366577
+ },
+ {
+ "timestamp": "2025-01-06T02:00:47.282643",
+ "cpu_percent": 70.89,
+ "ram_percent": 47.9,
+ "ram_used_gb": 30.492206573486328,
+ "gpu_memory_used": 2073.0,
+ "relative_time": 85.89637041091919
+ },
+ {
+ "timestamp": "2025-01-06T02:00:48.385779",
+ "cpu_percent": 72.06,
+ "ram_percent": 47.9,
+ "ram_used_gb": 30.494159698486328,
+ "gpu_memory_used": 2072.0,
+ "relative_time": 86.99722957611084
+ },
+ {
+ "timestamp": "2025-01-06T02:00:49.489042",
+ "cpu_percent": 69.02,
+ "ram_percent": 48.0,
+ "ram_used_gb": 30.5125732421875,
+ "gpu_memory_used": 2072.0,
+ "relative_time": 88.13648676872253
+ },
+ {
+ "timestamp": "2025-01-06T02:00:50.619717",
+ "cpu_percent": 89.34,
+ "ram_percent": 48.0,
+ "ram_used_gb": 30.516311645507812,
+ "gpu_memory_used": 2072.0,
+ "relative_time": 89.25186491012573
+ },
+ {
+ "timestamp": "2025-01-06T02:00:51.730818",
+ "cpu_percent": 88.79,
+ "ram_percent": 48.0,
+ "ram_used_gb": 30.56262969970703,
+ "gpu_memory_used": 2067.0,
+ "relative_time": 90.39395761489868
+ },
+ {
+ "timestamp": "2025-01-06T02:00:52.879559",
+ "cpu_percent": 81.64,
+ "ram_percent": 48.2,
+ "ram_used_gb": 30.665996551513672,
+ "gpu_memory_used": 2078.0,
+ "relative_time": 91.57850098609924
+ },
+ {
+ "timestamp": "2025-01-06T02:00:54.072796",
+ "cpu_percent": 97.22,
+ "ram_percent": 48.3,
+ "ram_used_gb": 30.707321166992188,
+ "gpu_memory_used": 2079.0,
+ "relative_time": 92.74180054664612
+ },
+ {
+ "timestamp": "2025-01-06T02:00:55.221833",
+ "cpu_percent": 79.49,
+ "ram_percent": 48.1,
+ "ram_used_gb": 30.58167266845703,
+ "gpu_memory_used": 2079.0,
+ "relative_time": 93.87428188323975
+ },
+ {
+ "timestamp": "2025-01-06T02:00:56.368456",
+ "cpu_percent": 89.45,
+ "ram_percent": 48.2,
+ "ram_used_gb": 30.635387420654297,
+ "gpu_memory_used": 2079.0,
+ "relative_time": 95.03554892539978
+ },
+ {
+ "timestamp": "2025-01-06T02:00:57.518791",
+ "cpu_percent": 82.44,
+ "ram_percent": 48.2,
+ "ram_used_gb": 30.68875503540039,
+ "gpu_memory_used": 2075.0,
+ "relative_time": 96.16753339767456
+ },
+ {
+ "timestamp": "2025-01-06T02:00:58.651967",
+ "cpu_percent": 87.59,
+ "ram_percent": 48.3,
+ "ram_used_gb": 30.73841094970703,
+ "gpu_memory_used": 2075.0,
+ "relative_time": 97.31249833106995
+ },
+ {
+ "timestamp": "2025-01-06T02:00:59.802502",
+ "cpu_percent": 72.6,
+ "ram_percent": 48.5,
+ "ram_used_gb": 30.828655242919922,
+ "gpu_memory_used": 2074.0,
+ "relative_time": 98.44322228431702
+ },
+ {
+ "timestamp": "2025-01-06T02:01:00.930471",
+ "cpu_percent": 92.01,
+ "ram_percent": 48.6,
+ "ram_used_gb": 30.916194915771484,
+ "gpu_memory_used": 2057.0,
+ "relative_time": 99.55358910560608
+ },
+ {
+ "timestamp": "2025-01-06T02:01:02.046338",
+ "cpu_percent": 77.78,
+ "ram_percent": 48.7,
+ "ram_used_gb": 30.970348358154297,
+ "gpu_memory_used": 2059.0,
+ "relative_time": 100.7326455116272
+ },
+ {
+ "timestamp": "2025-01-06T02:01:03.221413",
+ "cpu_percent": 87.99,
+ "ram_percent": 48.8,
+ "ram_used_gb": 31.030376434326172,
+ "gpu_memory_used": 2065.0,
+ "relative_time": 101.87276792526245
+ },
+ {
+ "timestamp": "2025-01-06T02:01:04.365536",
+ "cpu_percent": 83.47,
+ "ram_percent": 48.8,
+ "ram_used_gb": 31.04818344116211,
+ "gpu_memory_used": 2066.0,
+ "relative_time": 103.00344848632812
+ },
+ {
+ "timestamp": "2025-01-06T02:01:05.495547",
+ "cpu_percent": 73.88,
+ "ram_percent": 48.3,
+ "ram_used_gb": 30.707366943359375,
+ "gpu_memory_used": 2066.0,
+ "relative_time": 104.14199256896973
+ },
+ {
+ "timestamp": "2025-01-06T02:01:06.627593",
+ "cpu_percent": 81.92,
+ "ram_percent": 48.3,
+ "ram_used_gb": 30.735427856445312,
+ "gpu_memory_used": 2065.0,
+ "relative_time": 105.27145409584045
+ },
+ {
+ "timestamp": "2025-01-06T02:01:07.762940",
+ "cpu_percent": 93.88,
+ "ram_percent": 48.3,
+ "ram_used_gb": 30.71178436279297,
+ "gpu_memory_used": 2073.0,
+ "relative_time": 106.42445015907288
+ },
+ {
+ "timestamp": "2025-01-06T02:01:08.908747",
+ "cpu_percent": 78.18,
+ "ram_percent": 48.2,
+ "ram_used_gb": 30.685108184814453,
+ "gpu_memory_used": 2073.0,
+ "relative_time": 107.53882956504822
+ },
+ {
+ "timestamp": "2025-01-06T02:01:10.023117",
+ "cpu_percent": 80.25,
+ "ram_percent": 48.3,
+ "ram_used_gb": 30.693721771240234,
+ "gpu_memory_used": 2073.0,
+ "relative_time": 108.62077617645264
+ },
+ {
+ "timestamp": "2025-01-06T02:01:11.125442",
+ "cpu_percent": 74.96,
+ "ram_percent": 45.8,
+ "ram_used_gb": 29.10287094116211,
+ "gpu_memory_used": 1121.0,
+ "relative_time": 109.79581832885742
+ },
+ {
+ "timestamp": "2025-01-06T02:01:12.277407",
+ "cpu_percent": 82.89,
+ "ram_percent": 45.6,
+ "ram_used_gb": 28.987510681152344,
+ "gpu_memory_used": 1115.0,
+ "relative_time": 110.89319515228271
+ },
+ {
+ "timestamp": "2025-01-06T02:01:13.382871",
+ "cpu_percent": 79.66,
+ "ram_percent": 45.6,
+ "ram_used_gb": 28.993927001953125,
+ "gpu_memory_used": 1119.0,
+ "relative_time": 112.00745177268982
+ },
+ {
+ "timestamp": "2025-01-06T02:01:14.498991",
+ "cpu_percent": 86.41,
+ "ram_percent": 45.6,
+ "ram_used_gb": 29.035633087158203,
+ "gpu_memory_used": 1119.0,
+ "relative_time": 113.1147940158844
+ },
+ {
+ "timestamp": "2025-01-06T02:01:15.600091",
+ "cpu_percent": 94.06,
+ "ram_percent": 45.6,
+ "ram_used_gb": 28.978424072265625,
+ "gpu_memory_used": 1115.0,
+ "relative_time": 114.23285436630249
+ },
+ {
+ "timestamp": "2025-01-06T02:01:16.722096",
+ "cpu_percent": 72.28,
+ "ram_percent": 45.5,
+ "ram_used_gb": 28.96765899658203,
+ "gpu_memory_used": 1113.0,
+ "relative_time": 115.35184216499329
+ },
+ {
+ "timestamp": "2025-01-06T02:01:17.840265",
+ "cpu_percent": 74.65,
+ "ram_percent": 45.5,
+ "ram_used_gb": 28.946372985839844,
+ "gpu_memory_used": 1115.0,
+ "relative_time": 116.5044412612915
+ },
+ {
+ "timestamp": "2025-01-06T02:01:18.991455",
+ "cpu_percent": 87.44,
+ "ram_percent": 45.5,
+ "ram_used_gb": 28.957916259765625,
+ "gpu_memory_used": 1123.0,
+ "relative_time": 117.65444111824036
+ },
+ {
+ "timestamp": "2025-01-06T02:01:20.139485",
+ "cpu_percent": 86.77,
+ "ram_percent": 45.5,
+ "ram_used_gb": 28.956340789794922,
+ "gpu_memory_used": 1126.0,
+ "relative_time": 118.77146029472351
+ },
+ {
+ "timestamp": "2025-01-06T02:01:21.266216",
+ "cpu_percent": 75.49,
+ "ram_percent": 45.5,
+ "ram_used_gb": 28.958038330078125,
+ "gpu_memory_used": 1126.0,
+ "relative_time": 119.87909436225891
+ },
+ {
+ "timestamp": "2025-01-06T02:01:22.363169",
+ "cpu_percent": 79.51,
+ "ram_percent": 45.5,
+ "ram_used_gb": 28.949172973632812,
+ "gpu_memory_used": 1124.0,
+ "relative_time": 121.01380467414856
+ },
+ {
+ "timestamp": "2025-01-06T02:01:23.498096",
+ "cpu_percent": 85.16,
+ "ram_percent": 45.5,
+ "ram_used_gb": 28.958885192871094,
+ "gpu_memory_used": 1124.0,
+ "relative_time": 122.11237931251526
+ },
+ {
+ "timestamp": "2025-01-06T02:01:24.603852",
+ "cpu_percent": 67.16,
+ "ram_percent": 45.5,
+ "ram_used_gb": 28.936931610107422,
+ "gpu_memory_used": 1124.0,
+ "relative_time": 123.20061945915222
+ },
+ {
+ "timestamp": "2025-01-06T02:01:25.691913",
+ "cpu_percent": 68.53,
+ "ram_percent": 45.5,
+ "ram_used_gb": 28.933303833007812,
+ "gpu_memory_used": 1118.0,
+ "relative_time": 124.33519053459167
+ },
+ {
+ "timestamp": "2025-01-06T02:01:26.828745",
+ "cpu_percent": 80.47,
+ "ram_percent": 45.5,
+ "ram_used_gb": 28.923110961914062,
+ "gpu_memory_used": 1118.0,
+ "relative_time": 125.48865795135498
+ },
+ {
+ "timestamp": "2025-01-06T02:01:27.979825",
+ "cpu_percent": 79.44,
+ "ram_percent": 45.4,
+ "ram_used_gb": 28.900054931640625,
+ "gpu_memory_used": 1125.0,
+ "relative_time": 126.59582543373108
+ },
+ {
+ "timestamp": "2025-01-06T02:01:29.080446",
+ "cpu_percent": 72.07,
+ "ram_percent": 45.4,
+ "ram_used_gb": 28.896743774414062,
+ "gpu_memory_used": 1125.0,
+ "relative_time": 127.70408391952515
+ },
+ {
+ "timestamp": "2025-01-06T02:01:30.187503",
+ "cpu_percent": 79.71,
+ "ram_percent": 45.4,
+ "ram_used_gb": 28.90612030029297,
+ "gpu_memory_used": 1126.0,
+ "relative_time": 128.84098839759827
+ },
+ {
+ "timestamp": "2025-01-06T02:01:31.327260",
+ "cpu_percent": 82.71,
+ "ram_percent": 45.4,
+ "ram_used_gb": 28.899269104003906,
+ "gpu_memory_used": 1126.0,
+ "relative_time": 129.94981002807617
+ },
+ {
+ "timestamp": "2025-01-06T02:01:32.435487",
+ "cpu_percent": 69.43,
+ "ram_percent": 45.5,
+ "ram_used_gb": 28.913204193115234,
+ "gpu_memory_used": 1124.0,
+ "relative_time": 131.05302381515503
+ },
+ {
+ "timestamp": "2025-01-06T02:01:33.538589",
+ "cpu_percent": 69.85,
+ "ram_percent": 45.4,
+ "ram_used_gb": 28.902801513671875,
+ "gpu_memory_used": 1117.0,
+ "relative_time": 132.18232107162476
+ },
+ {
+ "timestamp": "2025-01-06T02:01:34.676977",
+ "cpu_percent": 76.86,
+ "ram_percent": 45.4,
+ "ram_used_gb": 28.90363311767578,
+ "gpu_memory_used": 1113.0,
+ "relative_time": 133.3194751739502
+ },
+ {
+ "timestamp": "2025-01-06T02:01:35.813043",
+ "cpu_percent": 92.46,
+ "ram_percent": 45.5,
+ "ram_used_gb": 28.924484252929688,
+ "gpu_memory_used": 1111.0,
+ "relative_time": 134.4480812549591
+ },
+ {
+ "timestamp": "2025-01-06T02:01:36.934534",
+ "cpu_percent": 91.6,
+ "ram_percent": 45.5,
+ "ram_used_gb": 28.93694305419922,
+ "gpu_memory_used": 1112.0,
+ "relative_time": 135.552565574646
+ },
+ {
+ "timestamp": "2025-01-06T02:01:38.046439",
+ "cpu_percent": 87.57,
+ "ram_percent": 45.5,
+ "ram_used_gb": 28.928115844726562,
+ "gpu_memory_used": 1112.0,
+ "relative_time": 136.66557025909424
+ },
+ {
+ "timestamp": "2025-01-06T02:01:39.150774",
+ "cpu_percent": 71.04,
+ "ram_percent": 45.4,
+ "ram_used_gb": 28.89849853515625,
+ "gpu_memory_used": 1112.0,
+ "relative_time": 137.79479718208313
+ },
+ {
+ "timestamp": "2025-01-06T02:01:40.286829",
+ "cpu_percent": 72.61,
+ "ram_percent": 45.4,
+ "ram_used_gb": 28.89031219482422,
+ "gpu_memory_used": 1111.0,
+ "relative_time": 138.92065834999084
+ },
+ {
+ "timestamp": "2025-01-06T02:01:41.411672",
+ "cpu_percent": 71.29,
+ "ram_percent": 45.4,
+ "ram_used_gb": 28.862991333007812,
+ "gpu_memory_used": 1111.0,
+ "relative_time": 140.02822160720825
+ },
+ {
+ "timestamp": "2025-01-06T02:01:42.514155",
+ "cpu_percent": 73.17,
+ "ram_percent": 45.4,
+ "ram_used_gb": 28.872455596923828,
+ "gpu_memory_used": 1112.0,
+ "relative_time": 141.16313910484314
+ },
+ {
+ "timestamp": "2025-01-06T02:01:43.647529",
+ "cpu_percent": 79.79,
+ "ram_percent": 45.4,
+ "ram_used_gb": 28.88043212890625,
+ "gpu_memory_used": 1107.0,
+ "relative_time": 142.30250215530396
+ },
+ {
+ "timestamp": "2025-01-06T02:01:44.783347",
+ "cpu_percent": 80.66,
+ "ram_percent": 45.4,
+ "ram_used_gb": 28.876739501953125,
+ "gpu_memory_used": 1107.0,
+ "relative_time": 143.39962935447693
+ },
+ {
+ "timestamp": "2025-01-06T02:01:45.887340",
+ "cpu_percent": 53.24,
+ "ram_percent": 45.4,
+ "ram_used_gb": 28.876079559326172,
+ "gpu_memory_used": 1107.0,
+ "relative_time": 144.51881790161133
+ },
+ {
+ "timestamp": "2025-01-06T02:01:47.006876",
+ "cpu_percent": 76.76,
+ "ram_percent": 45.4,
+ "ram_used_gb": 28.878875732421875,
+ "gpu_memory_used": 1106.0,
+ "relative_time": 145.62467503547668
+ },
+ {
+ "timestamp": "2025-01-06T02:01:48.112999",
+ "cpu_percent": 87.83,
+ "ram_percent": 45.4,
+ "ram_used_gb": 28.910343170166016,
+ "gpu_memory_used": 1109.0,
+ "relative_time": 146.79565000534058
+ },
+ {
+ "timestamp": "2025-01-06T02:01:49.275639",
+ "cpu_percent": 78.68,
+ "ram_percent": 45.5,
+ "ram_used_gb": 28.941452026367188,
+ "gpu_memory_used": 1112.0,
+ "relative_time": 147.9065911769867
+ },
+ {
+ "timestamp": "2025-01-06T02:01:50.392507",
+ "cpu_percent": 92.35,
+ "ram_percent": 45.5,
+ "ram_used_gb": 28.93001937866211,
+ "gpu_memory_used": 1111.0,
+ "relative_time": 149.0513255596161
+ },
+ {
+ "timestamp": "2025-01-06T02:01:51.531827",
+ "cpu_percent": 74.92,
+ "ram_percent": 45.5,
+ "ram_used_gb": 28.972396850585938,
+ "gpu_memory_used": 1111.0,
+ "relative_time": 150.190589427948
+ },
+ {
+ "timestamp": "2025-01-06T02:01:52.681259",
+ "cpu_percent": 83.39,
+ "ram_percent": 45.5,
+ "ram_used_gb": 28.97113800048828,
+ "gpu_memory_used": 1109.0,
+ "relative_time": 151.35132122039795
+ },
+ {
+ "timestamp": "2025-01-06T02:01:53.835701",
+ "cpu_percent": 76.66,
+ "ram_percent": 45.6,
+ "ram_used_gb": 29.026382446289062,
+ "gpu_memory_used": 1109.0,
+ "relative_time": 152.50396990776062
+ },
+ {
+ "timestamp": "2025-01-06T02:01:54.984694",
+ "cpu_percent": 90.89,
+ "ram_percent": 45.6,
+ "ram_used_gb": 28.993335723876953,
+ "gpu_memory_used": 1117.0,
+ "relative_time": 153.65559840202332
+ },
+ {
+ "timestamp": "2025-01-06T02:01:56.136412",
+ "cpu_percent": 87.1,
+ "ram_percent": 45.6,
+ "ram_used_gb": 28.996482849121094,
+ "gpu_memory_used": 1114.0,
+ "relative_time": 154.75753116607666
+ },
+ {
+ "timestamp": "2025-01-06T02:01:57.248956",
+ "cpu_percent": 94.48,
+ "ram_percent": 45.6,
+ "ram_used_gb": 29.01409912109375,
+ "gpu_memory_used": 1114.0,
+ "relative_time": 155.8850748538971
+ },
+ {
+ "timestamp": "2025-01-06T02:01:58.363971",
+ "cpu_percent": 68.38,
+ "ram_percent": 45.6,
+ "ram_used_gb": 29.0169677734375,
+ "gpu_memory_used": 1114.0,
+ "relative_time": 156.99017000198364
+ },
+ {
+ "timestamp": "2025-01-06T02:01:59.482082",
+ "cpu_percent": 66.96,
+ "ram_percent": 45.6,
+ "ram_used_gb": 28.99835205078125,
+ "gpu_memory_used": 1112.0,
+ "relative_time": 158.11476922035217
+ },
+ {
+ "timestamp": "2025-01-06T02:02:00.599761",
+ "cpu_percent": 72.88,
+ "ram_percent": 45.6,
+ "ram_used_gb": 29.014999389648438,
+ "gpu_memory_used": 1111.0,
+ "relative_time": 159.23513007164001
+ },
+ {
+ "timestamp": "2025-01-06T02:02:01.724187",
+ "cpu_percent": 83.27,
+ "ram_percent": 45.6,
+ "ram_used_gb": 28.979934692382812,
+ "gpu_memory_used": 1109.0,
+ "relative_time": 160.3893871307373
+ },
+ {
+ "timestamp": "2025-01-06T02:02:02.874035",
+ "cpu_percent": 84.25,
+ "ram_percent": 45.6,
+ "ram_used_gb": 29.00198745727539,
+ "gpu_memory_used": 1110.0,
+ "relative_time": 161.4882082939148
+ },
+ {
+ "timestamp": "2025-01-06T02:02:03.982069",
"cpu_percent": 46.27,
- "ram_percent": 48.1,
- "ram_used_gb": 30.584861755371094,
- "gpu_memory_used": 1252.0,
- "relative_time": 165.35981583595276
+ "ram_percent": 45.7,
+ "ram_used_gb": 29.048011779785156,
+ "gpu_memory_used": 1108.0,
+ "relative_time": 162.579003572464
},
{
- "timestamp": "2025-01-04T01:33:13.354673",
- "cpu_percent": 45.71,
- "ram_percent": 48.1,
- "ram_used_gb": 30.582279205322266,
- "gpu_memory_used": 1252.0,
- "relative_time": 166.45263361930847
+ "timestamp": "2025-01-06T02:02:05.067839",
+ "cpu_percent": 23.25,
+ "ram_percent": 45.7,
+ "ram_used_gb": 29.041690826416016,
+ "gpu_memory_used": 1102.0,
+ "relative_time": 163.69112300872803
},
{
- "timestamp": "2025-01-04T01:33:14.447308",
- "cpu_percent": 48.69,
- "ram_percent": 48.1,
- "ram_used_gb": 30.584793090820312,
- "gpu_memory_used": 1252.0,
- "relative_time": 167.54857754707336
+ "timestamp": "2025-01-06T02:02:06.170967",
+ "cpu_percent": 68.39,
+ "ram_percent": 45.6,
+ "ram_used_gb": 29.02823257446289,
+ "gpu_memory_used": 1102.0,
+ "relative_time": 164.8040177822113
},
{
- "timestamp": "2025-01-04T01:33:15.552042",
- "cpu_percent": 48.66,
- "ram_percent": 48.1,
- "ram_used_gb": 30.580883026123047,
- "gpu_memory_used": 1252.0,
- "relative_time": 168.659592628479
+ "timestamp": "2025-01-06T02:02:07.291947",
+ "cpu_percent": 67.11,
+ "ram_percent": 45.6,
+ "ram_used_gb": 29.026527404785156,
+ "gpu_memory_used": 1102.0,
+ "relative_time": 165.9222002029419
},
{
- "timestamp": "2025-01-04T01:33:16.653015",
- "cpu_percent": 50.37,
- "ram_percent": 48.1,
- "ram_used_gb": 30.573726654052734,
- "gpu_memory_used": 1252.0,
- "relative_time": 169.7969992160797
+ "timestamp": "2025-01-06T02:02:08.407847",
+ "cpu_percent": 75.64,
+ "ram_percent": 45.7,
+ "ram_used_gb": 29.04144287109375,
+ "gpu_memory_used": 1101.0,
+ "relative_time": 167.02184295654297
},
{
- "timestamp": "2025-01-04T01:33:17.802854",
- "cpu_percent": 49.45,
- "ram_percent": 48.1,
- "ram_used_gb": 30.587318420410156,
- "gpu_memory_used": 1252.0,
- "relative_time": 170.891606092453
+ "timestamp": "2025-01-06T02:02:09.510086",
+ "cpu_percent": 66.88,
+ "ram_percent": 45.6,
+ "ram_used_gb": 28.97747039794922,
+ "gpu_memory_used": 1101.0,
+ "relative_time": 168.15068435668945
},
{
- "timestamp": "2025-01-04T01:33:18.893192",
- "cpu_percent": 50.16,
- "ram_percent": 48.1,
- "ram_used_gb": 30.5953369140625,
- "gpu_memory_used": 1252.0,
- "relative_time": 172.0133557319641
+ "timestamp": "2025-01-06T02:02:10.642415",
+ "cpu_percent": 69.38,
+ "ram_percent": 45.6,
+ "ram_used_gb": 28.98614501953125,
+ "gpu_memory_used": 1101.0,
+ "relative_time": 169.26578378677368
},
{
- "timestamp": "2025-01-04T01:33:20.008593",
- "cpu_percent": 47.57,
- "ram_percent": 48.1,
- "ram_used_gb": 30.6124267578125,
- "gpu_memory_used": 1252.0,
- "relative_time": 173.0913679599762
+ "timestamp": "2025-01-06T02:02:11.757629",
+ "cpu_percent": 75.01,
+ "ram_percent": 45.6,
+ "ram_used_gb": 28.98458480834961,
+ "gpu_memory_used": 1101.0,
+ "relative_time": 170.3762068748474
},
{
- "timestamp": "2025-01-04T01:33:21.097576",
- "cpu_percent": 44.32,
- "ram_percent": 48.1,
- "ram_used_gb": 30.584686279296875,
- "gpu_memory_used": 1252.0,
- "relative_time": 174.20030999183655
+ "timestamp": "2025-01-06T02:02:12.861200",
+ "cpu_percent": 65.3,
+ "ram_percent": 45.6,
+ "ram_used_gb": 28.996482849121094,
+ "gpu_memory_used": 1101.0,
+ "relative_time": 171.49599766731262
},
{
- "timestamp": "2025-01-04T01:33:22.201335",
- "cpu_percent": 49.01,
- "ram_percent": 48.0,
- "ram_used_gb": 30.547630310058594,
- "gpu_memory_used": 1252.0,
- "relative_time": 175.30235862731934
+ "timestamp": "2025-01-06T02:02:13.979027",
+ "cpu_percent": 70.03,
+ "ram_percent": 45.6,
+ "ram_used_gb": 29.004348754882812,
+ "gpu_memory_used": 1101.0,
+ "relative_time": 172.63518261909485
},
{
- "timestamp": "2025-01-04T01:33:23.306131",
- "cpu_percent": 43.7,
- "ram_percent": 48.0,
- "ram_used_gb": 30.559757232666016,
- "gpu_memory_used": 1251.0,
- "relative_time": 176.40550017356873
+ "timestamp": "2025-01-06T02:02:15.127058",
+ "cpu_percent": 74.56,
+ "ram_percent": 45.6,
+ "ram_used_gb": 28.98357391357422,
+ "gpu_memory_used": 1100.0,
+ "relative_time": 173.78076720237732
},
{
- "timestamp": "2025-01-04T01:33:24.408896",
- "cpu_percent": 48.77,
- "ram_percent": 48.0,
- "ram_used_gb": 30.5601806640625,
- "gpu_memory_used": 1251.0,
- "relative_time": 177.4984576702118
+ "timestamp": "2025-01-06T02:02:16.269797",
+ "cpu_percent": 83.33,
+ "ram_percent": 45.6,
+ "ram_used_gb": 29.013275146484375,
+ "gpu_memory_used": 1102.0,
+ "relative_time": 174.92202520370483
},
{
- "timestamp": "2025-01-04T01:33:25.496705",
- "cpu_percent": 50.56,
- "ram_percent": 48.0,
- "ram_used_gb": 30.556926727294922,
- "gpu_memory_used": 1251.0,
- "relative_time": 178.58782863616943
+ "timestamp": "2025-01-06T02:02:17.403955",
+ "cpu_percent": 66.92,
+ "ram_percent": 45.6,
+ "ram_used_gb": 28.988414764404297,
+ "gpu_memory_used": 1102.0,
+ "relative_time": 175.9813597202301
},
{
- "timestamp": "2025-01-04T01:33:26.588438",
- "cpu_percent": 47.76,
- "ram_percent": 48.0,
- "ram_used_gb": 30.53600311279297,
- "gpu_memory_used": 1251.0,
- "relative_time": 179.67969870567322
+ "timestamp": "2025-01-06T02:02:18.464105",
+ "cpu_percent": 65.91,
+ "ram_percent": 45.6,
+ "ram_used_gb": 28.997055053710938,
+ "gpu_memory_used": 1102.0,
+ "relative_time": 177.0992946624756
},
{
- "timestamp": "2025-01-04T01:33:27.679807",
- "cpu_percent": 49.0,
- "ram_percent": 48.0,
- "ram_used_gb": 30.540546417236328,
- "gpu_memory_used": 1251.0,
- "relative_time": 180.78388810157776
+ "timestamp": "2025-01-06T02:02:19.584316",
+ "cpu_percent": 64.77,
+ "ram_percent": 45.6,
+ "ram_used_gb": 28.98046875,
+ "gpu_memory_used": 1102.0,
+ "relative_time": 178.22958946228027
},
{
- "timestamp": "2025-01-04T01:33:28.780263",
- "cpu_percent": 49.25,
- "ram_percent": 48.0,
- "ram_used_gb": 30.55233383178711,
- "gpu_memory_used": 1251.0,
- "relative_time": 181.88185930252075
+ "timestamp": "2025-01-06T02:02:20.719166",
+ "cpu_percent": 74.76,
+ "ram_percent": 45.6,
+ "ram_used_gb": 29.008087158203125,
+ "gpu_memory_used": 1100.0,
+ "relative_time": 179.40734457969666
},
{
- "timestamp": "2025-01-04T01:33:29.881869",
- "cpu_percent": 47.08,
- "ram_percent": 48.1,
- "ram_used_gb": 30.56603240966797,
- "gpu_memory_used": 1251.0,
- "relative_time": 182.9666450023651
+ "timestamp": "2025-01-06T02:02:21.901171",
+ "cpu_percent": 73.14,
+ "ram_percent": 45.6,
+ "ram_used_gb": 28.98965072631836,
+ "gpu_memory_used": 1101.0,
+ "relative_time": 180.55075860023499
},
{
- "timestamp": "2025-01-04T01:33:30.957821",
- "cpu_percent": 45.77,
- "ram_percent": 48.0,
- "ram_used_gb": 30.559410095214844,
- "gpu_memory_used": 1251.0,
- "relative_time": 184.05338644981384
+ "timestamp": "2025-01-06T02:02:23.037921",
+ "cpu_percent": 75.12,
+ "ram_percent": 45.6,
+ "ram_used_gb": 29.005779266357422,
+ "gpu_memory_used": 1101.0,
+ "relative_time": 181.6956079006195
},
{
- "timestamp": "2025-01-04T01:33:32.047377",
- "cpu_percent": 50.79,
- "ram_percent": 48.0,
- "ram_used_gb": 30.534175872802734,
- "gpu_memory_used": 1251.0,
- "relative_time": 185.17484974861145
+ "timestamp": "2025-01-06T02:02:24.190508",
+ "cpu_percent": 68.59,
+ "ram_percent": 45.6,
+ "ram_used_gb": 28.99420166015625,
+ "gpu_memory_used": 1103.0,
+ "relative_time": 182.8263657093048
},
{
- "timestamp": "2025-01-04T01:33:33.167413",
- "cpu_percent": 52.13,
- "ram_percent": 48.0,
- "ram_used_gb": 30.54046630859375,
- "gpu_memory_used": 1266.0,
- "relative_time": 186.23550605773926
+ "timestamp": "2025-01-06T02:02:25.318426",
+ "cpu_percent": 71.09,
+ "ram_percent": 45.5,
+ "ram_used_gb": 28.96915054321289,
+ "gpu_memory_used": 1103.0,
+ "relative_time": 183.96249556541443
},
{
- "timestamp": "2025-01-04T01:33:34.226743",
- "cpu_percent": 43.81,
- "ram_percent": 48.0,
- "ram_used_gb": 30.54621124267578,
- "gpu_memory_used": 1266.0,
- "relative_time": 187.30887961387634
+ "timestamp": "2025-01-06T02:02:26.452298",
+ "cpu_percent": 67.23,
+ "ram_percent": 45.5,
+ "ram_used_gb": 28.9666748046875,
+ "gpu_memory_used": 1103.0,
+ "relative_time": 185.08496713638306
},
{
- "timestamp": "2025-01-04T01:33:35.303398",
- "cpu_percent": 49.28,
- "ram_percent": 48.0,
- "ram_used_gb": 30.545230865478516,
- "gpu_memory_used": 1264.0,
- "relative_time": 188.40410709381104
+ "timestamp": "2025-01-06T02:02:27.575091",
+ "cpu_percent": 75.85,
+ "ram_percent": 45.5,
+ "ram_used_gb": 28.963436126708984,
+ "gpu_memory_used": 1100.0,
+ "relative_time": 186.21436381340027
},
{
- "timestamp": "2025-01-04T01:33:36.405660",
- "cpu_percent": 46.44,
- "ram_percent": 48.0,
- "ram_used_gb": 30.540679931640625,
- "gpu_memory_used": 1264.0,
- "relative_time": 189.47515082359314
+ "timestamp": "2025-01-06T02:02:28.698623",
+ "cpu_percent": 76.0,
+ "ram_percent": 45.5,
+ "ram_used_gb": 28.966102600097656,
+ "gpu_memory_used": 1101.0,
+ "relative_time": 187.35916352272034
},
{
- "timestamp": "2025-01-04T01:33:37.469955",
- "cpu_percent": 41.6,
- "ram_percent": 48.0,
- "ram_used_gb": 30.562320709228516,
- "gpu_memory_used": 1264.0,
- "relative_time": 190.56309294700623
+ "timestamp": "2025-01-06T02:02:29.850943",
+ "cpu_percent": 72.59,
+ "ram_percent": 45.5,
+ "ram_used_gb": 28.966720581054688,
+ "gpu_memory_used": 1101.0,
+ "relative_time": 188.48684239387512
},
{
- "timestamp": "2025-01-04T01:33:38.556728",
- "cpu_percent": 50.52,
- "ram_percent": 48.0,
- "ram_used_gb": 30.561084747314453,
- "gpu_memory_used": 1264.0,
- "relative_time": 191.66572499275208
+ "timestamp": "2025-01-06T02:02:30.973391",
+ "cpu_percent": 68.84,
+ "ram_percent": 45.6,
+ "ram_used_gb": 28.99535369873047,
+ "gpu_memory_used": 1109.0,
+ "relative_time": 189.58790564537048
},
{
- "timestamp": "2025-01-04T01:33:39.665385",
- "cpu_percent": 40.93,
- "ram_percent": 48.1,
- "ram_used_gb": 30.577682495117188,
- "gpu_memory_used": 1264.0,
- "relative_time": 192.76011109352112
+ "timestamp": "2025-01-06T02:02:32.080413",
+ "cpu_percent": 75.34,
+ "ram_percent": 45.6,
+ "ram_used_gb": 28.983352661132812,
+ "gpu_memory_used": 1109.0,
+ "relative_time": 190.70177555084229
},
{
- "timestamp": "2025-01-04T01:33:40.754482",
- "cpu_percent": 50.46,
- "ram_percent": 48.1,
- "ram_used_gb": 30.5740966796875,
- "gpu_memory_used": 1262.0,
- "relative_time": 193.90924453735352
+ "timestamp": "2025-01-06T02:02:33.181584",
+ "cpu_percent": 82.01,
+ "ram_percent": 45.6,
+ "ram_used_gb": 29.014503479003906,
+ "gpu_memory_used": 1109.0,
+ "relative_time": 191.79927110671997
},
{
- "timestamp": "2025-01-04T01:33:41.903437",
- "cpu_percent": 52.75,
- "ram_percent": 48.1,
- "ram_used_gb": 30.58869171142578,
- "gpu_memory_used": 1258.0,
- "relative_time": 195.0148274898529
+ "timestamp": "2025-01-06T02:02:34.289805",
+ "cpu_percent": 61.24,
+ "ram_percent": 45.6,
+ "ram_used_gb": 28.986770629882812,
+ "gpu_memory_used": 1108.0,
+ "relative_time": 192.92647433280945
},
{
- "timestamp": "2025-01-04T01:33:43.008520",
- "cpu_percent": 50.04,
- "ram_percent": 48.0,
- "ram_used_gb": 30.560386657714844,
- "gpu_memory_used": 1258.0,
- "relative_time": 196.12349009513855
+ "timestamp": "2025-01-06T02:02:35.408364",
+ "cpu_percent": 71.77,
+ "ram_percent": 45.6,
+ "ram_used_gb": 28.976062774658203,
+ "gpu_memory_used": 1108.0,
+ "relative_time": 194.02294254302979
},
{
- "timestamp": "2025-01-04T01:33:44.129194",
- "cpu_percent": 51.56,
- "ram_percent": 48.1,
- "ram_used_gb": 30.572277069091797,
- "gpu_memory_used": 1258.0,
- "relative_time": 197.20997285842896
+ "timestamp": "2025-01-06T02:02:36.509112",
+ "cpu_percent": 67.74,
+ "ram_percent": 45.5,
+ "ram_used_gb": 28.970108032226562,
+ "gpu_memory_used": 1108.0,
+ "relative_time": 195.13911247253418
},
{
- "timestamp": "2025-01-04T01:33:45.212927",
- "cpu_percent": 47.77,
- "ram_percent": 48.0,
- "ram_used_gb": 30.556873321533203,
- "gpu_memory_used": 1258.0,
- "relative_time": 198.29724264144897
+ "timestamp": "2025-01-06T02:02:37.630544",
+ "cpu_percent": 73.08,
+ "ram_percent": 45.6,
+ "ram_used_gb": 28.982391357421875,
+ "gpu_memory_used": 1108.0,
+ "relative_time": 196.25971221923828
},
{
- "timestamp": "2025-01-04T01:33:46.288883",
- "cpu_percent": 46.07,
- "ram_percent": 48.0,
- "ram_used_gb": 30.554439544677734,
- "gpu_memory_used": 1258.0,
- "relative_time": 199.39549779891968
+ "timestamp": "2025-01-06T02:02:38.751707",
+ "cpu_percent": 73.63,
+ "ram_percent": 45.5,
+ "ram_used_gb": 28.947036743164062,
+ "gpu_memory_used": 1108.0,
+ "relative_time": 197.3975374698639
},
{
- "timestamp": "2025-01-04T01:33:47.403171",
- "cpu_percent": 46.18,
- "ram_percent": 48.0,
- "ram_used_gb": 30.557025909423828,
- "gpu_memory_used": 1258.0,
- "relative_time": 200.50221276283264
+ "timestamp": "2025-01-06T02:02:39.890645",
+ "cpu_percent": 72.01,
+ "ram_percent": 45.5,
+ "ram_used_gb": 28.969207763671875,
+ "gpu_memory_used": 1108.0,
+ "relative_time": 198.50470900535583
},
{
- "timestamp": "2025-01-04T01:33:48.495515",
- "cpu_percent": 48.09,
- "ram_percent": 48.0,
- "ram_used_gb": 30.558856964111328,
- "gpu_memory_used": 1267.0,
- "relative_time": 201.62405467033386
+ "timestamp": "2025-01-06T02:02:40.995583",
+ "cpu_percent": 70.7,
+ "ram_percent": 45.5,
+ "ram_used_gb": 28.956737518310547,
+ "gpu_memory_used": 1108.0,
+ "relative_time": 199.64927458763123
},
{
- "timestamp": "2025-01-04T01:33:49.630725",
- "cpu_percent": 53.47,
- "ram_percent": 48.1,
- "ram_used_gb": 30.59896469116211,
- "gpu_memory_used": 1283.0,
- "relative_time": 202.70162987709045
+ "timestamp": "2025-01-06T02:02:42.130610",
+ "cpu_percent": 72.67,
+ "ram_percent": 45.6,
+ "ram_used_gb": 28.981361389160156,
+ "gpu_memory_used": 1108.0,
+ "relative_time": 200.74797558784485
},
{
- "timestamp": "2025-01-04T01:33:50.709226",
- "cpu_percent": 44.74,
- "ram_percent": 48.1,
- "ram_used_gb": 30.581470489501953,
- "gpu_memory_used": 1281.0,
- "relative_time": 203.78962469100952
+ "timestamp": "2025-01-06T02:02:43.233560",
+ "cpu_percent": 70.15,
+ "ram_percent": 45.6,
+ "ram_used_gb": 28.97726821899414,
+ "gpu_memory_used": 1108.0,
+ "relative_time": 201.86889624595642
},
{
- "timestamp": "2025-01-04T01:33:51.782302",
- "cpu_percent": 43.4,
- "ram_percent": 48.1,
- "ram_used_gb": 30.582977294921875,
- "gpu_memory_used": 1282.0,
- "relative_time": 204.87054562568665
+ "timestamp": "2025-01-06T02:02:44.354710",
+ "cpu_percent": 68.13,
+ "ram_percent": 45.6,
+ "ram_used_gb": 28.976673126220703,
+ "gpu_memory_used": 1108.0,
+ "relative_time": 202.98869681358337
},
{
- "timestamp": "2025-01-04T01:33:52.868020",
- "cpu_percent": 51.75,
- "ram_percent": 48.0,
- "ram_used_gb": 30.540206909179688,
- "gpu_memory_used": 1282.0,
- "relative_time": 205.95602416992188
+ "timestamp": "2025-01-06T02:02:45.472846",
+ "cpu_percent": 71.0,
+ "ram_percent": 45.5,
+ "ram_used_gb": 28.96390151977539,
+ "gpu_memory_used": 1108.0,
+ "relative_time": 204.10803961753845
},
{
- "timestamp": "2025-01-04T01:33:53.956023",
- "cpu_percent": 46.36,
- "ram_percent": 48.0,
- "ram_used_gb": 30.562763214111328,
- "gpu_memory_used": 1282.0,
- "relative_time": 207.06639337539673
+ "timestamp": "2025-01-06T02:02:46.590739",
+ "cpu_percent": 73.99,
+ "ram_percent": 45.5,
+ "ram_used_gb": 28.964736938476562,
+ "gpu_memory_used": 1107.0,
+ "relative_time": 205.21335530281067
},
{
- "timestamp": "2025-01-04T01:33:55.064043",
- "cpu_percent": 43.91,
- "ram_percent": 48.0,
- "ram_used_gb": 30.560302734375,
- "gpu_memory_used": 1277.0,
- "relative_time": 208.16699743270874
+ "timestamp": "2025-01-06T02:02:47.693619",
+ "cpu_percent": 88.54,
+ "ram_percent": 45.7,
+ "ram_used_gb": 29.073219299316406,
+ "gpu_memory_used": 1096.0,
+ "relative_time": 206.3404152393341
},
{
- "timestamp": "2025-01-04T01:33:56.170674",
- "cpu_percent": 50.01,
- "ram_percent": 48.1,
- "ram_used_gb": 30.576671600341797,
- "gpu_memory_used": 1281.0,
- "relative_time": 209.28660559654236
+ "timestamp": "2025-01-06T02:02:48.831178",
+ "cpu_percent": 70.85,
+ "ram_percent": 45.7,
+ "ram_used_gb": 29.051692962646484,
+ "gpu_memory_used": 1111.0,
+ "relative_time": 207.6542706489563
},
{
- "timestamp": "2025-01-04T01:33:57.288316",
- "cpu_percent": 50.51,
- "ram_percent": 48.0,
- "ram_used_gb": 30.551471710205078,
- "gpu_memory_used": 1280.0,
- "relative_time": 210.4030442237854
+ "timestamp": "2025-01-06T02:02:50.135974",
+ "cpu_percent": 81.68,
+ "ram_percent": 44.9,
+ "ram_used_gb": 28.559471130371094,
+ "gpu_memory_used": 1047.0,
+ "relative_time": 208.75140976905823
},
{
- "timestamp": "2025-01-04T01:33:58.407032",
- "cpu_percent": 49.43,
- "ram_percent": 48.1,
- "ram_used_gb": 30.576725006103516,
- "gpu_memory_used": 1280.0,
- "relative_time": 211.50494027137756
+ "timestamp": "2025-01-06T02:02:51.237222",
+ "cpu_percent": 68.7,
+ "ram_percent": 44.8,
+ "ram_used_gb": 28.52242660522461,
+ "gpu_memory_used": 1044.0,
+ "relative_time": 209.86320424079895
},
{
- "timestamp": "2025-01-04T01:33:59.497806",
- "cpu_percent": 46.68,
- "ram_percent": 48.1,
- "ram_used_gb": 30.59314727783203,
- "gpu_memory_used": 1279.0,
- "relative_time": 212.6002950668335
+ "timestamp": "2025-01-06T02:02:52.355393",
+ "cpu_percent": 75.6,
+ "ram_percent": 44.9,
+ "ram_used_gb": 28.53069305419922,
+ "gpu_memory_used": 1044.0,
+ "relative_time": 210.99385690689087
},
{
- "timestamp": "2025-01-04T01:34:00.598484",
- "cpu_percent": 57.44,
- "ram_percent": 48.1,
- "ram_used_gb": 30.60983657836914,
- "gpu_memory_used": 1285.0,
- "relative_time": 213.7150914669037
+ "timestamp": "2025-01-06T02:02:53.488242",
+ "cpu_percent": 72.12,
+ "ram_percent": 44.8,
+ "ram_used_gb": 28.527976989746094,
+ "gpu_memory_used": 1045.0,
+ "relative_time": 212.15026259422302
},
{
- "timestamp": "2025-01-04T01:34:01.719968",
- "cpu_percent": 54.58,
- "ram_percent": 48.1,
- "ram_used_gb": 30.586456298828125,
- "gpu_memory_used": 1283.0,
- "relative_time": 214.80932760238647
+ "timestamp": "2025-01-06T02:02:54.635771",
+ "cpu_percent": 75.99,
+ "ram_percent": 44.8,
+ "ram_used_gb": 28.52718734741211,
+ "gpu_memory_used": 1047.0,
+ "relative_time": 213.26202607154846
},
{
- "timestamp": "2025-01-04T01:34:02.807573",
- "cpu_percent": 61.69,
- "ram_percent": 48.0,
- "ram_used_gb": 30.53356170654297,
- "gpu_memory_used": 1281.0,
- "relative_time": 215.88946890830994
+ "timestamp": "2025-01-06T02:02:55.743075",
+ "cpu_percent": 71.68,
+ "ram_percent": 44.8,
+ "ram_used_gb": 28.513080596923828,
+ "gpu_memory_used": 1047.0,
+ "relative_time": 214.37045192718506
},
{
- "timestamp": "2025-01-04T01:34:03.885672",
- "cpu_percent": 49.46,
- "ram_percent": 48.0,
- "ram_used_gb": 30.517364501953125,
- "gpu_memory_used": 1283.0,
- "relative_time": 216.97114062309265
+ "timestamp": "2025-01-06T02:02:56.860009",
+ "cpu_percent": 73.16,
+ "ram_percent": 44.8,
+ "ram_used_gb": 28.4967041015625,
+ "gpu_memory_used": 1046.0,
+ "relative_time": 215.45900750160217
},
{
- "timestamp": "2025-01-04T01:34:04.974449",
- "cpu_percent": 42.69,
- "ram_percent": 48.0,
- "ram_used_gb": 30.527969360351562,
- "gpu_memory_used": 1285.0,
- "relative_time": 218.10192775726318
+ "timestamp": "2025-01-06T02:02:57.948354",
+ "cpu_percent": 66.73,
+ "ram_percent": 44.8,
+ "ram_used_gb": 28.50171661376953,
+ "gpu_memory_used": 1039.0,
+ "relative_time": 216.58656573295593
},
{
- "timestamp": "2025-01-04T01:34:06.107947",
- "cpu_percent": 54.87,
- "ram_percent": 48.0,
- "ram_used_gb": 30.51028823852539,
- "gpu_memory_used": 1273.0,
- "relative_time": 219.17600679397583
+ "timestamp": "2025-01-06T02:02:59.070787",
+ "cpu_percent": 66.33,
+ "ram_percent": 44.8,
+ "ram_used_gb": 28.485286712646484,
+ "gpu_memory_used": 1030.0,
+ "relative_time": 217.7198555469513
},
{
- "timestamp": "2025-01-04T01:34:07.172153",
- "cpu_percent": 45.42,
- "ram_percent": 48.0,
- "ram_used_gb": 30.509258270263672,
- "gpu_memory_used": 1273.0,
- "relative_time": 220.28902983665466
+ "timestamp": "2025-01-06T02:03:00.210441",
+ "cpu_percent": 76.69,
+ "ram_percent": 44.8,
+ "ram_used_gb": 28.50790786743164,
+ "gpu_memory_used": 1041.0,
+ "relative_time": 218.82320833206177
},
{
- "timestamp": "2025-01-04T01:34:08.289623",
- "cpu_percent": 52.75,
- "ram_percent": 48.0,
- "ram_used_gb": 30.52011489868164,
- "gpu_memory_used": 1272.0,
- "relative_time": 221.39960098266602
+ "timestamp": "2025-01-06T02:03:01.312747",
+ "cpu_percent": 69.7,
+ "ram_percent": 44.8,
+ "ram_used_gb": 28.496017456054688,
+ "gpu_memory_used": 1042.0,
+ "relative_time": 219.93000483512878
},
{
- "timestamp": "2025-01-04T01:34:09.406158",
- "cpu_percent": 52.53,
- "ram_percent": 48.0,
- "ram_used_gb": 30.52783966064453,
- "gpu_memory_used": 1265.0,
- "relative_time": 222.49749565124512
+ "timestamp": "2025-01-06T02:03:02.415826",
+ "cpu_percent": 67.86,
+ "ram_percent": 44.8,
+ "ram_used_gb": 28.504711151123047,
+ "gpu_memory_used": 1042.0,
+ "relative_time": 221.04683136940002
},
{
- "timestamp": "2025-01-04T01:34:10.491042",
- "cpu_percent": 56.49,
- "ram_percent": 48.0,
- "ram_used_gb": 30.540733337402344,
- "gpu_memory_used": 1261.0,
- "relative_time": 223.5777132511139
+ "timestamp": "2025-01-06T02:03:03.526977",
+ "cpu_percent": 69.49,
+ "ram_percent": 44.8,
+ "ram_used_gb": 28.508529663085938,
+ "gpu_memory_used": 1041.0,
+ "relative_time": 222.13029503822327
},
{
- "timestamp": "2025-01-04T01:34:11.577710",
- "cpu_percent": 44.25,
- "ram_percent": 48.0,
- "ram_used_gb": 30.531757354736328,
- "gpu_memory_used": 1262.0,
- "relative_time": 224.68288159370422
+ "timestamp": "2025-01-06T02:03:04.618586",
+ "cpu_percent": 64.46,
+ "ram_percent": 44.8,
+ "ram_used_gb": 28.50463104248047,
+ "gpu_memory_used": 1041.0,
+ "relative_time": 223.2292401790619
},
{
- "timestamp": "2025-01-04T01:34:12.682455",
- "cpu_percent": 47.56,
- "ram_percent": 48.0,
- "ram_used_gb": 30.50157928466797,
- "gpu_memory_used": 1262.0,
- "relative_time": 225.78287291526794
+ "timestamp": "2025-01-06T02:03:05.723362",
+ "cpu_percent": 68.49,
+ "ram_percent": 44.8,
+ "ram_used_gb": 28.49188232421875,
+ "gpu_memory_used": 1041.0,
+ "relative_time": 224.3517026901245
},
{
- "timestamp": "2025-01-04T01:34:13.782976",
- "cpu_percent": 48.52,
- "ram_percent": 48.0,
- "ram_used_gb": 30.507736206054688,
- "gpu_memory_used": 1262.0,
- "relative_time": 226.8910207748413
+ "timestamp": "2025-01-06T02:03:06.831388",
+ "cpu_percent": 76.89,
+ "ram_percent": 44.8,
+ "ram_used_gb": 28.49285888671875,
+ "gpu_memory_used": 1041.0,
+ "relative_time": 225.4667146205902
},
{
- "timestamp": "2025-01-04T01:34:14.884200",
- "cpu_percent": 49.89,
- "ram_percent": 48.0,
- "ram_used_gb": 30.50653076171875,
- "gpu_memory_used": 1263.0,
- "relative_time": 228.04418087005615
+ "timestamp": "2025-01-06T02:03:07.951993",
+ "cpu_percent": 67.04,
+ "ram_percent": 44.8,
+ "ram_used_gb": 28.50518798828125,
+ "gpu_memory_used": 1041.0,
+ "relative_time": 226.58504056930542
},
{
- "timestamp": "2025-01-04T01:34:16.051189",
- "cpu_percent": 49.34,
- "ram_percent": 48.0,
- "ram_used_gb": 30.504470825195312,
- "gpu_memory_used": 1263.0,
- "relative_time": 229.13680815696716
+ "timestamp": "2025-01-06T02:03:09.069126",
+ "cpu_percent": 70.29,
+ "ram_percent": 44.8,
+ "ram_used_gb": 28.47330093383789,
+ "gpu_memory_used": 1041.0,
+ "relative_time": 227.708820104599
},
{
- "timestamp": "2025-01-04T01:34:17.136588",
- "cpu_percent": 47.8,
- "ram_percent": 47.9,
- "ram_used_gb": 30.496841430664062,
- "gpu_memory_used": 1263.0,
- "relative_time": 230.26778984069824
+ "timestamp": "2025-01-06T02:03:10.191425",
+ "cpu_percent": 71.98,
+ "ram_percent": 44.8,
+ "ram_used_gb": 28.495555877685547,
+ "gpu_memory_used": 1041.0,
+ "relative_time": 228.845196723938
},
{
- "timestamp": "2025-01-04T01:34:18.269616",
- "cpu_percent": 48.23,
- "ram_percent": 48.0,
- "ram_used_gb": 30.50909423828125,
- "gpu_memory_used": 1262.0,
- "relative_time": 231.3880865573883
- },
+ "timestamp": "2025-01-06T02:03:11.325421",
+ "cpu_percent": 80.36,
+ "ram_percent": 44.8,
+ "ram_used_gb": 28.496841430664062,
+ "gpu_memory_used": 1031.0,
+ "relative_time": 230.06402707099915
+ },
+ {
+ "timestamp": "2025-01-06T02:03:12.552864",
+ "cpu_percent": 88.18,
+ "ram_percent": 44.9,
+ "ram_used_gb": 28.545429229736328,
+ "gpu_memory_used": 1032.0,
+ "relative_time": 231.31830477714539
+ },
+ {
+ "timestamp": "2025-01-06T02:03:13.798580",
+ "cpu_percent": 97.64,
+ "ram_percent": 44.8,
+ "ram_used_gb": 28.51227569580078,
+ "gpu_memory_used": 1032.0,
+ "relative_time": 232.4179232120514
+ },
+ {
+ "timestamp": "2025-01-06T02:03:14.901645",
+ "cpu_percent": 74.59,
+ "ram_percent": 44.8,
+ "ram_used_gb": 28.48064422607422,
+ "gpu_memory_used": 1032.0,
+ "relative_time": 233.52263975143433
+ },
+ {
+ "timestamp": "2025-01-06T02:03:16.008349",
+ "cpu_percent": 70.1,
+ "ram_percent": 44.8,
+ "ram_used_gb": 28.487651824951172,
+ "gpu_memory_used": 1027.0,
+ "relative_time": 234.65311098098755
+ },
+ {
+ "timestamp": "2025-01-06T02:03:17.141096",
+ "cpu_percent": 73.53,
+ "ram_percent": 44.8,
+ "ram_used_gb": 28.48447036743164,
+ "gpu_memory_used": 1027.0,
+ "relative_time": 235.7638533115387
+ },
+ {
+ "timestamp": "2025-01-06T02:03:18.244142",
+ "cpu_percent": 72.79,
+ "ram_percent": 44.8,
+ "ram_used_gb": 28.493579864501953,
+ "gpu_memory_used": 1027.0,
+ "relative_time": 236.8931279182434
+ },
+ {
+ "timestamp": "2025-01-06T02:03:19.380511",
+ "cpu_percent": 76.48,
+ "ram_percent": 44.8,
+ "ram_used_gb": 28.48853302001953,
+ "gpu_memory_used": 1032.0,
+ "relative_time": 238.01199889183044
+ },
+ {
+ "timestamp": "2025-01-06T02:03:20.498808",
+ "cpu_percent": 65.27,
+ "ram_percent": 44.7,
+ "ram_used_gb": 28.432437896728516,
+ "gpu_memory_used": 1031.0,
+ "relative_time": 239.09615564346313
+ },
+ {
+ "timestamp": "2025-01-06T02:03:21.590026",
+ "cpu_percent": 61.61,
+ "ram_percent": 44.7,
+ "ram_used_gb": 28.455894470214844,
+ "gpu_memory_used": 1034.0,
+ "relative_time": 240.2461507320404
+ },
+ {
+ "timestamp": "2025-01-06T02:03:22.737841",
+ "cpu_percent": 80.38,
+ "ram_percent": 44.8,
+ "ram_used_gb": 28.4761962890625,
+ "gpu_memory_used": 1035.0,
+ "relative_time": 241.36590909957886
+ },
+ {
+ "timestamp": "2025-01-06T02:03:23.859319",
+ "cpu_percent": 65.25,
+ "ram_percent": 44.7,
+ "ram_used_gb": 28.453296661376953,
+ "gpu_memory_used": 1035.0,
+ "relative_time": 242.49047112464905
+ },
+ {
+ "timestamp": "2025-01-06T02:03:24.975950",
+ "cpu_percent": 73.18,
+ "ram_percent": 44.8,
+ "ram_used_gb": 28.474166870117188,
+ "gpu_memory_used": 1036.0,
+ "relative_time": 243.60454726219177
+ },
+ {
+ "timestamp": "2025-01-06T02:03:26.096454",
+ "cpu_percent": 83.42,
+ "ram_percent": 44.7,
+ "ram_used_gb": 28.43814468383789,
+ "gpu_memory_used": 1037.0,
+ "relative_time": 244.7518711090088
+ },
+ {
+ "timestamp": "2025-01-06T02:03:27.240727",
+ "cpu_percent": 78.22,
+ "ram_percent": 44.7,
+ "ram_used_gb": 28.463787078857422,
+ "gpu_memory_used": 1037.0,
+ "relative_time": 245.88417291641235
+ },
+ {
+ "timestamp": "2025-01-06T02:03:28.374545",
+ "cpu_percent": 77.39,
+ "ram_percent": 44.7,
+ "ram_used_gb": 28.448078155517578,
+ "gpu_memory_used": 1037.0,
+ "relative_time": 246.99420261383057
+ },
+ {
+ "timestamp": "2025-01-06T02:03:29.481376",
+ "cpu_percent": 72.67,
+ "ram_percent": 44.7,
+ "ram_used_gb": 28.44430923461914,
+ "gpu_memory_used": 1037.0,
+ "relative_time": 248.13829016685486
+ },
+ {
+ "timestamp": "2025-01-06T02:03:30.629026",
+ "cpu_percent": 71.77,
+ "ram_percent": 44.7,
+ "ram_used_gb": 28.44107437133789,
+ "gpu_memory_used": 1036.0,
+ "relative_time": 249.24768805503845
+ },
+ {
+ "timestamp": "2025-01-06T02:03:31.732614",
+ "cpu_percent": 80.17,
+ "ram_percent": 44.8,
+ "ram_used_gb": 28.484886169433594,
+ "gpu_memory_used": 1042.0,
+ "relative_time": 250.38990330696106
+ },
+ {
+ "timestamp": "2025-01-06T02:03:32.882436",
+ "cpu_percent": 86.8,
+ "ram_percent": 44.8,
+ "ram_used_gb": 28.48214340209961,
+ "gpu_memory_used": 1043.0,
+ "relative_time": 251.52275609970093
+ },
+ {
+ "timestamp": "2025-01-06T02:03:34.004573",
+ "cpu_percent": 67.43,
+ "ram_percent": 44.8,
+ "ram_used_gb": 28.495872497558594,
+ "gpu_memory_used": 1044.0,
+ "relative_time": 252.6218719482422
+ },
+ {
+ "timestamp": "2025-01-06T02:03:35.102105",
+ "cpu_percent": 74.36,
+ "ram_percent": 44.7,
+ "ram_used_gb": 28.462890625,
+ "gpu_memory_used": 1044.0,
+ "relative_time": 253.7364068031311
+ },
+ {
+ "timestamp": "2025-01-06T02:03:36.229096",
+ "cpu_percent": 77.13,
+ "ram_percent": 44.8,
+ "ram_used_gb": 28.49734878540039,
+ "gpu_memory_used": 1042.0,
+ "relative_time": 254.84465980529785
+ },
+ {
+ "timestamp": "2025-01-06T02:03:37.328927",
+ "cpu_percent": 79.02,
+ "ram_percent": 44.7,
+ "ram_used_gb": 28.464012145996094,
+ "gpu_memory_used": 1038.0,
+ "relative_time": 255.96682238578796
+ },
+ {
+ "timestamp": "2025-01-06T02:03:38.448134",
+ "cpu_percent": 68.85,
+ "ram_percent": 44.8,
+ "ram_used_gb": 28.466339111328125,
+ "gpu_memory_used": 1035.0,
+ "relative_time": 257.09385228157043
+ },
+ {
+ "timestamp": "2025-01-06T02:03:39.582470",
+ "cpu_percent": 78.73,
+ "ram_percent": 44.7,
+ "ram_used_gb": 28.43946075439453,
+ "gpu_memory_used": 1035.0,
+ "relative_time": 258.2170786857605
+ },
+ {
+ "timestamp": "2025-01-06T02:03:40.696501",
+ "cpu_percent": 93.81,
+ "ram_percent": 44.8,
+ "ram_used_gb": 28.479244232177734,
+ "gpu_memory_used": 1043.0,
+ "relative_time": 259.3122980594635
+ },
+ {
+ "timestamp": "2025-01-06T02:03:41.801375",
+ "cpu_percent": 75.19,
+ "ram_percent": 44.8,
+ "ram_used_gb": 28.469676971435547,
+ "gpu_memory_used": 1043.0,
+ "relative_time": 260.4192705154419
+ },
+ {
+ "timestamp": "2025-01-06T02:03:42.902513",
+ "cpu_percent": 71.84,
+ "ram_percent": 44.8,
+ "ram_used_gb": 28.466243743896484,
+ "gpu_memory_used": 1043.0,
+ "relative_time": 261.4810314178467
+ },
+ {
+ "timestamp": "2025-01-06T02:03:43.966522",
+ "cpu_percent": 65.83,
+ "ram_percent": 44.8,
+ "ram_used_gb": 28.46680450439453,
+ "gpu_memory_used": 1043.0,
+ "relative_time": 262.59079146385193
+ },
+ {
+ "timestamp": "2025-01-06T02:03:45.071077",
+ "cpu_percent": 73.81,
+ "ram_percent": 44.7,
+ "ram_used_gb": 28.444644927978516,
+ "gpu_memory_used": 1043.0,
+ "relative_time": 263.69037771224976
+ },
+ {
+ "timestamp": "2025-01-06T02:03:46.177191",
+ "cpu_percent": 76.08,
+ "ram_percent": 44.7,
+ "ram_used_gb": 28.449535369873047,
+ "gpu_memory_used": 1044.0,
+ "relative_time": 264.811972618103
+ },
+ {
+ "timestamp": "2025-01-06T02:03:47.297543",
+ "cpu_percent": 69.21,
+ "ram_percent": 44.7,
+ "ram_used_gb": 28.455825805664062,
+ "gpu_memory_used": 1038.0,
+ "relative_time": 265.930095911026
+ },
+ {
+ "timestamp": "2025-01-06T02:03:48.419566",
+ "cpu_percent": 89.1,
+ "ram_percent": 44.8,
+ "ram_used_gb": 28.472156524658203,
+ "gpu_memory_used": 1042.0,
+ "relative_time": 267.05133056640625
+ },
+ {
+ "timestamp": "2025-01-06T02:03:49.545977",
+ "cpu_percent": 77.62,
+ "ram_percent": 44.8,
+ "ram_used_gb": 28.473480224609375,
+ "gpu_memory_used": 1043.0,
+ "relative_time": 268.1589620113373
+ },
+ {
+ "timestamp": "2025-01-06T02:03:50.649988",
+ "cpu_percent": 65.12,
+ "ram_percent": 44.8,
+ "ram_used_gb": 28.477100372314453,
+ "gpu_memory_used": 1043.0,
+ "relative_time": 269.270804643631
+ },
+ {
+ "timestamp": "2025-01-06T02:03:51.761398",
+ "cpu_percent": 72.83,
+ "ram_percent": 44.8,
+ "ram_used_gb": 28.466938018798828,
+ "gpu_memory_used": 1043.0,
+ "relative_time": 270.4037699699402
+ },
+ {
+ "timestamp": "2025-01-06T02:03:52.899078",
+ "cpu_percent": 69.92,
+ "ram_percent": 44.8,
+ "ram_used_gb": 28.516891479492188,
+ "gpu_memory_used": 1043.0,
+ "relative_time": 271.52356004714966
+ },
+ {
+ "timestamp": "2025-01-06T02:03:54.009357",
+ "cpu_percent": 64.24,
+ "ram_percent": 44.8,
+ "ram_used_gb": 28.478267669677734,
+ "gpu_memory_used": 1037.0,
+ "relative_time": 272.606009721756
+ },
+ {
+ "timestamp": "2025-01-06T02:03:55.099524",
+ "cpu_percent": 70.53,
+ "ram_percent": 44.8,
+ "ram_used_gb": 28.46905517578125,
+ "gpu_memory_used": 1037.0,
+ "relative_time": 273.72701716423035
+ },
+ {
+ "timestamp": "2025-01-06T02:03:56.206900",
+ "cpu_percent": 68.34,
+ "ram_percent": 44.7,
+ "ram_used_gb": 28.465599060058594,
+ "gpu_memory_used": 1037.0,
+ "relative_time": 274.8694078922272
+ },
+ {
+ "timestamp": "2025-01-06T02:03:57.354260",
+ "cpu_percent": 72.55,
+ "ram_percent": 44.8,
+ "ram_used_gb": 28.492900848388672,
+ "gpu_memory_used": 1036.0,
+ "relative_time": 275.9902341365814
+ },
+ {
+ "timestamp": "2025-01-06T02:03:58.471175",
+ "cpu_percent": 82.03,
+ "ram_percent": 44.8,
+ "ram_used_gb": 28.485233306884766,
+ "gpu_memory_used": 1035.0,
+ "relative_time": 277.10426807403564
+ },
+ {
+ "timestamp": "2025-01-06T02:03:59.587394",
+ "cpu_percent": 73.04,
+ "ram_percent": 44.8,
+ "ram_used_gb": 28.481033325195312,
+ "gpu_memory_used": 1036.0,
+ "relative_time": 278.22713327407837
+ },
+ {
+ "timestamp": "2025-01-06T02:04:00.719395",
+ "cpu_percent": 98.57,
+ "ram_percent": 44.8,
+ "ram_used_gb": 28.480728149414062,
+ "gpu_memory_used": 1044.0,
+ "relative_time": 279.353773355484
+ },
+ {
+ "timestamp": "2025-01-06T02:04:01.842768",
+ "cpu_percent": 71.4,
+ "ram_percent": 44.8,
+ "ram_used_gb": 28.491016387939453,
+ "gpu_memory_used": 1045.0,
+ "relative_time": 280.5646574497223
+ },
+ {
+ "timestamp": "2025-01-06T02:04:03.051785",
+ "cpu_percent": 89.75,
+ "ram_percent": 44.8,
+ "ram_used_gb": 28.49317169189453,
+ "gpu_memory_used": 1045.0,
+ "relative_time": 281.7272472381592
+ },
+ {
+ "timestamp": "2025-01-06T02:04:04.219136",
+ "cpu_percent": 73.74,
+ "ram_percent": 44.8,
+ "ram_used_gb": 28.491840362548828,
+ "gpu_memory_used": 1045.0,
+ "relative_time": 282.850713968277
+ },
+ {
+ "timestamp": "2025-01-06T02:04:05.332164",
+ "cpu_percent": 90.68,
+ "ram_percent": 44.8,
+ "ram_used_gb": 28.500041961669922,
+ "gpu_memory_used": 1046.0,
+ "relative_time": 283.98440742492676
+ },
+ {
+ "timestamp": "2025-01-06T02:04:06.464951",
+ "cpu_percent": 86.44,
+ "ram_percent": 44.8,
+ "ram_used_gb": 28.515422821044922,
+ "gpu_memory_used": 1046.0,
+ "relative_time": 285.0827331542969
+ },
+ {
+ "timestamp": "2025-01-06T02:04:07.566047",
+ "cpu_percent": 86.21,
+ "ram_percent": 44.8,
+ "ram_used_gb": 28.5203857421875,
+ "gpu_memory_used": 1045.0,
+ "relative_time": 286.2305998802185
+ },
+ {
+ "timestamp": "2025-01-06T02:04:08.713374",
+ "cpu_percent": 71.91,
+ "ram_percent": 45.0,
+ "ram_used_gb": 28.612117767333984,
+ "gpu_memory_used": 1044.0,
+ "relative_time": 287.33052921295166
+ },
+ {
+ "timestamp": "2025-01-06T02:04:09.815028",
+ "cpu_percent": 69.27,
+ "ram_percent": 45.1,
+ "ram_used_gb": 28.676231384277344,
+ "gpu_memory_used": 1044.0,
+ "relative_time": 288.4558057785034
+ },
+ {
+ "timestamp": "2025-01-06T02:04:10.950295",
+ "cpu_percent": 82.52,
+ "ram_percent": 45.2,
+ "ram_used_gb": 28.753101348876953,
+ "gpu_memory_used": 1043.0,
+ "relative_time": 289.58639645576477
+ },
+ {
+ "timestamp": "2025-01-06T02:04:12.077013",
+ "cpu_percent": 82.08,
+ "ram_percent": 45.3,
+ "ram_used_gb": 28.822429656982422,
+ "gpu_memory_used": 1038.0,
+ "relative_time": 290.69340085983276
+ },
+ {
+ "timestamp": "2025-01-06T02:04:13.177952",
+ "cpu_percent": 71.31,
+ "ram_percent": 45.5,
+ "ram_used_gb": 28.919525146484375,
+ "gpu_memory_used": 1038.0,
+ "relative_time": 291.77725172042847
+ },
+ {
+ "timestamp": "2025-01-06T02:04:14.261590",
+ "cpu_percent": 64.09,
+ "ram_percent": 45.6,
+ "ram_used_gb": 28.983028411865234,
+ "gpu_memory_used": 1038.0,
+ "relative_time": 292.8957669734955
+ },
+ {
+ "timestamp": "2025-01-06T02:04:15.378908",
+ "cpu_percent": 75.32,
+ "ram_percent": 45.6,
+ "ram_used_gb": 29.025554656982422,
+ "gpu_memory_used": 1037.0,
+ "relative_time": 294.02492356300354
+ },
{
- "timestamp": "2025-01-04T01:34:19.387759",
- "cpu_percent": 42.46,
- "ram_percent": 48.0,
- "ram_used_gb": 30.525142669677734,
- "gpu_memory_used": 1262.0,
- "relative_time": 232.4770486354828
- },
+ "timestamp": "2025-01-06T02:04:16.510116",
+ "cpu_percent": 77.4,
+ "ram_percent": 45.7,
+ "ram_used_gb": 29.070560455322266,
+ "gpu_memory_used": 1031.0,
+ "relative_time": 295.1259517669678
+ },
{
- "timestamp": "2025-01-04T01:34:20.471629",
- "cpu_percent": 44.17,
- "ram_percent": 48.0,
- "ram_used_gb": 30.535388946533203,
- "gpu_memory_used": 1259.0,
- "relative_time": 233.57954168319702
- },
+ "timestamp": "2025-01-06T02:04:17.615075",
+ "cpu_percent": 76.31,
+ "ram_percent": 45.8,
+ "ram_used_gb": 29.118003845214844,
+ "gpu_memory_used": 1027.0,
+ "relative_time": 296.2284073829651
+ },
{
- "timestamp": "2025-01-04T01:34:21.576615",
- "cpu_percent": 45.36,
- "ram_percent": 48.0,
- "ram_used_gb": 30.529708862304688,
- "gpu_memory_used": 1259.0,
- "relative_time": 234.70528435707092
- },
+ "timestamp": "2025-01-06T02:04:18.721955",
+ "cpu_percent": 78.09,
+ "ram_percent": 45.3,
+ "ram_used_gb": 28.807754516601562,
+ "gpu_memory_used": 1028.0,
+ "relative_time": 297.38829493522644
+ },
{
- "timestamp": "2025-01-04T01:34:22.709825",
- "cpu_percent": 52.14,
- "ram_percent": 47.9,
- "ram_used_gb": 30.490406036376953,
- "gpu_memory_used": 1259.0,
- "relative_time": 235.84367108345032
- },
+ "timestamp": "2025-01-06T02:04:19.874178",
+ "cpu_percent": 87.58,
+ "ram_percent": 45.2,
+ "ram_used_gb": 28.77080535888672,
+ "gpu_memory_used": 1028.0,
+ "relative_time": 298.5273485183716
+ },
{
- "timestamp": "2025-01-04T01:34:23.834912",
- "cpu_percent": 49.39,
- "ram_percent": 47.9,
- "ram_used_gb": 30.49042510986328,
- "gpu_memory_used": 1259.0,
- "relative_time": 236.94777131080627
- },
+ "timestamp": "2025-01-06T02:04:21.030928",
+ "cpu_percent": 80.39,
+ "ram_percent": 45.2,
+ "ram_used_gb": 28.74532699584961,
+ "gpu_memory_used": 1029.0,
+ "relative_time": 299.648122549057
+ },
{
- "timestamp": "2025-01-04T01:34:24.940884",
- "cpu_percent": 51.84,
- "ram_percent": 47.9,
- "ram_used_gb": 30.489459991455078,
- "gpu_memory_used": 1259.0,
- "relative_time": 238.07107305526733
- },
+ "timestamp": "2025-01-06T02:04:22.137059",
+ "cpu_percent": 69.39,
+ "ram_percent": 45.2,
+ "ram_used_gb": 28.74283218383789,
+ "gpu_memory_used": 1036.0,
+ "relative_time": 300.7701139450073
+ },
{
- "timestamp": "2025-01-04T01:34:26.077527",
- "cpu_percent": 49.55,
- "ram_percent": 47.9,
- "ram_used_gb": 30.488842010498047,
- "gpu_memory_used": 1259.0,
- "relative_time": 239.20314645767212
- },
+ "timestamp": "2025-01-06T02:04:23.259177",
+ "cpu_percent": 68.48,
+ "ram_percent": 45.2,
+ "ram_used_gb": 28.735374450683594,
+ "gpu_memory_used": 1036.0,
+ "relative_time": 301.8923716545105
+ },
{
- "timestamp": "2025-01-04T01:34:27.199360",
- "cpu_percent": 47.71,
- "ram_percent": 47.9,
- "ram_used_gb": 30.49380874633789,
- "gpu_memory_used": 1258.0,
- "relative_time": 240.32860612869263
- },
+ "timestamp": "2025-01-06T02:04:24.376291",
+ "cpu_percent": 77.94,
+ "ram_percent": 45.2,
+ "ram_used_gb": 28.745147705078125,
+ "gpu_memory_used": 1035.0,
+ "relative_time": 303.0271394252777
+ },
{
- "timestamp": "2025-01-04T01:34:28.333600",
- "cpu_percent": 48.61,
- "ram_percent": 48.0,
- "ram_used_gb": 30.503887176513672,
- "gpu_memory_used": 1258.0,
- "relative_time": 241.44983053207397
- },
+ "timestamp": "2025-01-06T02:04:25.511434",
+ "cpu_percent": 72.12,
+ "ram_percent": 45.1,
+ "ram_used_gb": 28.703075408935547,
+ "gpu_memory_used": 1035.0,
+ "relative_time": 304.14817214012146
+ },
{
- "timestamp": "2025-01-04T01:34:29.453855",
- "cpu_percent": 51.01,
- "ram_percent": 48.0,
- "ram_used_gb": 30.512046813964844,
- "gpu_memory_used": 1258.0,
- "relative_time": 242.60703372955322
- },
+ "timestamp": "2025-01-06T02:04:26.633643",
+ "cpu_percent": 79.6,
+ "ram_percent": 45.1,
+ "ram_used_gb": 28.69623565673828,
+ "gpu_memory_used": 1035.0,
+ "relative_time": 305.25277972221375
+ },
{
- "timestamp": "2025-01-04T01:34:30.613699",
- "cpu_percent": 53.89,
- "ram_percent": 48.0,
- "ram_used_gb": 30.522415161132812,
- "gpu_memory_used": 1258.0,
- "relative_time": 243.73219799995422
- },
+ "timestamp": "2025-01-06T02:04:27.737495",
+ "cpu_percent": 45.9,
+ "ram_percent": 45.2,
+ "ram_used_gb": 28.770511627197266,
+ "gpu_memory_used": 1035.0,
+ "relative_time": 306.34188294410706
+ },
{
- "timestamp": "2025-01-04T01:34:31.735503",
- "cpu_percent": 21.25,
- "ram_percent": 48.2,
- "ram_used_gb": 30.68771743774414,
- "gpu_memory_used": 1260.0,
- "relative_time": 244.80069231987
+ "timestamp": "2025-01-06T02:04:28.847601",
+ "cpu_percent": 32.22,
+ "ram_percent": 45.3,
+ "ram_used_gb": 28.79547119140625,
+ "gpu_memory_used": 1040.0,
+ "relative_time": 307.4505376815796
}
],
- "test_duration": 247.14976453781128
+ "test_duration": 310.5054519176483
}
\ No newline at end of file
diff --git a/examples/assorted_checks/benchmarks/output_data/cpu_benchmark_stats_8_4_par.txt b/examples/assorted_checks/benchmarks/output_data/cpu_benchmark_stats_rtf.txt
similarity index 52%
rename from examples/assorted_checks/benchmarks/output_data/cpu_benchmark_stats_8_4_par.txt
rename to examples/assorted_checks/benchmarks/output_data/cpu_benchmark_stats_rtf.txt
index 541a304..7c8023a 100644
--- a/examples/assorted_checks/benchmarks/output_data/cpu_benchmark_stats_8_4_par.txt
+++ b/examples/assorted_checks/benchmarks/output_data/cpu_benchmark_stats_rtf.txt
@@ -2,22 +2,22 @@
Total tokens processed: 1800
Total audio generated (s): 568.53
-Total test duration (s): 244.10
-Average processing rate (tokens/s): 7.34
-Average RTF: 0.43
-Average Real Time Speed: 2.33
+Total test duration (s): 306.02
+Average processing rate (tokens/s): 5.75
+Average RTF: 0.55
+Average Real Time Speed: 1.81
=== Per-chunk Stats ===
Average chunk size (tokens): 600.00
Min chunk size (tokens): 300
Max chunk size (tokens): 900
-Average processing time (s): 81.30
+Average processing time (s): 101.89
Average output length (s): 189.51
=== Performance Ranges ===
-Processing rate range (tokens/s): 7.21 - 7.47
-RTF range: 0.43x - 0.43x
-Real Time Speed range: 2.33x - 2.33x
+Processing rate range (tokens/s): 5.30 - 6.26
+RTF range: 0.51x - 0.59x
+Real Time Speed range: 1.69x - 1.96x
diff --git a/examples/assorted_checks/benchmarks/output_data/first_token_benchmark_stream.json b/examples/assorted_checks/benchmarks/output_data/first_token_benchmark_stream.json
new file mode 100644
index 0000000..c78b5ab
--- /dev/null
+++ b/examples/assorted_checks/benchmarks/output_data/first_token_benchmark_stream.json
@@ -0,0 +1,337 @@
+{
+ "individual_runs": [
+ {
+ "text_length": 37,
+ "token_count": null,
+ "total_time": 1.818483829498291,
+ "time_to_first_chunk": 1.8067498207092285,
+ "error": null,
+ "audio_path": "c:\\Users\\jerem\\Desktop\\Kokoro-FastAPI\\examples\\assorted_checks\\benchmarks\\output_audio_stream\\benchmark_tokens10_run1_stream.wav",
+ "audio_length": 3.45,
+ "target_tokens": 10,
+ "actual_tokens": 10,
+ "run_number": 1
+ },
+ {
+ "text_length": 37,
+ "token_count": null,
+ "total_time": 1.6271553039550781,
+ "time_to_first_chunk": 1.610968828201294,
+ "error": null,
+ "audio_path": "c:\\Users\\jerem\\Desktop\\Kokoro-FastAPI\\examples\\assorted_checks\\benchmarks\\output_audio_stream\\benchmark_tokens10_run2_stream.wav",
+ "audio_length": 3.45,
+ "target_tokens": 10,
+ "actual_tokens": 10,
+ "run_number": 2
+ },
+ {
+ "text_length": 37,
+ "token_count": null,
+ "total_time": 1.5759549140930176,
+ "time_to_first_chunk": 1.561316967010498,
+ "error": null,
+ "audio_path": "c:\\Users\\jerem\\Desktop\\Kokoro-FastAPI\\examples\\assorted_checks\\benchmarks\\output_audio_stream\\benchmark_tokens10_run3_stream.wav",
+ "audio_length": 3.45,
+ "target_tokens": 10,
+ "actual_tokens": 10,
+ "run_number": 3
+ },
+ {
+ "text_length": 37,
+ "token_count": null,
+ "total_time": 1.615680456161499,
+ "time_to_first_chunk": 1.6035709381103516,
+ "error": null,
+ "audio_path": "c:\\Users\\jerem\\Desktop\\Kokoro-FastAPI\\examples\\assorted_checks\\benchmarks\\output_audio_stream\\benchmark_tokens10_run4_stream.wav",
+ "audio_length": 3.45,
+ "target_tokens": 10,
+ "actual_tokens": 10,
+ "run_number": 4
+ },
+ {
+ "text_length": 37,
+ "token_count": null,
+ "total_time": 1.6515357494354248,
+ "time_to_first_chunk": 1.6268820762634277,
+ "error": null,
+ "audio_path": "c:\\Users\\jerem\\Desktop\\Kokoro-FastAPI\\examples\\assorted_checks\\benchmarks\\output_audio_stream\\benchmark_tokens10_run5_stream.wav",
+ "audio_length": 3.45,
+ "target_tokens": 10,
+ "actual_tokens": 10,
+ "run_number": 5
+ },
+ {
+ "text_length": 212,
+ "token_count": null,
+ "total_time": 7.368175268173218,
+ "time_to_first_chunk": 3.4540352821350098,
+ "error": null,
+ "audio_path": "c:\\Users\\jerem\\Desktop\\Kokoro-FastAPI\\examples\\assorted_checks\\benchmarks\\output_audio_stream\\benchmark_tokens50_run1_stream.wav",
+ "audio_length": 15.825,
+ "target_tokens": 50,
+ "actual_tokens": 50,
+ "run_number": 1
+ },
+ {
+ "text_length": 212,
+ "token_count": null,
+ "total_time": 6.931752443313599,
+ "time_to_first_chunk": 3.1553661823272705,
+ "error": null,
+ "audio_path": "c:\\Users\\jerem\\Desktop\\Kokoro-FastAPI\\examples\\assorted_checks\\benchmarks\\output_audio_stream\\benchmark_tokens50_run2_stream.wav",
+ "audio_length": 15.825,
+ "target_tokens": 50,
+ "actual_tokens": 50,
+ "run_number": 2
+ },
+ {
+ "text_length": 212,
+ "token_count": null,
+ "total_time": 6.867500066757202,
+ "time_to_first_chunk": 3.127124309539795,
+ "error": null,
+ "audio_path": "c:\\Users\\jerem\\Desktop\\Kokoro-FastAPI\\examples\\assorted_checks\\benchmarks\\output_audio_stream\\benchmark_tokens50_run3_stream.wav",
+ "audio_length": 15.825,
+ "target_tokens": 50,
+ "actual_tokens": 50,
+ "run_number": 3
+ },
+ {
+ "text_length": 212,
+ "token_count": null,
+ "total_time": 6.933881521224976,
+ "time_to_first_chunk": 3.1872360706329346,
+ "error": null,
+ "audio_path": "c:\\Users\\jerem\\Desktop\\Kokoro-FastAPI\\examples\\assorted_checks\\benchmarks\\output_audio_stream\\benchmark_tokens50_run4_stream.wav",
+ "audio_length": 15.825,
+ "target_tokens": 50,
+ "actual_tokens": 50,
+ "run_number": 4
+ },
+ {
+ "text_length": 212,
+ "token_count": null,
+ "total_time": 7.605916738510132,
+ "time_to_first_chunk": 3.6397976875305176,
+ "error": null,
+ "audio_path": "c:\\Users\\jerem\\Desktop\\Kokoro-FastAPI\\examples\\assorted_checks\\benchmarks\\output_audio_stream\\benchmark_tokens50_run5_stream.wav",
+ "audio_length": 15.825,
+ "target_tokens": 50,
+ "actual_tokens": 50,
+ "run_number": 5
+ },
+ {
+ "text_length": 448,
+ "token_count": null,
+ "total_time": 14.777218580245972,
+ "time_to_first_chunk": 3.625889778137207,
+ "error": null,
+ "audio_path": "c:\\Users\\jerem\\Desktop\\Kokoro-FastAPI\\examples\\assorted_checks\\benchmarks\\output_audio_stream\\benchmark_tokens100_run1_stream.wav",
+ "audio_length": 30.35,
+ "target_tokens": 100,
+ "actual_tokens": 100,
+ "run_number": 1
+ },
+ {
+ "text_length": 448,
+ "token_count": null,
+ "total_time": 13.911701202392578,
+ "time_to_first_chunk": 3.298157215118408,
+ "error": null,
+ "audio_path": "c:\\Users\\jerem\\Desktop\\Kokoro-FastAPI\\examples\\assorted_checks\\benchmarks\\output_audio_stream\\benchmark_tokens100_run2_stream.wav",
+ "audio_length": 30.35,
+ "target_tokens": 100,
+ "actual_tokens": 100,
+ "run_number": 2
+ },
+ {
+ "text_length": 448,
+ "token_count": null,
+ "total_time": 14.451806783676147,
+ "time_to_first_chunk": 3.8353848457336426,
+ "error": null,
+ "audio_path": "c:\\Users\\jerem\\Desktop\\Kokoro-FastAPI\\examples\\assorted_checks\\benchmarks\\output_audio_stream\\benchmark_tokens100_run3_stream.wav",
+ "audio_length": 30.35,
+ "target_tokens": 100,
+ "actual_tokens": 100,
+ "run_number": 3
+ },
+ {
+ "text_length": 448,
+ "token_count": null,
+ "total_time": 13.941124200820923,
+ "time_to_first_chunk": 3.3754897117614746,
+ "error": null,
+ "audio_path": "c:\\Users\\jerem\\Desktop\\Kokoro-FastAPI\\examples\\assorted_checks\\benchmarks\\output_audio_stream\\benchmark_tokens100_run4_stream.wav",
+ "audio_length": 30.35,
+ "target_tokens": 100,
+ "actual_tokens": 100,
+ "run_number": 4
+ },
+ {
+ "text_length": 448,
+ "token_count": null,
+ "total_time": 15.717307329177856,
+ "time_to_first_chunk": 3.6421003341674805,
+ "error": null,
+ "audio_path": "c:\\Users\\jerem\\Desktop\\Kokoro-FastAPI\\examples\\assorted_checks\\benchmarks\\output_audio_stream\\benchmark_tokens100_run5_stream.wav",
+ "audio_length": 30.35,
+ "target_tokens": 100,
+ "actual_tokens": 100,
+ "run_number": 5
+ },
+ {
+ "text_length": 1140,
+ "token_count": null,
+ "total_time": 41.16162133216858,
+ "time_to_first_chunk": 3.7044918537139893,
+ "error": null,
+ "audio_path": "c:\\Users\\jerem\\Desktop\\Kokoro-FastAPI\\examples\\assorted_checks\\benchmarks\\output_audio_stream\\benchmark_tokens250_run1_stream.wav",
+ "audio_length": 78.175,
+ "target_tokens": 250,
+ "actual_tokens": 250,
+ "run_number": 1
+ },
+ {
+ "text_length": 1140,
+ "token_count": null,
+ "total_time": 35.43009877204895,
+ "time_to_first_chunk": 3.1040024757385254,
+ "error": null,
+ "audio_path": "c:\\Users\\jerem\\Desktop\\Kokoro-FastAPI\\examples\\assorted_checks\\benchmarks\\output_audio_stream\\benchmark_tokens250_run2_stream.wav",
+ "audio_length": 78.175,
+ "target_tokens": 250,
+ "actual_tokens": 250,
+ "run_number": 2
+ },
+ {
+ "text_length": 1140,
+ "token_count": null,
+ "total_time": 35.285505294799805,
+ "time_to_first_chunk": 3.657808780670166,
+ "error": null,
+ "audio_path": "c:\\Users\\jerem\\Desktop\\Kokoro-FastAPI\\examples\\assorted_checks\\benchmarks\\output_audio_stream\\benchmark_tokens250_run3_stream.wav",
+ "audio_length": 78.175,
+ "target_tokens": 250,
+ "actual_tokens": 250,
+ "run_number": 3
+ },
+ {
+ "text_length": 1140,
+ "token_count": null,
+ "total_time": 34.47842836380005,
+ "time_to_first_chunk": 3.2033851146698,
+ "error": null,
+ "audio_path": "c:\\Users\\jerem\\Desktop\\Kokoro-FastAPI\\examples\\assorted_checks\\benchmarks\\output_audio_stream\\benchmark_tokens250_run4_stream.wav",
+ "audio_length": 78.175,
+ "target_tokens": 250,
+ "actual_tokens": 250,
+ "run_number": 4
+ },
+ {
+ "text_length": 1140,
+ "token_count": null,
+ "total_time": 36.50936222076416,
+ "time_to_first_chunk": 3.1159815788269043,
+ "error": null,
+ "audio_path": "c:\\Users\\jerem\\Desktop\\Kokoro-FastAPI\\examples\\assorted_checks\\benchmarks\\output_audio_stream\\benchmark_tokens250_run5_stream.wav",
+ "audio_length": 78.175,
+ "target_tokens": 250,
+ "actual_tokens": 250,
+ "run_number": 5
+ },
+ {
+ "text_length": 2232,
+ "token_count": null,
+ "total_time": 86.84899735450745,
+ "time_to_first_chunk": 5.405678987503052,
+ "error": null,
+ "audio_path": "c:\\Users\\jerem\\Desktop\\Kokoro-FastAPI\\examples\\assorted_checks\\benchmarks\\output_audio_stream\\benchmark_tokens500_run1_stream.wav",
+ "audio_length": 155.125,
+ "target_tokens": 500,
+ "actual_tokens": 500,
+ "run_number": 1
+ },
+ {
+ "text_length": 2232,
+ "token_count": null,
+ "total_time": 74.72578477859497,
+ "time_to_first_chunk": 3.966891050338745,
+ "error": null,
+ "audio_path": "c:\\Users\\jerem\\Desktop\\Kokoro-FastAPI\\examples\\assorted_checks\\benchmarks\\output_audio_stream\\benchmark_tokens500_run2_stream.wav",
+ "audio_length": 155.125,
+ "target_tokens": 500,
+ "actual_tokens": 500,
+ "run_number": 2
+ },
+ {
+ "text_length": 2232,
+ "token_count": null,
+ "total_time": 68.1974081993103,
+ "time_to_first_chunk": 3.27712082862854,
+ "error": null,
+ "audio_path": "c:\\Users\\jerem\\Desktop\\Kokoro-FastAPI\\examples\\assorted_checks\\benchmarks\\output_audio_stream\\benchmark_tokens500_run3_stream.wav",
+ "audio_length": 155.125,
+ "target_tokens": 500,
+ "actual_tokens": 500,
+ "run_number": 3
+ },
+ {
+ "text_length": 2232,
+ "token_count": null,
+ "total_time": 72.68819260597229,
+ "time_to_first_chunk": 3.153608560562134,
+ "error": null,
+ "audio_path": "c:\\Users\\jerem\\Desktop\\Kokoro-FastAPI\\examples\\assorted_checks\\benchmarks\\output_audio_stream\\benchmark_tokens500_run4_stream.wav",
+ "audio_length": 155.125,
+ "target_tokens": 500,
+ "actual_tokens": 500,
+ "run_number": 4
+ },
+ {
+ "text_length": 2232,
+ "token_count": null,
+ "total_time": 67.94887590408325,
+ "time_to_first_chunk": 3.954728841781616,
+ "error": null,
+ "audio_path": "c:\\Users\\jerem\\Desktop\\Kokoro-FastAPI\\examples\\assorted_checks\\benchmarks\\output_audio_stream\\benchmark_tokens500_run5_stream.wav",
+ "audio_length": 155.125,
+ "target_tokens": 500,
+ "actual_tokens": 500,
+ "run_number": 5
+ }
+ ],
+ "summary": {
+ "10": {
+ "avg_time_to_first_chunk": 1.642,
+ "avg_total_time": 1.658,
+ "avg_audio_length": 3.45,
+ "num_successful_runs": 5
+ },
+ "50": {
+ "avg_time_to_first_chunk": 3.313,
+ "avg_total_time": 7.141,
+ "avg_audio_length": 15.825,
+ "num_successful_runs": 5
+ },
+ "100": {
+ "avg_time_to_first_chunk": 3.555,
+ "avg_total_time": 14.56,
+ "avg_audio_length": 30.35,
+ "num_successful_runs": 5
+ },
+ "250": {
+ "avg_time_to_first_chunk": 3.357,
+ "avg_total_time": 36.573,
+ "avg_audio_length": 78.175,
+ "num_successful_runs": 5
+ },
+ "500": {
+ "avg_time_to_first_chunk": 3.952,
+ "avg_total_time": 74.082,
+ "avg_audio_length": 155.125,
+ "num_successful_runs": 5
+ }
+ },
+ "timestamp": "2025-01-06 03:31:37"
+}
\ No newline at end of file
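
The `summary` block in the file above is the per-token-size mean of the `individual_runs` entries. A minimal aggregation sketch, assuming only the JSON layout shown in this diff (the path is relative to the `output_data` directory; this is not the repo's own benchmark code):

```python
import json
from collections import defaultdict
from statistics import mean

def summarize(path: str) -> dict:
    """Group runs by target token count and average the key timings."""
    with open(path) as f:
        runs = json.load(f)["individual_runs"]

    grouped = defaultdict(list)
    for run in runs:
        if run["error"] is None:          # only successful runs count toward the averages
            grouped[run["target_tokens"]].append(run)

    return {
        tokens: {
            "avg_time_to_first_chunk": round(mean(r["time_to_first_chunk"] for r in rs), 3),
            "avg_total_time": round(mean(r["total_time"] for r in rs), 3),
            "avg_audio_length": round(mean(r["audio_length"] for r in rs), 3),
            "num_successful_runs": len(rs),
        }
        for tokens, rs in grouped.items()
    }

print(summarize("first_token_benchmark_stream.json"))
```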
diff --git a/examples/assorted_checks/benchmarks/output_data/first_token_benchmark_stream_openai.json b/examples/assorted_checks/benchmarks/output_data/first_token_benchmark_stream_openai.json
new file mode 100644
index 0000000..968fffb
--- /dev/null
+++ b/examples/assorted_checks/benchmarks/output_data/first_token_benchmark_stream_openai.json
@@ -0,0 +1,337 @@
+{
+ "individual_runs": [
+ {
+ "text_length": 37,
+ "token_count": null,
+ "total_time": 1.638200044631958,
+ "time_to_first_chunk": 1.6232295036315918,
+ "error": null,
+ "audio_path": "c:\\Users\\jerem\\Desktop\\Kokoro-FastAPI\\examples\\assorted_checks\\benchmarks\\output_audio_stream_openai\\benchmark_tokens10_run1_stream_openai.wav",
+ "audio_length": 3.45,
+ "target_tokens": 10,
+ "actual_tokens": 10,
+ "run_number": 1
+ },
+ {
+ "text_length": 37,
+ "token_count": null,
+ "total_time": 1.4960439205169678,
+ "time_to_first_chunk": 1.4854960441589355,
+ "error": null,
+ "audio_path": "c:\\Users\\jerem\\Desktop\\Kokoro-FastAPI\\examples\\assorted_checks\\benchmarks\\output_audio_stream_openai\\benchmark_tokens10_run2_stream_openai.wav",
+ "audio_length": 3.45,
+ "target_tokens": 10,
+ "actual_tokens": 10,
+ "run_number": 2
+ },
+ {
+ "text_length": 37,
+ "token_count": null,
+ "total_time": 1.5055279731750488,
+ "time_to_first_chunk": 1.4948456287384033,
+ "error": null,
+ "audio_path": "c:\\Users\\jerem\\Desktop\\Kokoro-FastAPI\\examples\\assorted_checks\\benchmarks\\output_audio_stream_openai\\benchmark_tokens10_run3_stream_openai.wav",
+ "audio_length": 3.45,
+ "target_tokens": 10,
+ "actual_tokens": 10,
+ "run_number": 3
+ },
+ {
+ "text_length": 37,
+ "token_count": null,
+ "total_time": 1.496837854385376,
+ "time_to_first_chunk": 1.4835176467895508,
+ "error": null,
+ "audio_path": "c:\\Users\\jerem\\Desktop\\Kokoro-FastAPI\\examples\\assorted_checks\\benchmarks\\output_audio_stream_openai\\benchmark_tokens10_run4_stream_openai.wav",
+ "audio_length": 3.45,
+ "target_tokens": 10,
+ "actual_tokens": 10,
+ "run_number": 4
+ },
+ {
+ "text_length": 37,
+ "token_count": null,
+ "total_time": 1.7330272197723389,
+ "time_to_first_chunk": 1.7219843864440918,
+ "error": null,
+ "audio_path": "c:\\Users\\jerem\\Desktop\\Kokoro-FastAPI\\examples\\assorted_checks\\benchmarks\\output_audio_stream_openai\\benchmark_tokens10_run5_stream_openai.wav",
+ "audio_length": 3.45,
+ "target_tokens": 10,
+ "actual_tokens": 10,
+ "run_number": 5
+ },
+ {
+ "text_length": 212,
+ "token_count": null,
+ "total_time": 6.865253925323486,
+ "time_to_first_chunk": 3.1809072494506836,
+ "error": null,
+ "audio_path": "c:\\Users\\jerem\\Desktop\\Kokoro-FastAPI\\examples\\assorted_checks\\benchmarks\\output_audio_stream_openai\\benchmark_tokens50_run1_stream_openai.wav",
+ "audio_length": 15.825,
+ "target_tokens": 50,
+ "actual_tokens": 50,
+ "run_number": 1
+ },
+ {
+ "text_length": 212,
+ "token_count": null,
+ "total_time": 7.975425720214844,
+ "time_to_first_chunk": 3.2910428047180176,
+ "error": null,
+ "audio_path": "c:\\Users\\jerem\\Desktop\\Kokoro-FastAPI\\examples\\assorted_checks\\benchmarks\\output_audio_stream_openai\\benchmark_tokens50_run2_stream_openai.wav",
+ "audio_length": 15.825,
+ "target_tokens": 50,
+ "actual_tokens": 50,
+ "run_number": 2
+ },
+ {
+ "text_length": 212,
+ "token_count": null,
+ "total_time": 6.793715715408325,
+ "time_to_first_chunk": 3.210068464279175,
+ "error": null,
+ "audio_path": "c:\\Users\\jerem\\Desktop\\Kokoro-FastAPI\\examples\\assorted_checks\\benchmarks\\output_audio_stream_openai\\benchmark_tokens50_run3_stream_openai.wav",
+ "audio_length": 15.825,
+ "target_tokens": 50,
+ "actual_tokens": 50,
+ "run_number": 3
+ },
+ {
+ "text_length": 212,
+ "token_count": null,
+ "total_time": 6.639606237411499,
+ "time_to_first_chunk": 3.0641400814056396,
+ "error": null,
+ "audio_path": "c:\\Users\\jerem\\Desktop\\Kokoro-FastAPI\\examples\\assorted_checks\\benchmarks\\output_audio_stream_openai\\benchmark_tokens50_run4_stream_openai.wav",
+ "audio_length": 15.825,
+ "target_tokens": 50,
+ "actual_tokens": 50,
+ "run_number": 4
+ },
+ {
+ "text_length": 212,
+ "token_count": null,
+ "total_time": 8.100529193878174,
+ "time_to_first_chunk": 3.3910109996795654,
+ "error": null,
+ "audio_path": "c:\\Users\\jerem\\Desktop\\Kokoro-FastAPI\\examples\\assorted_checks\\benchmarks\\output_audio_stream_openai\\benchmark_tokens50_run5_stream_openai.wav",
+ "audio_length": 15.825,
+ "target_tokens": 50,
+ "actual_tokens": 50,
+ "run_number": 5
+ },
+ {
+ "text_length": 448,
+ "token_count": null,
+ "total_time": 15.246968984603882,
+ "time_to_first_chunk": 3.1980819702148438,
+ "error": null,
+ "audio_path": "c:\\Users\\jerem\\Desktop\\Kokoro-FastAPI\\examples\\assorted_checks\\benchmarks\\output_audio_stream_openai\\benchmark_tokens100_run1_stream_openai.wav",
+ "audio_length": 30.35,
+ "target_tokens": 100,
+ "actual_tokens": 100,
+ "run_number": 1
+ },
+ {
+ "text_length": 448,
+ "token_count": null,
+ "total_time": 15.934760332107544,
+ "time_to_first_chunk": 4.23082709312439,
+ "error": null,
+ "audio_path": "c:\\Users\\jerem\\Desktop\\Kokoro-FastAPI\\examples\\assorted_checks\\benchmarks\\output_audio_stream_openai\\benchmark_tokens100_run2_stream_openai.wav",
+ "audio_length": 30.35,
+ "target_tokens": 100,
+ "actual_tokens": 100,
+ "run_number": 2
+ },
+ {
+ "text_length": 448,
+ "token_count": null,
+ "total_time": 13.799078226089478,
+ "time_to_first_chunk": 3.42996883392334,
+ "error": null,
+ "audio_path": "c:\\Users\\jerem\\Desktop\\Kokoro-FastAPI\\examples\\assorted_checks\\benchmarks\\output_audio_stream_openai\\benchmark_tokens100_run3_stream_openai.wav",
+ "audio_length": 30.35,
+ "target_tokens": 100,
+ "actual_tokens": 100,
+ "run_number": 3
+ },
+ {
+ "text_length": 448,
+ "token_count": null,
+ "total_time": 13.400063037872314,
+ "time_to_first_chunk": 3.2097883224487305,
+ "error": null,
+ "audio_path": "c:\\Users\\jerem\\Desktop\\Kokoro-FastAPI\\examples\\assorted_checks\\benchmarks\\output_audio_stream_openai\\benchmark_tokens100_run4_stream_openai.wav",
+ "audio_length": 30.35,
+ "target_tokens": 100,
+ "actual_tokens": 100,
+ "run_number": 4
+ },
+ {
+ "text_length": 448,
+ "token_count": null,
+ "total_time": 14.833694219589233,
+ "time_to_first_chunk": 3.1589744091033936,
+ "error": null,
+ "audio_path": "c:\\Users\\jerem\\Desktop\\Kokoro-FastAPI\\examples\\assorted_checks\\benchmarks\\output_audio_stream_openai\\benchmark_tokens100_run5_stream_openai.wav",
+ "audio_length": 30.35,
+ "target_tokens": 100,
+ "actual_tokens": 100,
+ "run_number": 5
+ },
+ {
+ "text_length": 1140,
+ "token_count": null,
+ "total_time": 35.49378156661987,
+ "time_to_first_chunk": 3.852027177810669,
+ "error": null,
+ "audio_path": "c:\\Users\\jerem\\Desktop\\Kokoro-FastAPI\\examples\\assorted_checks\\benchmarks\\output_audio_stream_openai\\benchmark_tokens250_run1_stream_openai.wav",
+ "audio_length": 78.175,
+ "target_tokens": 250,
+ "actual_tokens": 250,
+ "run_number": 1
+ },
+ {
+ "text_length": 1140,
+ "token_count": null,
+ "total_time": 33.59433174133301,
+ "time_to_first_chunk": 3.2059006690979004,
+ "error": null,
+ "audio_path": "c:\\Users\\jerem\\Desktop\\Kokoro-FastAPI\\examples\\assorted_checks\\benchmarks\\output_audio_stream_openai\\benchmark_tokens250_run2_stream_openai.wav",
+ "audio_length": 78.175,
+ "target_tokens": 250,
+ "actual_tokens": 250,
+ "run_number": 2
+ },
+ {
+ "text_length": 1140,
+ "token_count": null,
+ "total_time": 34.23120045661926,
+ "time_to_first_chunk": 3.1464977264404297,
+ "error": null,
+ "audio_path": "c:\\Users\\jerem\\Desktop\\Kokoro-FastAPI\\examples\\assorted_checks\\benchmarks\\output_audio_stream_openai\\benchmark_tokens250_run3_stream_openai.wav",
+ "audio_length": 78.175,
+ "target_tokens": 250,
+ "actual_tokens": 250,
+ "run_number": 3
+ },
+ {
+ "text_length": 1140,
+ "token_count": null,
+ "total_time": 36.18487215042114,
+ "time_to_first_chunk": 3.188844919204712,
+ "error": null,
+ "audio_path": "c:\\Users\\jerem\\Desktop\\Kokoro-FastAPI\\examples\\assorted_checks\\benchmarks\\output_audio_stream_openai\\benchmark_tokens250_run4_stream_openai.wav",
+ "audio_length": 78.175,
+ "target_tokens": 250,
+ "actual_tokens": 250,
+ "run_number": 4
+ },
+ {
+ "text_length": 1140,
+ "token_count": null,
+ "total_time": 38.142744302749634,
+ "time_to_first_chunk": 3.6997063159942627,
+ "error": null,
+ "audio_path": "c:\\Users\\jerem\\Desktop\\Kokoro-FastAPI\\examples\\assorted_checks\\benchmarks\\output_audio_stream_openai\\benchmark_tokens250_run5_stream_openai.wav",
+ "audio_length": 78.175,
+ "target_tokens": 250,
+ "actual_tokens": 250,
+ "run_number": 5
+ },
+ {
+ "text_length": 2232,
+ "token_count": null,
+ "total_time": 71.48920440673828,
+ "time_to_first_chunk": 3.148237943649292,
+ "error": null,
+ "audio_path": "c:\\Users\\jerem\\Desktop\\Kokoro-FastAPI\\examples\\assorted_checks\\benchmarks\\output_audio_stream_openai\\benchmark_tokens500_run1_stream_openai.wav",
+ "audio_length": 155.125,
+ "target_tokens": 500,
+ "actual_tokens": 500,
+ "run_number": 1
+ },
+ {
+ "text_length": 2232,
+ "token_count": null,
+ "total_time": 73.53017520904541,
+ "time_to_first_chunk": 3.464594841003418,
+ "error": null,
+ "audio_path": "c:\\Users\\jerem\\Desktop\\Kokoro-FastAPI\\examples\\assorted_checks\\benchmarks\\output_audio_stream_openai\\benchmark_tokens500_run2_stream_openai.wav",
+ "audio_length": 155.125,
+ "target_tokens": 500,
+ "actual_tokens": 500,
+ "run_number": 2
+ },
+ {
+ "text_length": 2232,
+ "token_count": null,
+ "total_time": 75.52278685569763,
+ "time_to_first_chunk": 3.5506417751312256,
+ "error": null,
+ "audio_path": "c:\\Users\\jerem\\Desktop\\Kokoro-FastAPI\\examples\\assorted_checks\\benchmarks\\output_audio_stream_openai\\benchmark_tokens500_run3_stream_openai.wav",
+ "audio_length": 155.125,
+ "target_tokens": 500,
+ "actual_tokens": 500,
+ "run_number": 3
+ },
+ {
+ "text_length": 2232,
+ "token_count": null,
+ "total_time": 69.45922994613647,
+ "time_to_first_chunk": 3.495962619781494,
+ "error": null,
+ "audio_path": "c:\\Users\\jerem\\Desktop\\Kokoro-FastAPI\\examples\\assorted_checks\\benchmarks\\output_audio_stream_openai\\benchmark_tokens500_run4_stream_openai.wav",
+ "audio_length": 155.125,
+ "target_tokens": 500,
+ "actual_tokens": 500,
+ "run_number": 4
+ },
+ {
+ "text_length": 2232,
+ "token_count": null,
+ "total_time": 66.66928672790527,
+ "time_to_first_chunk": 3.301323175430298,
+ "error": null,
+ "audio_path": "c:\\Users\\jerem\\Desktop\\Kokoro-FastAPI\\examples\\assorted_checks\\benchmarks\\output_audio_stream_openai\\benchmark_tokens500_run5_stream_openai.wav",
+ "audio_length": 155.125,
+ "target_tokens": 500,
+ "actual_tokens": 500,
+ "run_number": 5
+ }
+ ],
+ "summary": {
+ "10": {
+ "avg_time_to_first_chunk": 1.562,
+ "avg_total_time": 1.574,
+ "avg_audio_length": 3.45,
+ "num_successful_runs": 5
+ },
+ "50": {
+ "avg_time_to_first_chunk": 3.227,
+ "avg_total_time": 7.275,
+ "avg_audio_length": 15.825,
+ "num_successful_runs": 5
+ },
+ "100": {
+ "avg_time_to_first_chunk": 3.446,
+ "avg_total_time": 14.643,
+ "avg_audio_length": 30.35,
+ "num_successful_runs": 5
+ },
+ "250": {
+ "avg_time_to_first_chunk": 3.419,
+ "avg_total_time": 35.529,
+ "avg_audio_length": 78.175,
+ "num_successful_runs": 5
+ },
+ "500": {
+ "avg_time_to_first_chunk": 3.392,
+ "avg_total_time": 71.334,
+ "avg_audio_length": 155.125,
+ "num_successful_runs": 5
+ }
+ },
+ "timestamp": "2025-01-06 03:42:32"
+}
\ No newline at end of file
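
Both streaming benchmark files share the same `summary` shape, so the raw-requests and OpenAI-client paths can be compared per token bucket. A hedged sketch (file names taken from this diff, assumed to sit in the working directory):

```python
import json

def load_summary(path: str) -> dict:
    with open(path) as f:
        return json.load(f)["summary"]

direct = load_summary("first_token_benchmark_stream.json")
via_openai = load_summary("first_token_benchmark_stream_openai.json")

# Print first-chunk latency side by side for each target token count.
for tokens in sorted(direct, key=int):
    print(
        f"{tokens:>4} tokens: "
        f"direct {direct[tokens]['avg_time_to_first_chunk']:.3f}s vs "
        f"openai {via_openai[tokens]['avg_time_to_first_chunk']:.3f}s"
    )
```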
diff --git a/examples/assorted_checks/benchmarks/output_data/gpu_benchmark_results_rtf.json b/examples/assorted_checks/benchmarks/output_data/gpu_benchmark_results_rtf.json
index ccac37e..89db38c 100644
--- a/examples/assorted_checks/benchmarks/output_data/gpu_benchmark_results_rtf.json
+++ b/examples/assorted_checks/benchmarks/output_data/gpu_benchmark_results_rtf.json
@@ -2,1252 +2,616 @@
"results": [
{
"tokens": 150,
- "processing_time": 1.86,
+ "processing_time": 2.36,
"output_length": 45.9,
- "rtf": 0.04,
- "elapsed_time": 1.92
+ "rtf": 0.05,
+ "elapsed_time": 2.44626
},
{
"tokens": 300,
- "processing_time": 3.08,
+ "processing_time": 4.94,
"output_length": 96.425,
- "rtf": 0.03,
- "elapsed_time": 5.06
+ "rtf": 0.05,
+ "elapsed_time": 7.46073
},
{
"tokens": 450,
- "processing_time": 4.4,
+ "processing_time": 8.94,
"output_length": 143.1,
- "rtf": 0.03,
- "elapsed_time": 9.53
+ "rtf": 0.06,
+ "elapsed_time": 16.55036
},
{
"tokens": 600,
- "processing_time": 6.47,
+ "processing_time": 19.78,
"output_length": 188.675,
- "rtf": 0.03,
- "elapsed_time": 16.06
+ "rtf": 0.1,
+ "elapsed_time": 36.69352
},
{
"tokens": 750,
- "processing_time": 8.32,
+ "processing_time": 19.89,
"output_length": 236.7,
- "rtf": 0.04,
- "elapsed_time": 24.45
+ "rtf": 0.08,
+ "elapsed_time": 56.77695
},
{
"tokens": 900,
- "processing_time": 8.92,
+ "processing_time": 16.83,
"output_length": 283.425,
- "rtf": 0.03,
- "elapsed_time": 33.45
- },
- {
- "tokens": 2000,
- "processing_time": 18.55,
- "output_length": 624.325,
- "rtf": 0.03,
- "elapsed_time": 52.14
- },
- {
- "tokens": 3000,
- "processing_time": 23.98,
- "output_length": 931.15,
- "rtf": 0.03,
- "elapsed_time": 76.32
- },
- {
- "tokens": 4000,
- "processing_time": 32.93,
- "output_length": 1222.1,
- "rtf": 0.03,
- "elapsed_time": 109.53
- },
- {
- "tokens": 5000,
- "processing_time": 45.39,
- "output_length": 1524.575,
- "rtf": 0.03,
- "elapsed_time": 155.23
+ "rtf": 0.06,
+ "elapsed_time": 73.8079
}
],
"system_metrics": [
{
- "timestamp": "2025-01-04T02:37:52.172368",
- "cpu_percent": 11.51,
- "ram_percent": 52.8,
- "ram_used_gb": 33.61172866821289,
- "gpu_memory_used": 3216.0,
- "relative_time": 0.08031892776489258
+ "timestamp": "2025-01-06T00:43:20.888295",
+ "cpu_percent": 36.92,
+ "ram_percent": 68.6,
+ "ram_used_gb": 43.6395263671875,
+ "gpu_memory_used": 7022.0,
+ "relative_time": 0.09646010398864746
},
- {
- "timestamp": "2025-01-04T02:37:53.266071",
- "cpu_percent": 15.33,
- "ram_percent": 52.9,
- "ram_used_gb": 33.678314208984375,
- "gpu_memory_used": 3392.0,
- "relative_time": 1.1673684120178223
+ {
+ "timestamp": "2025-01-06T00:43:21.983741",
+ "cpu_percent": 22.29,
+ "ram_percent": 68.6,
+ "ram_used_gb": 43.642677307128906,
+ "gpu_memory_used": 7021.0,
+ "relative_time": 1.1906661987304688
},
- {
- "timestamp": "2025-01-04T02:37:54.352909",
- "cpu_percent": 15.3,
- "ram_percent": 53.0,
- "ram_used_gb": 33.712764739990234,
- "gpu_memory_used": 3667.0,
- "relative_time": 2.253591537475586
+ {
+ "timestamp": "2025-01-06T00:43:23.078293",
+ "cpu_percent": 27.39,
+ "ram_percent": 68.6,
+ "ram_used_gb": 43.61421203613281,
+ "gpu_memory_used": 7190.0,
+ "relative_time": 2.264479160308838
},
- {
- "timestamp": "2025-01-04T02:37:55.439413",
- "cpu_percent": 23.51,
- "ram_percent": 52.7,
- "ram_used_gb": 33.49789810180664,
- "gpu_memory_used": 3662.0,
- "relative_time": 3.3292760848999023
+ {
+ "timestamp": "2025-01-06T00:43:24.151445",
+ "cpu_percent": 20.28,
+ "ram_percent": 68.6,
+ "ram_used_gb": 43.65406036376953,
+ "gpu_memory_used": 7193.0,
+ "relative_time": 3.349093198776245
},
- {
- "timestamp": "2025-01-04T02:37:56.511211",
- "cpu_percent": 14.69,
- "ram_percent": 52.7,
- "ram_used_gb": 33.494102478027344,
- "gpu_memory_used": 3668.0,
- "relative_time": 4.397106885910034
+ {
+ "timestamp": "2025-01-06T00:43:25.237021",
+ "cpu_percent": 23.03,
+ "ram_percent": 68.6,
+ "ram_used_gb": 43.647274017333984,
+ "gpu_memory_used": 7191.0,
+ "relative_time": 4.413560628890991
},
- {
- "timestamp": "2025-01-04T02:37:57.582176",
- "cpu_percent": 11.01,
- "ram_percent": 52.8,
- "ram_used_gb": 33.564491271972656,
- "gpu_memory_used": 3665.0,
- "relative_time": 5.46670126914978
+ {
+ "timestamp": "2025-01-06T00:43:26.300255",
+ "cpu_percent": 23.62,
+ "ram_percent": 68.6,
+ "ram_used_gb": 43.642295837402344,
+ "gpu_memory_used": 7185.0,
+ "relative_time": 5.484973430633545
},
- {
- "timestamp": "2025-01-04T02:37:58.637969",
- "cpu_percent": 15.04,
- "ram_percent": 52.8,
- "ram_used_gb": 33.555362701416016,
- "gpu_memory_used": 3668.0,
- "relative_time": 6.523184061050415
+ {
+ "timestamp": "2025-01-06T00:43:27.377319",
+ "cpu_percent": 46.04,
+ "ram_percent": 68.7,
+ "ram_used_gb": 43.7291374206543,
+ "gpu_memory_used": 7178.0,
+ "relative_time": 6.658120632171631
},
- {
- "timestamp": "2025-01-04T02:37:59.700880",
- "cpu_percent": 13.32,
- "ram_percent": 52.8,
- "ram_used_gb": 33.559967041015625,
- "gpu_memory_used": 3668.0,
- "relative_time": 7.589032888412476
+ {
+ "timestamp": "2025-01-06T00:43:28.546053",
+ "cpu_percent": 29.79,
+ "ram_percent": 68.7,
+ "ram_used_gb": 43.73202133178711,
+ "gpu_memory_used": 7177.0,
+ "relative_time": 7.725939035415649
},
- {
- "timestamp": "2025-01-04T02:38:00.773895",
- "cpu_percent": 12.45,
- "ram_percent": 52.8,
- "ram_used_gb": 33.609134674072266,
- "gpu_memory_used": 3667.0,
- "relative_time": 8.677486181259155
+ {
+ "timestamp": "2025-01-06T00:43:29.613327",
+ "cpu_percent": 18.19,
+ "ram_percent": 68.8,
+ "ram_used_gb": 43.791343688964844,
+ "gpu_memory_used": 7177.0,
+ "relative_time": 8.800285577774048
},
- {
- "timestamp": "2025-01-04T02:38:01.851195",
- "cpu_percent": 12.62,
- "ram_percent": 52.9,
- "ram_used_gb": 33.67635726928711,
- "gpu_memory_used": 3665.0,
- "relative_time": 9.734971046447754
+ {
+ "timestamp": "2025-01-06T00:43:30.689097",
+ "cpu_percent": 22.29,
+ "ram_percent": 68.9,
+ "ram_used_gb": 43.81514358520508,
+ "gpu_memory_used": 7176.0,
+ "relative_time": 9.899119853973389
},
- {
- "timestamp": "2025-01-04T02:38:02.907897",
- "cpu_percent": 20.61,
- "ram_percent": 53.0,
- "ram_used_gb": 33.72555160522461,
- "gpu_memory_used": 3660.0,
- "relative_time": 10.813292026519775
+ {
+ "timestamp": "2025-01-06T00:43:31.786443",
+ "cpu_percent": 32.59,
+ "ram_percent": 68.9,
+ "ram_used_gb": 43.834510803222656,
+ "gpu_memory_used": 7189.0,
+ "relative_time": 11.042734384536743
},
- {
- "timestamp": "2025-01-04T02:38:03.996322",
- "cpu_percent": 33.24,
- "ram_percent": 53.2,
- "ram_used_gb": 33.832088470458984,
- "gpu_memory_used": 3660.0,
- "relative_time": 11.917856454849243
+ {
+ "timestamp": "2025-01-06T00:43:32.929720",
+ "cpu_percent": 42.48,
+ "ram_percent": 68.8,
+ "ram_used_gb": 43.77507019042969,
+ "gpu_memory_used": 7192.0,
+ "relative_time": 12.117269277572632
},
- {
- "timestamp": "2025-01-04T02:38:05.101973",
- "cpu_percent": 14.24,
- "ram_percent": 53.0,
- "ram_used_gb": 33.7408447265625,
- "gpu_memory_used": 3662.0,
- "relative_time": 12.986546277999878
+ {
+ "timestamp": "2025-01-06T00:43:34.004481",
+ "cpu_percent": 26.33,
+ "ram_percent": 68.8,
+ "ram_used_gb": 43.77891159057617,
+ "gpu_memory_used": 7192.0,
+ "relative_time": 13.19830870628357
},
{
- "timestamp": "2025-01-04T02:38:06.162037",
- "cpu_percent": 14.38,
- "ram_percent": 53.1,
- "ram_used_gb": 33.774169921875,
- "gpu_memory_used": 3662.0,
- "relative_time": 14.062608242034912
+ "timestamp": "2025-01-06T00:43:35.086024",
+ "cpu_percent": 26.53,
+ "ram_percent": 68.8,
+ "ram_used_gb": 43.77515411376953,
+ "gpu_memory_used": 7192.0,
+ "relative_time": 14.29457426071167
},
{
- "timestamp": "2025-01-04T02:38:07.248210",
- "cpu_percent": 14.39,
- "ram_percent": 53.2,
- "ram_used_gb": 33.83738327026367,
- "gpu_memory_used": 4029.0,
- "relative_time": 15.156044960021973
+ "timestamp": "2025-01-06T00:43:36.183496",
+ "cpu_percent": 40.33,
+ "ram_percent": 68.9,
+ "ram_used_gb": 43.81095886230469,
+ "gpu_memory_used": 7192.0,
+ "relative_time": 15.402768850326538
},
{
- "timestamp": "2025-01-04T02:38:08.329582",
- "cpu_percent": 31.18,
- "ram_percent": 53.2,
- "ram_used_gb": 33.87126541137695,
- "gpu_memory_used": 4032.0,
- "relative_time": 16.249940395355225
+ "timestamp": "2025-01-06T00:43:37.290635",
+ "cpu_percent": 43.6,
+ "ram_percent": 69.0,
+ "ram_used_gb": 43.87236022949219,
+ "gpu_memory_used": 7190.0,
+ "relative_time": 16.574281930923462
},
{
- "timestamp": "2025-01-04T02:38:09.432992",
- "cpu_percent": 19.33,
- "ram_percent": 53.2,
- "ram_used_gb": 33.842403411865234,
- "gpu_memory_used": 4032.0,
- "relative_time": 17.331223011016846
+ "timestamp": "2025-01-06T00:43:38.462164",
+ "cpu_percent": 85.74,
+ "ram_percent": 69.0,
+ "ram_used_gb": 43.864280700683594,
+ "gpu_memory_used": 6953.0,
+ "relative_time": 17.66074824333191
},
{
- "timestamp": "2025-01-04T02:38:10.505101",
- "cpu_percent": 13.34,
- "ram_percent": 53.2,
- "ram_used_gb": 33.86738967895508,
- "gpu_memory_used": 4029.0,
- "relative_time": 18.390397548675537
+ "timestamp": "2025-01-06T00:43:39.548295",
+ "cpu_percent": 23.88,
+ "ram_percent": 68.8,
+ "ram_used_gb": 43.75236129760742,
+ "gpu_memory_used": 4722.0,
+ "relative_time": 18.739423036575317
},
{
- "timestamp": "2025-01-04T02:38:11.570033",
- "cpu_percent": 12.61,
- "ram_percent": 53.4,
- "ram_used_gb": 33.938289642333984,
- "gpu_memory_used": 4028.0,
- "relative_time": 19.477521181106567
+ "timestamp": "2025-01-06T00:43:40.626692",
+ "cpu_percent": 59.24,
+ "ram_percent": 68.7,
+ "ram_used_gb": 43.720741271972656,
+ "gpu_memory_used": 4723.0,
+ "relative_time": 19.846031665802002
},
{
- "timestamp": "2025-01-04T02:38:12.663780",
- "cpu_percent": 15.78,
- "ram_percent": 53.4,
- "ram_used_gb": 33.969398498535156,
- "gpu_memory_used": 4030.0,
- "relative_time": 20.57425808906555
+ "timestamp": "2025-01-06T00:43:41.733597",
+ "cpu_percent": 41.74,
+ "ram_percent": 68.4,
+ "ram_used_gb": 43.53546142578125,
+ "gpu_memory_used": 4722.0,
+ "relative_time": 20.920310020446777
},
{
- "timestamp": "2025-01-04T02:38:13.750065",
- "cpu_percent": 18.69,
- "ram_percent": 53.5,
- "ram_used_gb": 34.03954315185547,
- "gpu_memory_used": 4021.0,
- "relative_time": 21.652076244354248
+ "timestamp": "2025-01-06T00:43:42.808191",
+ "cpu_percent": 35.43,
+ "ram_percent": 68.3,
+ "ram_used_gb": 43.424468994140625,
+ "gpu_memory_used": 4726.0,
+ "relative_time": 22.00457763671875
},
{
- "timestamp": "2025-01-04T02:38:14.825318",
- "cpu_percent": 10.48,
- "ram_percent": 53.6,
- "ram_used_gb": 34.07048416137695,
- "gpu_memory_used": 4025.0,
- "relative_time": 22.73010230064392
+ "timestamp": "2025-01-06T00:43:43.891669",
+ "cpu_percent": 43.81,
+ "ram_percent": 68.2,
+ "ram_used_gb": 43.38311004638672,
+ "gpu_memory_used": 4727.0,
+ "relative_time": 23.08402943611145
},
{
- "timestamp": "2025-01-04T02:38:15.912340",
- "cpu_percent": 12.53,
- "ram_percent": 53.6,
- "ram_used_gb": 34.09389877319336,
- "gpu_memory_used": 4026.0,
- "relative_time": 23.81609869003296
+ "timestamp": "2025-01-06T00:43:44.971246",
+ "cpu_percent": 58.13,
+ "ram_percent": 68.0,
+ "ram_used_gb": 43.27970886230469,
+ "gpu_memory_used": 4731.0,
+ "relative_time": 24.249765396118164
},
{
- "timestamp": "2025-01-04T02:38:17.003329",
- "cpu_percent": 16.09,
- "ram_percent": 53.7,
- "ram_used_gb": 34.1781120300293,
- "gpu_memory_used": 4025.0,
- "relative_time": 24.90904140472412
+ "timestamp": "2025-01-06T00:43:46.137626",
+ "cpu_percent": 66.76,
+ "ram_percent": 68.0,
+ "ram_used_gb": 43.23844528198242,
+ "gpu_memory_used": 4731.0,
+ "relative_time": 25.32853865623474
},
{
- "timestamp": "2025-01-04T02:38:18.079837",
- "cpu_percent": 14.98,
- "ram_percent": 53.8,
- "ram_used_gb": 34.21260070800781,
- "gpu_memory_used": 4025.0,
- "relative_time": 25.986279249191284
+ "timestamp": "2025-01-06T00:43:47.219723",
+ "cpu_percent": 27.95,
+ "ram_percent": 67.8,
+ "ram_used_gb": 43.106136322021484,
+ "gpu_memory_used": 4734.0,
+ "relative_time": 26.499221563339233
},
{
- "timestamp": "2025-01-04T02:38:19.167635",
- "cpu_percent": 14.85,
- "ram_percent": 53.8,
- "ram_used_gb": 34.23923873901367,
- "gpu_memory_used": 4024.0,
- "relative_time": 27.076823234558105
+ "timestamp": "2025-01-06T00:43:48.386913",
+ "cpu_percent": 73.13,
+ "ram_percent": 67.7,
+ "ram_used_gb": 43.049781799316406,
+ "gpu_memory_used": 4736.0,
+ "relative_time": 27.592528104782104
},
{
- "timestamp": "2025-01-04T02:38:20.258141",
- "cpu_percent": 15.05,
- "ram_percent": 53.9,
- "ram_used_gb": 34.26483917236328,
- "gpu_memory_used": 4015.0,
- "relative_time": 28.144607067108154
+ "timestamp": "2025-01-06T00:43:49.480407",
+ "cpu_percent": 50.63,
+ "ram_percent": 67.6,
+ "ram_used_gb": 43.007415771484375,
+ "gpu_memory_used": 4736.0,
+ "relative_time": 28.711266040802002
},
{
- "timestamp": "2025-01-04T02:38:21.315694",
- "cpu_percent": 17.08,
- "ram_percent": 53.9,
- "ram_used_gb": 34.31473922729492,
- "gpu_memory_used": 4016.0,
- "relative_time": 29.20189356803894
+ "timestamp": "2025-01-06T00:43:50.599220",
+ "cpu_percent": 92.36,
+ "ram_percent": 67.5,
+ "ram_used_gb": 42.9685173034668,
+ "gpu_memory_used": 4728.0,
+ "relative_time": 29.916289567947388
},
{
- "timestamp": "2025-01-04T02:38:22.388259",
- "cpu_percent": 17.47,
- "ram_percent": 54.0,
- "ram_used_gb": 34.35490798950195,
- "gpu_memory_used": 4016.0,
- "relative_time": 30.28918957710266
+ "timestamp": "2025-01-06T00:43:51.803667",
+ "cpu_percent": 83.07,
+ "ram_percent": 67.5,
+ "ram_used_gb": 42.96232986450195,
+ "gpu_memory_used": 4724.0,
+ "relative_time": 31.039498805999756
},
{
- "timestamp": "2025-01-04T02:38:23.463469",
- "cpu_percent": 15.76,
- "ram_percent": 54.0,
- "ram_used_gb": 34.33717346191406,
- "gpu_memory_used": 4002.0,
- "relative_time": 31.364880561828613
+ "timestamp": "2025-01-06T00:43:52.927208",
+ "cpu_percent": 90.61,
+ "ram_percent": 67.5,
+ "ram_used_gb": 42.96202850341797,
+ "gpu_memory_used": 5037.0,
+ "relative_time": 32.2381911277771
},
{
- "timestamp": "2025-01-04T02:38:24.540334",
- "cpu_percent": 13.54,
- "ram_percent": 54.1,
- "ram_used_gb": 34.38197708129883,
- "gpu_memory_used": 3999.0,
- "relative_time": 32.4253191947937
+ "timestamp": "2025-01-06T00:43:54.128135",
+ "cpu_percent": 89.47,
+ "ram_percent": 67.5,
+ "ram_used_gb": 42.94692611694336,
+ "gpu_memory_used": 5085.0,
+ "relative_time": 33.35147500038147
},
{
- "timestamp": "2025-01-04T02:38:25.597934",
- "cpu_percent": 13.99,
- "ram_percent": 54.2,
- "ram_used_gb": 34.48365783691406,
- "gpu_memory_used": 4004.0,
- "relative_time": 33.50029754638672
+ "timestamp": "2025-01-06T00:43:55.238967",
+ "cpu_percent": 60.01,
+ "ram_percent": 67.4,
+ "ram_used_gb": 42.88222122192383,
+ "gpu_memory_used": 5085.0,
+ "relative_time": 34.455963373184204
},
{
- "timestamp": "2025-01-04T02:38:26.673108",
- "cpu_percent": 15.16,
- "ram_percent": 54.2,
- "ram_used_gb": 34.50083923339844,
- "gpu_memory_used": 4011.0,
- "relative_time": 34.5756139755249
+ "timestamp": "2025-01-06T00:43:56.344164",
+ "cpu_percent": 62.12,
+ "ram_percent": 67.3,
+ "ram_used_gb": 42.81411361694336,
+ "gpu_memory_used": 5083.0,
+ "relative_time": 35.549962282180786
},
{
- "timestamp": "2025-01-04T02:38:27.748147",
- "cpu_percent": 17.68,
- "ram_percent": 54.2,
- "ram_used_gb": 34.49884033203125,
- "gpu_memory_used": 4016.0,
- "relative_time": 35.650988817214966
+ "timestamp": "2025-01-06T00:43:57.437566",
+ "cpu_percent": 53.56,
+ "ram_percent": 67.3,
+ "ram_used_gb": 42.83011245727539,
+ "gpu_memory_used": 5078.0,
+ "relative_time": 36.66783380508423
},
{
- "timestamp": "2025-01-04T02:38:28.835603",
- "cpu_percent": 26.81,
- "ram_percent": 54.3,
- "ram_used_gb": 34.536773681640625,
- "gpu_memory_used": 4015.0,
- "relative_time": 36.73981595039368
+ "timestamp": "2025-01-06T00:43:58.554923",
+ "cpu_percent": 80.27,
+ "ram_percent": 67.3,
+ "ram_used_gb": 42.79304504394531,
+ "gpu_memory_used": 5069.0,
+ "relative_time": 37.77330660820007
},
{
- "timestamp": "2025-01-04T02:38:29.912604",
- "cpu_percent": 27.61,
- "ram_percent": 54.3,
- "ram_used_gb": 34.56916427612305,
- "gpu_memory_used": 4016.0,
- "relative_time": 37.81279993057251
+ "timestamp": "2025-01-06T00:43:59.660456",
+ "cpu_percent": 72.33,
+ "ram_percent": 67.2,
+ "ram_used_gb": 42.727474212646484,
+ "gpu_memory_used": 5079.0,
+ "relative_time": 38.885955810546875
},
{
- "timestamp": "2025-01-04T02:38:30.984988",
- "cpu_percent": 34.24,
- "ram_percent": 54.4,
- "ram_used_gb": 34.599365234375,
- "gpu_memory_used": 4014.0,
- "relative_time": 38.89973425865173
+ "timestamp": "2025-01-06T00:44:00.773867",
+ "cpu_percent": 59.29,
+ "ram_percent": 66.9,
+ "ram_used_gb": 42.566131591796875,
+ "gpu_memory_used": 5079.0,
+ "relative_time": 39.99704432487488
},
{
- "timestamp": "2025-01-04T02:38:32.071596",
- "cpu_percent": 31.95,
- "ram_percent": 54.2,
- "ram_used_gb": 34.46506881713867,
- "gpu_memory_used": 4014.0,
- "relative_time": 39.95902729034424
+ "timestamp": "2025-01-06T00:44:01.884399",
+ "cpu_percent": 43.52,
+ "ram_percent": 66.5,
+ "ram_used_gb": 42.32980728149414,
+ "gpu_memory_used": 5079.0,
+ "relative_time": 41.13008522987366
},
{
- "timestamp": "2025-01-04T02:38:33.140836",
- "cpu_percent": 27.78,
- "ram_percent": 54.3,
- "ram_used_gb": 34.51242446899414,
- "gpu_memory_used": 4014.0,
- "relative_time": 41.0454580783844
+ "timestamp": "2025-01-06T00:44:03.018905",
+ "cpu_percent": 84.46,
+ "ram_percent": 66.5,
+ "ram_used_gb": 42.28911590576172,
+ "gpu_memory_used": 5087.0,
+ "relative_time": 42.296770095825195
},
{
- "timestamp": "2025-01-04T02:38:34.229919",
- "cpu_percent": 21.09,
- "ram_percent": 54.3,
- "ram_used_gb": 34.513973236083984,
- "gpu_memory_used": 4011.0,
- "relative_time": 42.133435010910034
+ "timestamp": "2025-01-06T00:44:04.184606",
+ "cpu_percent": 88.27,
+ "ram_percent": 66.3,
+ "ram_used_gb": 42.16263961791992,
+ "gpu_memory_used": 5091.0,
+ "relative_time": 43.42832589149475
},
{
- "timestamp": "2025-01-04T02:38:35.317486",
- "cpu_percent": 17.26,
- "ram_percent": 53.9,
- "ram_used_gb": 34.3167839050293,
- "gpu_memory_used": 4020.0,
- "relative_time": 43.21739077568054
+ "timestamp": "2025-01-06T00:44:05.315967",
+ "cpu_percent": 80.91,
+ "ram_percent": 65.9,
+ "ram_used_gb": 41.9491081237793,
+ "gpu_memory_used": 5089.0,
+ "relative_time": 44.52496290206909
},
{
- "timestamp": "2025-01-04T02:38:36.394375",
- "cpu_percent": 12.32,
- "ram_percent": 54.0,
- "ram_used_gb": 34.34043884277344,
- "gpu_memory_used": 4020.0,
- "relative_time": 44.27889919281006
+ "timestamp": "2025-01-06T00:44:06.412298",
+ "cpu_percent": 41.68,
+ "ram_percent": 65.6,
+ "ram_used_gb": 41.72716522216797,
+ "gpu_memory_used": 5090.0,
+ "relative_time": 45.679444313049316
},
{
- "timestamp": "2025-01-04T02:38:37.454005",
- "cpu_percent": 12.46,
- "ram_percent": 54.0,
- "ram_used_gb": 34.37453842163086,
- "gpu_memory_used": 4020.0,
- "relative_time": 45.341508626937866
+ "timestamp": "2025-01-06T00:44:07.566964",
+ "cpu_percent": 73.02,
+ "ram_percent": 65.5,
+ "ram_used_gb": 41.64710998535156,
+ "gpu_memory_used": 5091.0,
+ "relative_time": 46.81710481643677
},
{
- "timestamp": "2025-01-04T02:38:38.515337",
- "cpu_percent": 14.16,
- "ram_percent": 54.1,
- "ram_used_gb": 34.401729583740234,
- "gpu_memory_used": 4019.0,
- "relative_time": 46.410696506500244
+ "timestamp": "2025-01-06T00:44:08.704786",
+ "cpu_percent": 75.38,
+ "ram_percent": 65.4,
+ "ram_used_gb": 41.59475326538086,
+ "gpu_memory_used": 5097.0,
+ "relative_time": 47.91444158554077
},
{
- "timestamp": "2025-01-04T02:38:39.593044",
- "cpu_percent": 13.71,
- "ram_percent": 54.1,
- "ram_used_gb": 34.435630798339844,
- "gpu_memory_used": 4019.0,
- "relative_time": 47.48556661605835
+ "timestamp": "2025-01-06T00:44:09.802745",
+ "cpu_percent": 42.21,
+ "ram_percent": 65.2,
+ "ram_used_gb": 41.45526885986328,
+ "gpu_memory_used": 5111.0,
+ "relative_time": 49.04095649719238
},
{
- "timestamp": "2025-01-04T02:38:40.665509",
- "cpu_percent": 13.17,
- "ram_percent": 54.2,
- "ram_used_gb": 34.49795150756836,
- "gpu_memory_used": 4016.0,
- "relative_time": 48.551952838897705
+ "timestamp": "2025-01-06T00:44:10.928231",
+ "cpu_percent": 65.65,
+ "ram_percent": 64.4,
+ "ram_used_gb": 40.93437957763672,
+ "gpu_memory_used": 5111.0,
+ "relative_time": 50.14311861991882
},
{
- "timestamp": "2025-01-04T02:38:41.724929",
- "cpu_percent": 12.67,
- "ram_percent": 54.3,
- "ram_used_gb": 34.52568054199219,
- "gpu_memory_used": 4011.0,
- "relative_time": 49.61591196060181
+ "timestamp": "2025-01-06T00:44:12.036249",
+ "cpu_percent": 28.51,
+ "ram_percent": 64.1,
+ "ram_used_gb": 40.749881744384766,
+ "gpu_memory_used": 5107.0,
+ "relative_time": 51.250269651412964
},
{
- "timestamp": "2025-01-04T02:38:42.801080",
- "cpu_percent": 12.83,
- "ram_percent": 54.4,
- "ram_used_gb": 34.579071044921875,
- "gpu_memory_used": 4007.0,
- "relative_time": 50.70357823371887
+ "timestamp": "2025-01-06T00:44:13.137586",
+ "cpu_percent": 52.99,
+ "ram_percent": 64.2,
+ "ram_used_gb": 40.84278869628906,
+ "gpu_memory_used": 5104.0,
+ "relative_time": 52.34805965423584
},
{
- "timestamp": "2025-01-04T02:38:43.884984",
- "cpu_percent": 12.31,
- "ram_percent": 54.4,
- "ram_used_gb": 34.59829330444336,
- "gpu_memory_used": 4003.0,
- "relative_time": 51.771891832351685
+ "timestamp": "2025-01-06T00:44:14.235248",
+ "cpu_percent": 34.55,
+ "ram_percent": 64.1,
+ "ram_used_gb": 40.7873420715332,
+ "gpu_memory_used": 5097.0,
+ "relative_time": 53.424301862716675
},
{
- "timestamp": "2025-01-04T02:38:44.957477",
- "cpu_percent": 12.58,
- "ram_percent": 54.7,
- "ram_used_gb": 34.76633071899414,
- "gpu_memory_used": 4003.0,
- "relative_time": 52.859192848205566
+ "timestamp": "2025-01-06T00:44:15.311386",
+ "cpu_percent": 39.07,
+ "ram_percent": 64.2,
+ "ram_used_gb": 40.860008239746094,
+ "gpu_memory_used": 5091.0,
+ "relative_time": 54.50679922103882
},
{
- "timestamp": "2025-01-04T02:38:46.031581",
- "cpu_percent": 14.48,
- "ram_percent": 54.6,
- "ram_used_gb": 34.76308059692383,
- "gpu_memory_used": 4013.0,
- "relative_time": 53.91648840904236
+ "timestamp": "2025-01-06T00:44:16.393626",
+ "cpu_percent": 31.02,
+ "ram_percent": 64.3,
+ "ram_used_gb": 40.884307861328125,
+ "gpu_memory_used": 5093.0,
+ "relative_time": 55.57431173324585
},
{
- "timestamp": "2025-01-04T02:38:47.091693",
- "cpu_percent": 14.35,
- "ram_percent": 54.7,
- "ram_used_gb": 34.81193923950195,
- "gpu_memory_used": 4013.0,
- "relative_time": 54.993882179260254
+ "timestamp": "2025-01-06T00:44:17.461449",
+ "cpu_percent": 24.53,
+ "ram_percent": 64.3,
+ "ram_used_gb": 40.89955520629883,
+ "gpu_memory_used": 5070.0,
+ "relative_time": 56.660638093948364
},
{
- "timestamp": "2025-01-04T02:38:48.178826",
- "cpu_percent": 16.46,
- "ram_percent": 54.7,
- "ram_used_gb": 34.784278869628906,
- "gpu_memory_used": 4014.0,
- "relative_time": 56.064146518707275
+ "timestamp": "2025-01-06T00:44:18.547558",
+ "cpu_percent": 19.93,
+ "ram_percent": 64.3,
+ "ram_used_gb": 40.92641830444336,
+ "gpu_memory_used": 5074.0,
+ "relative_time": 57.736456871032715
},
{
- "timestamp": "2025-01-04T02:38:49.235997",
- "cpu_percent": 12.84,
- "ram_percent": 54.7,
- "ram_used_gb": 34.79767608642578,
- "gpu_memory_used": 4014.0,
- "relative_time": 57.12374472618103
+ "timestamp": "2025-01-06T00:44:19.624478",
+ "cpu_percent": 15.63,
+ "ram_percent": 64.3,
+ "ram_used_gb": 40.92564392089844,
+ "gpu_memory_used": 5082.0,
+ "relative_time": 58.81701683998108
},
{
- "timestamp": "2025-01-04T02:38:50.295962",
- "cpu_percent": 15.69,
- "ram_percent": 54.8,
- "ram_used_gb": 34.8546257019043,
- "gpu_memory_used": 4013.0,
- "relative_time": 58.180296421051025
+ "timestamp": "2025-01-06T00:44:20.705184",
+ "cpu_percent": 29.86,
+ "ram_percent": 64.4,
+ "ram_used_gb": 40.935394287109375,
+ "gpu_memory_used": 5082.0,
+ "relative_time": 59.88701677322388
},
{
- "timestamp": "2025-01-04T02:38:51.357678",
- "cpu_percent": 14.54,
- "ram_percent": 54.8,
- "ram_used_gb": 34.8900260925293,
- "gpu_memory_used": 4014.0,
- "relative_time": 59.242270708084106
+ "timestamp": "2025-01-06T00:44:21.775463",
+ "cpu_percent": 43.55,
+ "ram_percent": 64.4,
+ "ram_used_gb": 40.9350471496582,
+ "gpu_memory_used": 5080.0,
+ "relative_time": 60.96005439758301
},
{
- "timestamp": "2025-01-04T02:38:52.415380",
- "cpu_percent": 14.74,
- "ram_percent": 54.9,
- "ram_used_gb": 34.92173767089844,
- "gpu_memory_used": 4014.0,
- "relative_time": 60.307114601135254
+ "timestamp": "2025-01-06T00:44:22.847939",
+ "cpu_percent": 26.66,
+ "ram_percent": 64.4,
+ "ram_used_gb": 40.94179916381836,
+ "gpu_memory_used": 5076.0,
+ "relative_time": 62.02673673629761
},
{
- "timestamp": "2025-01-04T02:38:53.490598",
- "cpu_percent": 13.82,
- "ram_percent": 55.1,
- "ram_used_gb": 35.028907775878906,
- "gpu_memory_used": 4014.0,
- "relative_time": 61.37576389312744
+ "timestamp": "2025-01-06T00:44:23.914337",
+ "cpu_percent": 22.46,
+ "ram_percent": 64.4,
+ "ram_used_gb": 40.9537467956543,
+ "gpu_memory_used": 5076.0,
+ "relative_time": 63.10581707954407
},
{
- "timestamp": "2025-01-04T02:38:54.548660",
- "cpu_percent": 11.31,
- "ram_percent": 55.1,
- "ram_used_gb": 35.05375289916992,
- "gpu_memory_used": 4011.0,
- "relative_time": 62.43392610549927
- },
- {
- "timestamp": "2025-01-04T02:38:55.609900",
- "cpu_percent": 14.35,
- "ram_percent": 55.1,
- "ram_used_gb": 35.03831100463867,
- "gpu_memory_used": 4011.0,
- "relative_time": 63.493370056152344
- },
- {
- "timestamp": "2025-01-04T02:38:56.666032",
- "cpu_percent": 13.11,
- "ram_percent": 55.1,
- "ram_used_gb": 35.07795333862305,
- "gpu_memory_used": 4011.0,
- "relative_time": 64.54955720901489
- },
- {
- "timestamp": "2025-01-04T02:38:57.730782",
- "cpu_percent": 16.01,
- "ram_percent": 55.2,
- "ram_used_gb": 35.11598587036133,
- "gpu_memory_used": 4007.0,
- "relative_time": 65.61445665359497
- },
- {
- "timestamp": "2025-01-04T02:38:58.787051",
- "cpu_percent": 13.68,
- "ram_percent": 55.3,
- "ram_used_gb": 35.15185546875,
- "gpu_memory_used": 4007.0,
- "relative_time": 66.67095923423767
- },
- {
- "timestamp": "2025-01-04T02:38:59.847606",
- "cpu_percent": 13.08,
- "ram_percent": 55.3,
- "ram_used_gb": 35.183753967285156,
- "gpu_memory_used": 4003.0,
- "relative_time": 67.73307466506958
- },
- {
- "timestamp": "2025-01-04T02:39:00.918871",
- "cpu_percent": 13.26,
- "ram_percent": 55.4,
- "ram_used_gb": 35.22275161743164,
- "gpu_memory_used": 4005.0,
- "relative_time": 68.80590057373047
- },
- {
- "timestamp": "2025-01-04T02:39:01.991170",
- "cpu_percent": 11.05,
- "ram_percent": 55.3,
- "ram_used_gb": 35.15507125854492,
- "gpu_memory_used": 4005.0,
- "relative_time": 69.8813705444336
- },
- {
- "timestamp": "2025-01-04T02:39:03.058942",
- "cpu_percent": 11.08,
- "ram_percent": 55.4,
- "ram_used_gb": 35.2095947265625,
- "gpu_memory_used": 4005.0,
- "relative_time": 70.94484400749207
- },
- {
- "timestamp": "2025-01-04T02:39:04.116159",
- "cpu_percent": 12.74,
- "ram_percent": 55.4,
- "ram_used_gb": 35.24392318725586,
- "gpu_memory_used": 4005.0,
- "relative_time": 72.00281810760498
- },
- {
- "timestamp": "2025-01-04T02:39:05.187410",
- "cpu_percent": 11.04,
- "ram_percent": 55.4,
- "ram_used_gb": 35.259830474853516,
- "gpu_memory_used": 4005.0,
- "relative_time": 73.07217526435852
- },
- {
- "timestamp": "2025-01-04T02:39:06.248588",
- "cpu_percent": 13.66,
- "ram_percent": 55.5,
- "ram_used_gb": 35.29854965209961,
- "gpu_memory_used": 4005.0,
- "relative_time": 74.13533973693848
- },
- {
- "timestamp": "2025-01-04T02:39:07.321179",
- "cpu_percent": 11.16,
- "ram_percent": 55.6,
- "ram_used_gb": 35.346981048583984,
- "gpu_memory_used": 4005.0,
- "relative_time": 75.18772435188293
- },
- {
- "timestamp": "2025-01-04T02:39:08.362496",
- "cpu_percent": 9.46,
- "ram_percent": 56.1,
- "ram_used_gb": 35.69393539428711,
- "gpu_memory_used": 4006.0,
- "relative_time": 76.25136637687683
- },
- {
- "timestamp": "2025-01-04T02:39:09.438218",
- "cpu_percent": 15.88,
- "ram_percent": 56.1,
- "ram_used_gb": 35.6658821105957,
- "gpu_memory_used": 4006.0,
- "relative_time": 77.32329249382019
- },
- {
- "timestamp": "2025-01-04T02:39:10.495653",
- "cpu_percent": 13.25,
- "ram_percent": 56.1,
- "ram_used_gb": 35.658119201660156,
- "gpu_memory_used": 4014.0,
- "relative_time": 78.38210940361023
- },
- {
- "timestamp": "2025-01-04T02:39:11.560094",
- "cpu_percent": 10.8,
- "ram_percent": 56.1,
- "ram_used_gb": 35.694610595703125,
- "gpu_memory_used": 4014.0,
- "relative_time": 79.44816374778748
- },
- {
- "timestamp": "2025-01-04T02:39:12.621879",
- "cpu_percent": 12.58,
- "ram_percent": 56.1,
- "ram_used_gb": 35.68545150756836,
- "gpu_memory_used": 4014.0,
- "relative_time": 80.51017951965332
- },
- {
- "timestamp": "2025-01-04T02:39:13.692834",
- "cpu_percent": 13.3,
- "ram_percent": 56.2,
- "ram_used_gb": 35.730979919433594,
- "gpu_memory_used": 4012.0,
- "relative_time": 81.57789969444275
- },
- {
- "timestamp": "2025-01-04T02:39:14.753401",
- "cpu_percent": 14.75,
- "ram_percent": 56.2,
- "ram_used_gb": 35.73103332519531,
- "gpu_memory_used": 4012.0,
- "relative_time": 82.63830900192261
- },
- {
- "timestamp": "2025-01-04T02:39:15.811385",
- "cpu_percent": 14.69,
- "ram_percent": 56.2,
- "ram_used_gb": 35.740108489990234,
- "gpu_memory_used": 4011.0,
- "relative_time": 83.69796371459961
- },
- {
- "timestamp": "2025-01-04T02:39:16.874197",
- "cpu_percent": 14.28,
- "ram_percent": 56.2,
- "ram_used_gb": 35.767982482910156,
- "gpu_memory_used": 4010.0,
- "relative_time": 84.76145887374878
- },
- {
- "timestamp": "2025-01-04T02:39:17.936944",
- "cpu_percent": 12.99,
- "ram_percent": 56.3,
- "ram_used_gb": 35.81233596801758,
- "gpu_memory_used": 4010.0,
- "relative_time": 85.84119439125061
- },
- {
- "timestamp": "2025-01-04T02:39:19.027320",
- "cpu_percent": 12.79,
- "ram_percent": 56.6,
- "ram_used_gb": 36.0085563659668,
- "gpu_memory_used": 4010.0,
- "relative_time": 86.91442775726318
- },
- {
- "timestamp": "2025-01-04T02:39:20.088804",
- "cpu_percent": 15.62,
- "ram_percent": 56.9,
- "ram_used_gb": 36.16616439819336,
- "gpu_memory_used": 4006.0,
- "relative_time": 88.00494360923767
- },
- {
- "timestamp": "2025-01-04T02:39:21.191796",
- "cpu_percent": 12.98,
- "ram_percent": 57.1,
- "ram_used_gb": 36.3217658996582,
- "gpu_memory_used": 4005.0,
- "relative_time": 89.12522411346436
- },
- {
- "timestamp": "2025-01-04T02:39:22.311508",
- "cpu_percent": 14.77,
- "ram_percent": 56.9,
- "ram_used_gb": 36.191429138183594,
- "gpu_memory_used": 4005.0,
- "relative_time": 90.19932198524475
- },
- {
- "timestamp": "2025-01-04T02:39:23.372871",
- "cpu_percent": 12.98,
- "ram_percent": 57.1,
- "ram_used_gb": 36.29658889770508,
- "gpu_memory_used": 4005.0,
- "relative_time": 91.3045928478241
- },
- {
- "timestamp": "2025-01-04T02:39:24.479087",
- "cpu_percent": 14.64,
- "ram_percent": 57.1,
- "ram_used_gb": 36.30413055419922,
- "gpu_memory_used": 3998.0,
- "relative_time": 92.36360597610474
- },
- {
- "timestamp": "2025-01-04T02:39:25.537969",
- "cpu_percent": 14.51,
- "ram_percent": 57.1,
- "ram_used_gb": 36.311763763427734,
- "gpu_memory_used": 3998.0,
- "relative_time": 93.42230415344238
- },
- {
- "timestamp": "2025-01-04T02:39:26.594967",
- "cpu_percent": 13.05,
- "ram_percent": 57.1,
- "ram_used_gb": 36.351402282714844,
- "gpu_memory_used": 3998.0,
- "relative_time": 94.47847175598145
- },
- {
- "timestamp": "2025-01-04T02:39:27.652223",
- "cpu_percent": 15.05,
- "ram_percent": 57.2,
- "ram_used_gb": 36.36949157714844,
- "gpu_memory_used": 4004.0,
- "relative_time": 95.53560948371887
- },
- {
- "timestamp": "2025-01-04T02:39:28.708008",
- "cpu_percent": 12.51,
- "ram_percent": 57.2,
- "ram_used_gb": 36.3841667175293,
- "gpu_memory_used": 4004.0,
- "relative_time": 96.59472155570984
- },
- {
- "timestamp": "2025-01-04T02:39:29.768866",
- "cpu_percent": 10.83,
- "ram_percent": 57.2,
- "ram_used_gb": 36.39939880371094,
- "gpu_memory_used": 4004.0,
- "relative_time": 97.6679356098175
- },
- {
- "timestamp": "2025-01-04T02:39:30.844295",
- "cpu_percent": 14.61,
- "ram_percent": 57.3,
- "ram_used_gb": 36.42519760131836,
- "gpu_memory_used": 4004.0,
- "relative_time": 98.74996089935303
- },
- {
- "timestamp": "2025-01-04T02:39:31.934080",
- "cpu_percent": 11.74,
- "ram_percent": 57.0,
- "ram_used_gb": 36.271087646484375,
- "gpu_memory_used": 4004.0,
- "relative_time": 99.81860518455505
- },
- {
- "timestamp": "2025-01-04T02:39:32.989954",
- "cpu_percent": 12.09,
- "ram_percent": 57.2,
- "ram_used_gb": 36.368350982666016,
- "gpu_memory_used": 4010.0,
- "relative_time": 100.87712931632996
- },
- {
- "timestamp": "2025-01-04T02:39:34.061411",
- "cpu_percent": 11.07,
- "ram_percent": 57.2,
- "ram_used_gb": 36.38072967529297,
- "gpu_memory_used": 4010.0,
- "relative_time": 101.946035861969
- },
- {
- "timestamp": "2025-01-04T02:39:35.117182",
- "cpu_percent": 9.32,
- "ram_percent": 57.2,
- "ram_used_gb": 36.367733001708984,
- "gpu_memory_used": 4415.0,
- "relative_time": 103.00355505943298
- },
- {
- "timestamp": "2025-01-04T02:39:36.179256",
- "cpu_percent": 12.93,
- "ram_percent": 57.2,
- "ram_used_gb": 36.396636962890625,
- "gpu_memory_used": 4417.0,
- "relative_time": 104.06347131729126
- },
- {
- "timestamp": "2025-01-04T02:39:37.237454",
- "cpu_percent": 10.94,
- "ram_percent": 57.3,
- "ram_used_gb": 36.429630279541016,
- "gpu_memory_used": 4417.0,
- "relative_time": 105.12580728530884
- },
- {
- "timestamp": "2025-01-04T02:39:38.310321",
- "cpu_percent": 12.86,
- "ram_percent": 57.3,
- "ram_used_gb": 36.44291305541992,
- "gpu_memory_used": 4418.0,
- "relative_time": 106.17753839492798
- },
- {
- "timestamp": "2025-01-04T02:39:39.355358",
- "cpu_percent": 10.82,
- "ram_percent": 57.3,
- "ram_used_gb": 36.46603012084961,
- "gpu_memory_used": 4418.0,
- "relative_time": 107.24251008033752
- },
- {
- "timestamp": "2025-01-04T02:39:40.413524",
- "cpu_percent": 14.64,
- "ram_percent": 57.4,
- "ram_used_gb": 36.507179260253906,
- "gpu_memory_used": 4418.0,
- "relative_time": 108.29774165153503
- },
- {
- "timestamp": "2025-01-04T02:39:41.482368",
- "cpu_percent": 10.03,
- "ram_percent": 58.1,
- "ram_used_gb": 36.93812942504883,
- "gpu_memory_used": 4418.0,
- "relative_time": 109.36836910247803
- },
- {
- "timestamp": "2025-01-04T02:39:42.546204",
- "cpu_percent": 12.63,
- "ram_percent": 58.0,
- "ram_used_gb": 36.87542724609375,
- "gpu_memory_used": 4418.0,
- "relative_time": 110.43055510520935
- },
- {
- "timestamp": "2025-01-04T02:39:43.604666",
- "cpu_percent": 14.14,
- "ram_percent": 58.0,
- "ram_used_gb": 36.875328063964844,
- "gpu_memory_used": 4426.0,
- "relative_time": 111.49229407310486
- },
- {
- "timestamp": "2025-01-04T02:39:44.664973",
- "cpu_percent": 10.64,
- "ram_percent": 58.0,
- "ram_used_gb": 36.88217544555664,
- "gpu_memory_used": 4425.0,
- "relative_time": 112.55481696128845
- },
- {
- "timestamp": "2025-01-04T02:39:45.741305",
- "cpu_percent": 13.92,
- "ram_percent": 57.9,
- "ram_used_gb": 36.85449981689453,
- "gpu_memory_used": 4425.0,
- "relative_time": 113.62504053115845
- },
- {
- "timestamp": "2025-01-04T02:39:46.799656",
- "cpu_percent": 13.15,
- "ram_percent": 58.0,
- "ram_used_gb": 36.870826721191406,
- "gpu_memory_used": 4423.0,
- "relative_time": 114.6845052242279
- },
- {
- "timestamp": "2025-01-04T02:39:47.859725",
- "cpu_percent": 13.85,
- "ram_percent": 58.0,
- "ram_used_gb": 36.870948791503906,
- "gpu_memory_used": 4423.0,
- "relative_time": 115.74664235115051
- },
- {
- "timestamp": "2025-01-04T02:39:48.919071",
- "cpu_percent": 14.59,
- "ram_percent": 58.0,
- "ram_used_gb": 36.886802673339844,
- "gpu_memory_used": 4422.0,
- "relative_time": 116.80267906188965
- },
- {
- "timestamp": "2025-01-04T02:39:49.976541",
- "cpu_percent": 16.56,
- "ram_percent": 58.0,
- "ram_used_gb": 36.90068435668945,
- "gpu_memory_used": 4422.0,
- "relative_time": 117.86520886421204
- },
- {
- "timestamp": "2025-01-04T02:39:51.036593",
- "cpu_percent": 10.33,
- "ram_percent": 58.1,
- "ram_used_gb": 36.96821212768555,
- "gpu_memory_used": 4416.0,
- "relative_time": 118.92232513427734
- },
- {
- "timestamp": "2025-01-04T02:39:52.098186",
- "cpu_percent": 15.23,
- "ram_percent": 58.1,
- "ram_used_gb": 36.96358108520508,
- "gpu_memory_used": 4416.0,
- "relative_time": 119.98378920555115
- },
- {
- "timestamp": "2025-01-04T02:39:53.168566",
- "cpu_percent": 11.96,
- "ram_percent": 58.2,
- "ram_used_gb": 37.00669479370117,
- "gpu_memory_used": 4416.0,
- "relative_time": 121.05223441123962
- },
- {
- "timestamp": "2025-01-04T02:39:54.230292",
- "cpu_percent": 13.03,
- "ram_percent": 58.2,
- "ram_used_gb": 37.031307220458984,
- "gpu_memory_used": 4416.0,
- "relative_time": 122.11563086509705
- },
- {
- "timestamp": "2025-01-04T02:39:55.287670",
- "cpu_percent": 14.93,
- "ram_percent": 58.2,
- "ram_used_gb": 37.045589447021484,
- "gpu_memory_used": 4416.0,
- "relative_time": 123.17237305641174
- },
- {
- "timestamp": "2025-01-04T02:39:56.349816",
- "cpu_percent": 12.91,
- "ram_percent": 58.3,
- "ram_used_gb": 37.072689056396484,
- "gpu_memory_used": 4416.0,
- "relative_time": 124.23265671730042
- },
- {
- "timestamp": "2025-01-04T02:39:57.409384",
- "cpu_percent": 11.35,
- "ram_percent": 58.3,
- "ram_used_gb": 37.10455322265625,
- "gpu_memory_used": 4416.0,
- "relative_time": 125.29221749305725
- },
- {
- "timestamp": "2025-01-04T02:39:58.464653",
- "cpu_percent": 12.97,
- "ram_percent": 58.4,
- "ram_used_gb": 37.12955093383789,
- "gpu_memory_used": 4416.0,
- "relative_time": 126.34849739074707
- },
- {
- "timestamp": "2025-01-04T02:39:59.521737",
- "cpu_percent": 11.69,
- "ram_percent": 58.4,
- "ram_used_gb": 37.139190673828125,
- "gpu_memory_used": 4416.0,
- "relative_time": 127.40602111816406
- },
- {
- "timestamp": "2025-01-04T02:40:00.581455",
- "cpu_percent": 12.86,
- "ram_percent": 58.5,
- "ram_used_gb": 37.204673767089844,
- "gpu_memory_used": 4418.0,
- "relative_time": 128.5798671245575
- },
- {
- "timestamp": "2025-01-04T02:40:01.760893",
- "cpu_percent": 81.59,
- "ram_percent": 58.1,
- "ram_used_gb": 36.97315216064453,
- "gpu_memory_used": 4425.0,
- "relative_time": 129.6643455028534
- },
- {
- "timestamp": "2025-01-04T02:40:02.850000",
- "cpu_percent": 14.55,
- "ram_percent": 58.2,
- "ram_used_gb": 37.0354118347168,
- "gpu_memory_used": 4435.0,
- "relative_time": 130.7529788017273
- },
- {
- "timestamp": "2025-01-04T02:40:03.934586",
- "cpu_percent": 13.16,
- "ram_percent": 58.2,
- "ram_used_gb": 37.03787612915039,
- "gpu_memory_used": 4437.0,
- "relative_time": 131.81812405586243
- },
- {
- "timestamp": "2025-01-04T02:40:04.989915",
- "cpu_percent": 11.0,
- "ram_percent": 58.3,
- "ram_used_gb": 37.09538650512695,
- "gpu_memory_used": 4437.0,
- "relative_time": 132.88244915008545
- },
- {
- "timestamp": "2025-01-04T02:40:06.067559",
- "cpu_percent": 32.79,
- "ram_percent": 58.3,
- "ram_used_gb": 37.07184982299805,
- "gpu_memory_used": 4437.0,
- "relative_time": 133.97513842582703
- },
- {
- "timestamp": "2025-01-04T02:40:07.156881",
- "cpu_percent": 22.34,
- "ram_percent": 58.3,
- "ram_used_gb": 37.07517623901367,
- "gpu_memory_used": 4438.0,
- "relative_time": 135.04176831245422
- },
- {
- "timestamp": "2025-01-04T02:40:08.212791",
- "cpu_percent": 14.86,
- "ram_percent": 58.3,
- "ram_used_gb": 37.08013153076172,
- "gpu_memory_used": 4438.0,
- "relative_time": 136.1427457332611
- },
- {
- "timestamp": "2025-01-04T02:40:09.317884",
- "cpu_percent": 21.55,
- "ram_percent": 58.4,
- "ram_used_gb": 37.144142150878906,
- "gpu_memory_used": 4447.0,
- "relative_time": 137.20455360412598
- },
- {
- "timestamp": "2025-01-04T02:40:10.390292",
- "cpu_percent": 26.97,
- "ram_percent": 58.4,
- "ram_used_gb": 37.141868591308594,
- "gpu_memory_used": 4454.0,
- "relative_time": 138.2930736541748
- },
- {
- "timestamp": "2025-01-04T02:40:11.464548",
- "cpu_percent": 12.21,
- "ram_percent": 58.5,
- "ram_used_gb": 37.205867767333984,
- "gpu_memory_used": 4451.0,
- "relative_time": 139.35198616981506
- },
- {
- "timestamp": "2025-01-04T02:40:12.537687",
- "cpu_percent": 13.14,
- "ram_percent": 58.5,
- "ram_used_gb": 37.233299255371094,
- "gpu_memory_used": 4452.0,
- "relative_time": 140.4236707687378
- },
- {
- "timestamp": "2025-01-04T02:40:13.608158",
- "cpu_percent": 13.93,
- "ram_percent": 58.6,
- "ram_used_gb": 37.25624465942383,
- "gpu_memory_used": 4452.0,
- "relative_time": 141.4932518005371
- },
- {
- "timestamp": "2025-01-04T02:40:14.668199",
- "cpu_percent": 15.76,
- "ram_percent": 58.6,
- "ram_used_gb": 37.278499603271484,
- "gpu_memory_used": 4452.0,
- "relative_time": 142.57055759429932
- },
- {
- "timestamp": "2025-01-04T02:40:15.754506",
- "cpu_percent": 14.59,
- "ram_percent": 58.5,
- "ram_used_gb": 37.21017837524414,
- "gpu_memory_used": 4451.0,
- "relative_time": 143.64187097549438
- },
+ "timestamp": "2025-01-06T00:44:24.993313",
+ "cpu_percent": 28.07,
+ "ram_percent": 64.4,
+ "ram_used_gb": 40.94577407836914,
+ "gpu_memory_used": 5076.0,
+ "relative_time": 64.18998432159424
+ },
{
- "timestamp": "2025-01-04T02:40:16.827392",
- "cpu_percent": 17.55,
- "ram_percent": 58.4,
- "ram_used_gb": 37.1205940246582,
- "gpu_memory_used": 4450.0,
- "relative_time": 144.75147438049316
- },
+ "timestamp": "2025-01-06T00:44:26.077028",
+ "cpu_percent": 26.1,
+ "ram_percent": 64.4,
+ "ram_used_gb": 40.98012161254883,
+ "gpu_memory_used": 5197.0,
+ "relative_time": 65.28782486915588
+ },
{
- "timestamp": "2025-01-04T02:40:17.929858",
- "cpu_percent": 55.09,
- "ram_percent": 58.2,
- "ram_used_gb": 37.04216766357422,
- "gpu_memory_used": 4449.0,
- "relative_time": 145.815936088562
- },
+ "timestamp": "2025-01-06T00:44:27.175228",
+ "cpu_percent": 35.17,
+ "ram_percent": 64.6,
+ "ram_used_gb": 41.0831184387207,
+ "gpu_memory_used": 5422.0,
+ "relative_time": 66.37566781044006
+ },
{
- "timestamp": "2025-01-04T02:40:18.988009",
- "cpu_percent": 13.92,
- "ram_percent": 58.0,
- "ram_used_gb": 36.90068054199219,
- "gpu_memory_used": 4447.0,
- "relative_time": 146.8880341053009
- },
+ "timestamp": "2025-01-06T00:44:28.265025",
+ "cpu_percent": 55.14,
+ "ram_percent": 64.9,
+ "ram_used_gb": 41.25740432739258,
+ "gpu_memory_used": 5512.0,
+ "relative_time": 67.48023676872253
+ },
{
- "timestamp": "2025-01-04T02:40:20.062567",
- "cpu_percent": 17.42,
- "ram_percent": 57.7,
- "ram_used_gb": 36.69455337524414,
- "gpu_memory_used": 4446.0,
- "relative_time": 147.96440315246582
- },
+ "timestamp": "2025-01-06T00:44:29.367776",
+ "cpu_percent": 53.84,
+ "ram_percent": 65.0,
+ "ram_used_gb": 41.36682891845703,
+ "gpu_memory_used": 5616.0,
+ "relative_time": 68.57096815109253
+ },
{
- "timestamp": "2025-01-04T02:40:21.149129",
- "cpu_percent": 14.78,
- "ram_percent": 57.4,
- "ram_used_gb": 36.50687789916992,
- "gpu_memory_used": 4445.0,
- "relative_time": 149.041100025177
- },
+ "timestamp": "2025-01-06T00:44:30.458301",
+ "cpu_percent": 33.42,
+ "ram_percent": 65.3,
+ "ram_used_gb": 41.5602912902832,
+ "gpu_memory_used": 5724.0,
+ "relative_time": 69.66709041595459
+ },
{
- "timestamp": "2025-01-04T02:40:22.221780",
- "cpu_percent": 11.1,
- "ram_percent": 57.0,
- "ram_used_gb": 36.28267288208008,
- "gpu_memory_used": 4438.0,
- "relative_time": 150.125506401062
- },
+ "timestamp": "2025-01-06T00:44:31.554329",
+ "cpu_percent": 50.81,
+ "ram_percent": 65.5,
+ "ram_used_gb": 41.66044616699219,
+ "gpu_memory_used": 5827.0,
+ "relative_time": 70.75874853134155
+ },
{
- "timestamp": "2025-01-04T02:40:23.308492",
- "cpu_percent": 12.21,
- "ram_percent": 56.7,
- "ram_used_gb": 36.036773681640625,
- "gpu_memory_used": 4436.0,
- "relative_time": 151.19524502754211
- },
+ "timestamp": "2025-01-06T00:44:32.646414",
+ "cpu_percent": 34.34,
+ "ram_percent": 65.6,
+ "ram_used_gb": 41.739715576171875,
+ "gpu_memory_used": 5843.0,
+ "relative_time": 71.86718988418579
+ },
{
- "timestamp": "2025-01-04T02:40:24.381177",
- "cpu_percent": 13.79,
- "ram_percent": 56.3,
- "ram_used_gb": 35.83684539794922,
- "gpu_memory_used": 4436.0,
- "relative_time": 152.26534175872803
- },
+ "timestamp": "2025-01-06T00:44:33.754223",
+ "cpu_percent": 44.32,
+ "ram_percent": 66.0,
+ "ram_used_gb": 42.005794525146484,
+ "gpu_memory_used": 5901.0,
+ "relative_time": 72.95793795585632
+ },
{
- "timestamp": "2025-01-04T02:40:25.452457",
- "cpu_percent": 12.28,
- "ram_percent": 56.4,
- "ram_used_gb": 35.848087310791016,
- "gpu_memory_used": 4436.0,
- "relative_time": 153.33880996704102
- },
+ "timestamp": "2025-01-06T00:44:34.848852",
+ "cpu_percent": 48.36,
+ "ram_percent": 66.5,
+ "ram_used_gb": 42.3160514831543,
+ "gpu_memory_used": 5924.0,
+ "relative_time": 74.35109186172485
+ },
{
- "timestamp": "2025-01-04T02:40:26.521613",
- "cpu_percent": 12.52,
- "ram_percent": 56.8,
- "ram_used_gb": 36.1606330871582,
- "gpu_memory_used": 4440.0,
- "relative_time": 154.40920901298523
- },
+ "timestamp": "2025-01-06T00:44:36.240235",
+ "cpu_percent": 58.06,
+ "ram_percent": 67.5,
+ "ram_used_gb": 42.95722198486328,
+ "gpu_memory_used": 5930.0,
+ "relative_time": 75.47581958770752
+ },
{
- "timestamp": "2025-01-04T02:40:27.587547",
- "cpu_percent": 7.94,
- "ram_percent": 57.2,
- "ram_used_gb": 36.37208557128906,
- "gpu_memory_used": 4440.0,
- "relative_time": 155.46942234039307
- },
+ "timestamp": "2025-01-06T00:44:37.363208",
+ "cpu_percent": 46.82,
+ "ram_percent": 67.6,
+ "ram_used_gb": 42.97764587402344,
+ "gpu_memory_used": 6364.0,
+ "relative_time": 76.58708119392395
+ },
{
- "timestamp": "2025-01-04T02:40:28.647400",
- "cpu_percent": 8.85,
- "ram_percent": 57.3,
- "ram_used_gb": 36.470054626464844,
- "gpu_memory_used": 4440.0,
- "relative_time": 156.53129720687866
+ "timestamp": "2025-01-06T00:44:38.474408",
+ "cpu_percent": 50.93,
+ "ram_percent": 67.9,
+ "ram_used_gb": 43.1597900390625,
+ "gpu_memory_used": 6426.0,
+ "relative_time": 77.6842532157898
}
],
- "test_duration": 159.19756031036377
+ "test_duration": 82.49591493606567
}
\ No newline at end of file
diff --git a/examples/assorted_checks/benchmarks/output_data/gpu_benchmark_stats_rtf.txt b/examples/assorted_checks/benchmarks/output_data/gpu_benchmark_stats_rtf.txt
index cb2df6a..7876062 100644
--- a/examples/assorted_checks/benchmarks/output_data/gpu_benchmark_stats_rtf.txt
+++ b/examples/assorted_checks/benchmarks/output_data/gpu_benchmark_stats_rtf.txt
@@ -1,23 +1,23 @@
=== Benchmark Statistics (with correct RTF) ===
-Total tokens processed: 17150
-Total audio generated (s): 5296.38
-Total test duration (s): 155.23
-Average processing rate (tokens/s): 102.86
-Average RTF: 0.03
-Average Real Time Speed: 31.25
+Total tokens processed: 3150
+Total audio generated (s): 994.22
+Total test duration (s): 73.81
+Average processing rate (tokens/s): 49.36
+Average RTF: 0.07
+Average Real Time Speed: 15.00
=== Per-chunk Stats ===
-Average chunk size (tokens): 1715.00
+Average chunk size (tokens): 525.00
Min chunk size (tokens): 150
-Max chunk size (tokens): 5000
-Average processing time (s): 15.39
-Average output length (s): 529.64
+Max chunk size (tokens): 900
+Average processing time (s): 12.12
+Average output length (s): 165.70
=== Performance Ranges ===
-Processing rate range (tokens/s): 80.65 - 125.10
-RTF range: 0.03x - 0.04x
-Real Time Speed range: 25.00x - 33.33x
+Processing rate range (tokens/s): 30.33 - 63.56
+RTF range: 0.05x - 0.10x
+Real Time Speed range: 10.00x - 20.00x
diff --git a/examples/assorted_checks/benchmarks/output_plots/cpu_processing_time_rtf.png b/examples/assorted_checks/benchmarks/output_plots/cpu_processing_time_rtf.png
index 339c896..2317c52 100644
Binary files a/examples/assorted_checks/benchmarks/output_plots/cpu_processing_time_rtf.png and b/examples/assorted_checks/benchmarks/output_plots/cpu_processing_time_rtf.png differ
diff --git a/examples/assorted_checks/benchmarks/output_plots/cpu_realtime_factor_rtf.png b/examples/assorted_checks/benchmarks/output_plots/cpu_realtime_factor_rtf.png
index 3e5c8d1..919c53b 100644
Binary files a/examples/assorted_checks/benchmarks/output_plots/cpu_realtime_factor_rtf.png and b/examples/assorted_checks/benchmarks/output_plots/cpu_realtime_factor_rtf.png differ
diff --git a/examples/assorted_checks/benchmarks/output_plots/cpu_system_usage_rtf.png b/examples/assorted_checks/benchmarks/output_plots/cpu_system_usage_rtf.png
index e209978..5143bda 100644
Binary files a/examples/assorted_checks/benchmarks/output_plots/cpu_system_usage_rtf.png and b/examples/assorted_checks/benchmarks/output_plots/cpu_system_usage_rtf.png differ
diff --git a/examples/assorted_checks/benchmarks/output_plots/first_token_latency_stream.png b/examples/assorted_checks/benchmarks/output_plots/first_token_latency_stream.png
new file mode 100644
index 0000000..75c1a19
Binary files /dev/null and b/examples/assorted_checks/benchmarks/output_plots/first_token_latency_stream.png differ
diff --git a/examples/assorted_checks/benchmarks/output_plots/first_token_latency_stream_openai.png b/examples/assorted_checks/benchmarks/output_plots/first_token_latency_stream_openai.png
new file mode 100644
index 0000000..c78f28c
Binary files /dev/null and b/examples/assorted_checks/benchmarks/output_plots/first_token_latency_stream_openai.png differ
diff --git a/examples/assorted_checks/benchmarks/output_plots/first_token_timeline_stream.png b/examples/assorted_checks/benchmarks/output_plots/first_token_timeline_stream.png
new file mode 100644
index 0000000..47ac017
Binary files /dev/null and b/examples/assorted_checks/benchmarks/output_plots/first_token_timeline_stream.png differ
diff --git a/examples/assorted_checks/benchmarks/output_plots/first_token_timeline_stream_openai.png b/examples/assorted_checks/benchmarks/output_plots/first_token_timeline_stream_openai.png
new file mode 100644
index 0000000..64f2208
Binary files /dev/null and b/examples/assorted_checks/benchmarks/output_plots/first_token_timeline_stream_openai.png differ
diff --git a/examples/assorted_checks/benchmarks/output_plots/format_comparison.png b/examples/assorted_checks/benchmarks/output_plots/format_comparison.png
deleted file mode 100644
index 95ac515..0000000
Binary files a/examples/assorted_checks/benchmarks/output_plots/format_comparison.png and /dev/null differ
diff --git a/examples/assorted_checks/benchmarks/output_plots/gpu_processing_time_rtf.png b/examples/assorted_checks/benchmarks/output_plots/gpu_processing_time_rtf.png
index 62c6864..ebf7bcf 100644
Binary files a/examples/assorted_checks/benchmarks/output_plots/gpu_processing_time_rtf.png and b/examples/assorted_checks/benchmarks/output_plots/gpu_processing_time_rtf.png differ
diff --git a/examples/assorted_checks/benchmarks/output_plots/gpu_realtime_factor_rtf.png b/examples/assorted_checks/benchmarks/output_plots/gpu_realtime_factor_rtf.png
index 1c5d7b7..bcdacda 100644
Binary files a/examples/assorted_checks/benchmarks/output_plots/gpu_realtime_factor_rtf.png and b/examples/assorted_checks/benchmarks/output_plots/gpu_realtime_factor_rtf.png differ
diff --git a/examples/assorted_checks/benchmarks/output_plots/gpu_system_usage_rtf.png b/examples/assorted_checks/benchmarks/output_plots/gpu_system_usage_rtf.png
index 942b3a8..a6a6ea5 100644
Binary files a/examples/assorted_checks/benchmarks/output_plots/gpu_system_usage_rtf.png and b/examples/assorted_checks/benchmarks/output_plots/gpu_system_usage_rtf.png differ
diff --git a/examples/assorted_checks/benchmarks/output_plots/gpu_usage.png b/examples/assorted_checks/benchmarks/output_plots/gpu_usage.png
deleted file mode 100644
index 1bc44dc..0000000
Binary files a/examples/assorted_checks/benchmarks/output_plots/gpu_usage.png and /dev/null differ
diff --git a/examples/assorted_checks/benchmarks/output_plots/total_time_latency_stream.png b/examples/assorted_checks/benchmarks/output_plots/total_time_latency_stream.png
new file mode 100644
index 0000000..4e94aaa
Binary files /dev/null and b/examples/assorted_checks/benchmarks/output_plots/total_time_latency_stream.png differ
diff --git a/examples/assorted_checks/benchmarks/output_plots/total_time_latency_stream_openai.png b/examples/assorted_checks/benchmarks/output_plots/total_time_latency_stream_openai.png
new file mode 100644
index 0000000..7730ced
Binary files /dev/null and b/examples/assorted_checks/benchmarks/output_plots/total_time_latency_stream_openai.png differ
diff --git a/examples/assorted_checks/generate_readme_plots.py b/examples/assorted_checks/generate_readme_plots.py
new file mode 100644
index 0000000..a6e5da2
--- /dev/null
+++ b/examples/assorted_checks/generate_readme_plots.py
@@ -0,0 +1,198 @@
+#!/usr/bin/env python3
+"""Script to generate all plots needed for the README."""
+
+import os
+import sys
+import shutil
+from pathlib import Path
+
+from validate_wav import validate_tts
+
+# Get absolute paths
+script_dir = Path(__file__).parent.resolve()
+project_root = script_dir.parent.parent
+
+# Add directories to Python path for imports
+sys.path.append(str(script_dir))
+sys.path.append(str(script_dir / "benchmarks"))
+
+# Import test scripts
+from benchmark_tts_rtf import main as benchmark_rtf
+from test_formats.test_audio_formats import main as test_formats
+from benchmark_first_token_stream_unified import main as benchmark_stream
+from test_combinations.test_analyze_combined_voices import main as test_voice_analysis
+
+# Remove directories from path after imports
+sys.path.remove(str(script_dir))
+sys.path.remove(str(script_dir / "benchmarks"))
+
+
+def ensure_assets_dir():
+ """Create assets directory if it doesn't exist."""
+ assets_dir = project_root / "assets"
+ assets_dir.mkdir(exist_ok=True)
+ return assets_dir
+
+
+def copy_plot(src_path: str, dest_name: str, assets_dir: Path):
+ """Copy a plot to the assets directory with a new name."""
+ if os.path.exists(src_path):
+ shutil.copy2(src_path, assets_dir / dest_name)
+ print(f"Copied {src_path} to {assets_dir / dest_name}")
+ else:
+ print(f"Warning: Source plot not found at {src_path}")
+
+
+def validate_and_print(wav_path: str, category: str):
+ """Validate a WAV file and print results."""
+ if not os.path.exists(wav_path):
+ print(f"Warning: WAV file not found at {wav_path}")
+ return
+
+ print(f"\n=== Validating {category} Audio ===")
+ result = validate_tts(wav_path)
+
+ if "error" in result:
+ print(f"Error: {result['error']}")
+ else:
+ print(f"Duration: {result['duration']}")
+ print(f"Sample Rate: {result['sample_rate']} Hz")
+ print(f"Peak Amplitude: {result['peak_amplitude']}")
+ print(f"RMS Level: {result['rms_level']}")
+
+ if result["issues"]:
+ print("\nIssues Found:")
+ for issue in result["issues"]:
+ print(f"- {issue}")
+ else:
+ print("\nNo issues found")
+
+
+def main():
+ """Generate all plots needed for the README."""
+    prefix = "gpu"
+    # Ensure assets directory exists
+    assets_dir = ensure_assets_dir()
+
+ print("\n=== Generating Format Comparison Plot ===")
+ test_formats()
+ copy_plot(
+ str(script_dir / "test_formats/output/test_formats/format_comparison.png"),
+ "format_comparison.png",
+ assets_dir,
+ )
+ # Validate WAV output from format test
+ validate_and_print(
+ str(script_dir / "test_formats/output/test_formats/speech.wav"),
+ "Format Test WAV",
+ )
+
+ print("\n=== Generating Voice Analysis Plot ===")
+ test_voice_analysis()
+ copy_plot(
+ str(script_dir / "test_combinations/output/analysis_comparison.png"),
+ "voice_analysis.png",
+ assets_dir,
+ )
+ # Validate combined voice output
+ validate_and_print(
+ str(
+ script_dir
+ / "test_combinations/output/analysis_combined_af_bella_af_nicole.wav"
+ ),
+ "Combined Voice",
+ )
+
+ print("\n=== Generating Performance Benchmark Plots ===")
+ benchmark_rtf()
+ copy_plot(
+ str(script_dir / f"benchmarks/output_plots/{prefix}_processing_time_rtf.png"),
+ f"{prefix}_processing_time.png",
+ assets_dir,
+ )
+ copy_plot(
+ str(script_dir / f"benchmarks/output_plots/{prefix}_realtime_factor_rtf.png"),
+ f"{prefix}_realtime_factor.png",
+ assets_dir,
+ )
+    # Validate RTF benchmark output (~450 tokens)
+ validate_and_print(
+ str(script_dir / "benchmarks/output_audio/chunk_450_tokens.wav"),
+ "RTF Benchmark",
+ )
+
+ print("\n=== Generating Streaming Benchmark Plots ===")
+ benchmark_stream()
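+    # benchmark_stream writes both the direct-API and OpenAI-client plot sets to benchmarks/output_plots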
+
+ # Copy direct streaming plots
+ copy_plot(
+ str(script_dir / "benchmarks/output_plots/first_token_latency_stream.png"),
+ f"{prefix}_first_token_latency_direct.png",
+ assets_dir,
+ )
+ copy_plot(
+ str(script_dir / "benchmarks/output_plots/first_token_timeline_stream.png"),
+ f"{prefix}_first_token_timeline_direct.png",
+ assets_dir,
+ )
+ copy_plot(
+ str(script_dir / "benchmarks/output_plots/total_time_latency_stream.png"),
+ f"{prefix}_total_time_latency_direct.png",
+ assets_dir,
+ )
+
+ # Copy OpenAI streaming plots
+ copy_plot(
+ str(
+ script_dir / "benchmarks/output_plots/first_token_latency_stream_openai.png"
+ ),
+ f"{prefix}_first_token_latency_openai.png",
+ assets_dir,
+ )
+ copy_plot(
+ str(
+ script_dir
+ / "benchmarks/output_plots/first_token_timeline_stream_openai.png"
+ ),
+ f"{prefix}_first_token_timeline_openai.png",
+ assets_dir,
+ )
+ copy_plot(
+ str(
+ script_dir / "benchmarks/output_plots/total_time_latency_stream_openai.png"
+ ),
+ f"{prefix}_total_time_latency_openai.png",
+ assets_dir,
+ )
+
+ # Wait a moment for files to be generated
+ import time
+
+ time.sleep(2)
+
+ # Validate streaming outputs (~500 tokens)
+ validate_and_print(
+ str(
+ script_dir
+ / "benchmarks/output_audio_stream/benchmark_tokens500_run1_stream.wav"
+ ),
+ "Direct Streaming",
+ )
+ validate_and_print(
+ str(
+ script_dir
+ / "benchmarks/output_audio_stream_openai/benchmark_tokens500_run1_stream_openai.wav"
+ ),
+ "OpenAI Streaming",
+ )
+
+ validate_and_print(
+ str(script_dir / "test_formats/output/test_formats/test_audio.wav"),
+ "Format Test WAV",
+ )
+
+ print("\nAll plots have been generated and copied to the assets directory")
+
+
+if __name__ == "__main__":
+ main()
diff --git a/examples/assorted_checks/test_combinations/test_analyze_combined_voices.py b/examples/assorted_checks/test_combinations/test_analyze_combined_voices.py
index ec280e2..134f554 100644
--- a/examples/assorted_checks/test_combinations/test_analyze_combined_voices.py
+++ b/examples/assorted_checks/test_combinations/test_analyze_combined_voices.py
@@ -73,6 +73,7 @@ def generate_speech(
"voice": voice,
"speed": 1.0,
"response_format": "wav", # Use WAV for analysis
+ "stream": False,
},
)
@@ -193,9 +194,10 @@ def plot_analysis(audio_files: Dict[str, str], output_dir: str):
fig.patch.set_facecolor("#1a1a2e")
num_files = len(audio_files)
- # Create subplot grid with proper spacing
+ # Create subplot grid with proper spacing for waveforms and metrics
+ total_rows = num_files + 2 # Add one more row for metrics
gs = plt.GridSpec(
- num_files + 1, 2, height_ratios=[1.5] * num_files + [1], hspace=0.4, wspace=0.3
+ total_rows, 2, height_ratios=[1.5] * num_files + [1, 1], hspace=0.4, wspace=0.3
)
# Analyze all files first
@@ -216,48 +218,74 @@ def plot_analysis(audio_files: Dict[str, str], output_dir: str):
# Colors for voices
colors = ["#ff2a6d", "#05d9e8", "#d1f7ff"]
- # Create two subplots for metrics with similar scales
- # Left subplot: Brightness and Volume
- ax1 = plt.subplot(gs[num_files, 0])
- metrics1 = [
+ # Create metrics for each subplot
+ metrics = [
(
- "Brightness",
- [chars["spectral_centroid"] / 1000 for chars in all_chars.values()],
- "kHz",
- ),
- ("Volume", [chars["rms"] * 100 for chars in all_chars.values()], "RMS×100"),
- ]
-
- # Right subplot: Voice Pitch and Texture
- ax2 = plt.subplot(gs[num_files, 1])
- metrics2 = [
- (
- "Voice Pitch",
- [min(chars["dominant_frequencies"]) for chars in all_chars.values()],
- "Hz",
+ plt.subplot(gs[num_files, 0]),
+ [
+ (
+ "Volume",
+ [chars["rms"] * 100 for chars in all_chars.values()],
+ "RMS×100",
+ )
+ ],
),
(
- "Texture",
- [chars["zero_crossing_rate"] * 1000 for chars in all_chars.values()],
- "ZCR×1000",
+ plt.subplot(gs[num_files, 1]),
+ [
+ (
+ "Brightness",
+ [chars["spectral_centroid"] / 1000 for chars in all_chars.values()],
+ "kHz",
+ )
+ ],
+ ),
+ (
+ plt.subplot(gs[num_files + 1, 0]),
+ [
+ (
+ "Voice Pitch",
+ [
+ min(chars["dominant_frequencies"])
+ for chars in all_chars.values()
+ ],
+ "Hz",
+ )
+ ],
+ ),
+ (
+ plt.subplot(gs[num_files + 1, 1]),
+ [
+ (
+ "Texture",
+ [
+ chars["zero_crossing_rate"] * 1000
+ for chars in all_chars.values()
+ ],
+ "ZCR×1000",
+ )
+ ],
),
]
- def plot_grouped_bars(ax, metrics, show_legend=True):
- n_groups = len(metrics)
+ # Plot each metric
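+    # Each subplot shows a single metric, with one bar per voice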
+ for i, (ax, metric_data) in enumerate(metrics):
n_voices = len(audio_files)
bar_width = 0.25
+ indices = np.array([0])
- indices = np.arange(n_groups)
+ values = metric_data[0][1]
+ max_val = max(values)
- # Get max value for y-axis scaling
- max_val = max(max(m[1]) for m in metrics)
-
- for i, (voice, color) in enumerate(zip(audio_files.keys(), colors)):
- values = [m[1][i] for m in metrics]
- offset = (i - n_voices / 2 + 0.5) * bar_width
+ for j, (voice, color) in enumerate(zip(audio_files.keys(), colors)):
+ offset = (j - n_voices / 2 + 0.5) * bar_width
bars = ax.bar(
- indices + offset, values, bar_width, label=voice, color=color, alpha=0.8
+ indices + offset,
+ [values[j]],
+ bar_width,
+ label=voice,
+ color=color,
+ alpha=0.8,
)
# Add value labels on top of bars
@@ -274,12 +302,12 @@ def plot_analysis(audio_files: Dict[str, str], output_dir: str):
)
ax.set_xticks(indices)
- ax.set_xticklabels([f"{m[0]}\n({m[2]})" for m in metrics])
-
- # Set y-axis limits with some padding
+ ax.set_xticklabels([f"{metric_data[0][0]}\n({metric_data[0][2]})"])
ax.set_ylim(0, max_val * 1.2)
+ ax.set_ylabel("Value")
- if show_legend:
+ # Only show legend on first metric plot
+ if i == 0:
ax.legend(
bbox_to_anchor=(1.05, 1),
loc="upper left",
@@ -287,22 +315,11 @@ def plot_analysis(audio_files: Dict[str, str], output_dir: str):
edgecolor="#ffffff",
)
- # Plot both subplots
- plot_grouped_bars(ax1, metrics1, show_legend=True)
- plot_grouped_bars(ax2, metrics2, show_legend=False)
+ # Style the subplot
+ setup_plot(fig, ax, metric_data[0][0])
- # Style both subplots
- setup_plot(fig, ax1, "Brightness and Volume")
- setup_plot(fig, ax2, "Voice Pitch and Texture")
-
- # Add y-axis labels
- ax1.set_ylabel("Value")
- ax2.set_ylabel("Value")
-
- # Adjust the figure size to accommodate the legend
- fig.set_size_inches(15, 15)
-
- # Add padding around the entire figure
+ # Adjust the figure size and padding
+ fig.set_size_inches(15, 20)
plt.subplots_adjust(right=0.85, top=0.95, bottom=0.05, left=0.1)
plt.savefig(os.path.join(output_dir, "analysis_comparison.png"), dpi=300)
print(f"Saved analysis comparison to {output_dir}/analysis_comparison.png")
@@ -332,7 +349,7 @@ def main():
)
parser.add_argument("--url", default="http://localhost:8880", help="API base URL")
parser.add_argument(
- "--output-dir",
+ "--output-dir",
default="examples/assorted_checks/test_combinations/output",
help="Output directory for audio files",
)
diff --git a/examples/assorted_checks/test_formats/test_audio_formats.py b/examples/assorted_checks/test_formats/test_audio_formats.py
index e126dec..68156b6 100644
--- a/examples/assorted_checks/test_formats/test_audio_formats.py
+++ b/examples/assorted_checks/test_formats/test_audio_formats.py
@@ -66,26 +66,27 @@ def plot_format_comparison(stats: list, output_dir: str):
for i, stat in enumerate(stats):
format_name = stat["format"].upper()
try:
- # Handle PCM format differently
- if stat["format"] == "pcm":
- # Read raw PCM data (16-bit mono)
- with open(
- os.path.join(output_dir, f"test_audio.{stat['format']}"), "rb"
- ) as f:
- raw_data = f.read()
- data = np.frombuffer(raw_data, dtype=np.int16)
- data = data.astype(np.float32) / 32768.0 # Convert to float [-1, 1]
- sr = 24000
- else:
- # Read other formats with soundfile
- data, sr = sf.read(
- os.path.join(output_dir, f"test_audio.{stat['format']}")
- )
+ file_path = os.path.join(output_dir, f"test_audio.{stat['format']}")
- # Plot waveform
+ if stat["format"] == "wav":
+ # Use scipy.io.wavfile for WAV files
+ sr, data = wavfile.read(file_path)
+ data = data.astype(np.float32) / 32768.0 # Convert to float [-1, 1]
+ elif stat["format"] == "pcm":
+ # Read raw 16-bit signed little-endian PCM data at 24kHz
+ data = np.frombuffer(
+ open(file_path, "rb").read(), dtype=" dict:
"""Get audio file statistics"""
file_size = os.path.getsize(file_path)
file_size_kb = file_size / 1024 # Convert to KB
+ format_name = Path(file_path).suffix[1:]
- try:
- # Try reading with soundfile first
+ if format_name == "wav":
+ # Use scipy.io.wavfile for WAV files
+ sample_rate, data = wavfile.read(file_path)
+ data = data.astype(np.float32) / 32768.0 # Convert to float [-1, 1]
+ duration = len(data) / sample_rate
+ channels = 1 if len(data.shape) == 1 else data.shape[1]
+ elif format_name == "pcm":
+ # For PCM, read raw 16-bit signed little-endian PCM data at 24kHz
+ data = np.frombuffer(
+ open(file_path, "rb").read(), dtype=" List[str]:
+ """Create a variety of test cases with different characteristics"""
+
+ # Helper to create random text with specific patterns
+ def random_text(length: int) -> str:
+ return "".join(
+ random.choice(string.ascii_letters + string.digits + " .,!?")
+ for _ in range(length)
+ )
+
+ test_cases = []
+
+ # Base test cases that hit specific patterns
+ base_cases = [
+ "Dr. Smith and Mr. Jones discussed the $1,234.56 million investment.",
+ "Yeah, they met at 10:30 and reviewed A.B.C. documentation with Mrs. Brown etc.",
+ 'The temperature was 72.5 degrees (quite normal) for "this time" of year.',
+ "X's and Y's properties cost £50 million in the 1990s",
+ "こんにちは。今日は!",
+ ]
+
+ # Add base cases
+ test_cases.extend(base_cases)
+
+ # Add variations with random content
+ for length in [100, 1000, 10000]:
+ # Create 3 variations of each length
+ for _ in range(3):
+ text = random_text(length)
+ # Insert some patterns we're looking for
+ text = text.replace(text[10:20], "Dr. Smith")
+ text = text.replace(text[30:40], "$1,234.56")
+ text = text.replace(text[50:60], "A.B.C. xyz")
+ test_cases.append(text)
+
+ return test_cases
+
+
+class TextNormalizerInline:
+ """Text normalizer using inline patterns"""
+
+ def normalize(self, text: str) -> str:
+ # Replace quotes and brackets
+ text = text.replace(chr(8216), "'").replace(chr(8217), "'")
+ text = text.replace("«", chr(8220)).replace("»", chr(8221))
+ text = text.replace(chr(8220), '"').replace(chr(8221), '"')
+ text = text.replace("(", "«").replace(")", "»")
+
+ # Handle CJK punctuation
+ for a, b in zip("、。!,:;?", ",.!,:;?"):
+ text = text.replace(a, b + " ")
+
+ text = re.sub(r"[^\S \n]", " ", text)
+ text = re.sub(r" +", " ", text)
+ text = re.sub(r"(?<=\n) +(?=\n)", "", text)
+ text = re.sub(r"\bD[Rr]\.(?= [A-Z])", "Doctor", text)
+ text = re.sub(r"\b(?:Mr\.|MR\.(?= [A-Z]))", "Mister", text)
+ text = re.sub(r"\b(?:Ms\.|MS\.(?= [A-Z]))", "Miss", text)
+ text = re.sub(r"\b(?:Mrs\.|MRS\.(?= [A-Z]))", "Mrs", text)
+ text = re.sub(r"\betc\.(?! [A-Z])", "etc", text)
+ text = re.sub(r"(?i)\b(y)eah?\b", r"\1e'a", text)
+ text = re.sub(
+ r"\d*\.\d+|\b\d{4}s?\b|(? str:
+ # Replace quotes and brackets
+ text = text.replace(chr(8216), "'").replace(chr(8217), "'")
+ text = text.replace("«", chr(8220)).replace("»", chr(8221))
+ text = text.replace(chr(8220), '"').replace(chr(8221), '"')
+ text = text.replace("(", "«").replace(")", "»")
+
+ # Handle CJK punctuation
+ for a, b in zip("、。!,:;?", ",.!,:;?"):
+ text = text.replace(a, b + " ")
+
+ # Use compiled patterns
+ text = self.patterns["whitespace"].sub(" ", text)
+ text = self.patterns["multi_space"].sub(" ", text)
+ text = self.patterns["newline_space"].sub("", text)
+ text = self.patterns["doctor"].sub("Doctor", text)
+ text = self.patterns["mister"].sub("Mister", text)
+ text = self.patterns["miss"].sub("Miss", text)
+ text = self.patterns["mrs"].sub("Mrs", text)
+ text = self.patterns["etc"].sub("etc", text)
+ text = self.patterns["yeah"].sub(r"\1e'a", text)
+ text = self.patterns["numbers"].sub(split_num, text)
+ text = self.patterns["comma_in_number"].sub("", text)
+ text = self.patterns["money"].sub(handle_money, text)
+ text = self.patterns["decimal"].sub(handle_decimal, text)
+ text = self.patterns["range"].sub(" to ", text)
+ text = self.patterns["s_after_number"].sub(" S", text)
+ text = self.patterns["possessive_s"].sub("'S", text)
+ text = self.patterns["x_possessive"].sub("s", text)
+ text = self.patterns["initials"].sub(
+ lambda m: m.group().replace(".", "-"), text
+ )
+ text = self.patterns["single_initial"].sub("-", text)
+
+ return text.strip()
+
+
+class TextNormalizerHybrid:
+ """Text normalizer using hybrid approach - compile only complex/frequent patterns"""
+
+ def __init__(self):
+ # Only compile patterns that are complex or frequently used
+ self.patterns = {
+ "whitespace": re.compile(r"[^\S \n]"),
+ "numbers": re.compile(
+ r"\d*\.\d+|\b\d{4}s?\b|(? str:
+ # Replace quotes and brackets
+ text = text.replace(chr(8216), "'").replace(chr(8217), "'")
+ text = text.replace("«", chr(8220)).replace("»", chr(8221))
+ text = text.replace(chr(8220), '"').replace(chr(8221), '"')
+ text = text.replace("(", "«").replace(")", "»")
+
+ # Handle CJK punctuation
+ for a, b in zip("、。!,:;?", ",.!,:;?"):
+ text = text.replace(a, b + " ")
+
+ # Use compiled patterns for complex operations
+ text = self.patterns["whitespace"].sub(" ", text)
+ text = self.patterns["numbers"].sub(split_num, text)
+ text = self.patterns["money"].sub(handle_money, text)
+ text = self.patterns["initials"].sub(
+ lambda m: m.group().replace(".", "-"), text
+ )
+
+ # Use inline patterns for simpler operations
+ text = re.sub(r" +", " ", text)
+ text = re.sub(r"(?<=\n) +(?=\n)", "", text)
+ text = re.sub(r"\bD[Rr]\.(?= [A-Z])", "Doctor", text)
+ text = re.sub(r"\b(?:Mr\.|MR\.(?= [A-Z]))", "Mister", text)
+ text = re.sub(r"\b(?:Ms\.|MS\.(?= [A-Z]))", "Miss", text)
+ text = re.sub(r"\b(?:Mrs\.|MRS\.(?= [A-Z]))", "Mrs", text)
+ text = re.sub(r"\betc\.(?! [A-Z])", "etc", text)
+ text = re.sub(r"(?i)\b(y)eah?\b", r"\1e'a", text)
+ text = re.sub(r"(?<=\d),(?=\d)", "", text)
+ text = re.sub(r"\d*\.\d+", handle_decimal, text)
+ text = re.sub(r"(?<=\d)-(?=\d)", " to ", text)
+ text = re.sub(r"(?<=\d)S", " S", text)
+ text = re.sub(r"(?<=[BCDFGHJ-NP-TV-Z])'?s\b", "'S", text)
+ text = re.sub(r"(?<=X')S\b", "s", text)
+ text = re.sub(r"(?i)(?<=[A-Z])\.(?=[A-Z])", "-", text)
+
+ return text.strip()
+
+
+def split_num(match: re.Match) -> str:
+ """Split numbers for TTS processing"""
+ num = match.group(0)
+ if ":" in num:
+ h, m = num.split(":")
+ return f"{h} {m}"
+ if num.endswith("s"):
+ return f"{num[:-1]} s"
+ return num
+
+
+def handle_money(match: re.Match) -> str:
+ """Format money strings for TTS"""
+ text = match.group(0)
+ return text.replace("$", " dollars ").replace("£", " pounds ")
+
+
+def handle_decimal(match: re.Match) -> str:
+ """Format decimal numbers for TTS"""
+ num = match.group(0)
+ return num.replace(".", " point ")
+
+
+def benchmark_normalizers(
+ test_cases: List[str], iterations: int = 100
+) -> dict:
+ """Benchmark all three implementations"""
+
+ normalizers = {
+ "inline": TextNormalizerInline(),
+ "compiled": TextNormalizerCompiled(),
+ "hybrid": TextNormalizerHybrid(),
+ }
+
+ results = {}
+
+ # Test each normalizer
+ for name, normalizer in normalizers.items():
+ start = time.perf_counter()
+
+ # Run normalizations
+ for _ in range(iterations):
+ for test in test_cases:
+ normalizer.normalize(test)
+
+ results[name] = time.perf_counter() - start
+
+ return results
+
+
+def verify_outputs(test_cases: List[str]) -> bool:
+ """Verify that all implementations produce identical output"""
+ normalizers = {
+ "inline": TextNormalizerInline(),
+ "compiled": TextNormalizerCompiled(),
+ "hybrid": TextNormalizerHybrid(),
+ }
+
+ for test in test_cases:
+ results = [norm.normalize(test) for norm in normalizers.values()]
+ if not all(r == results[0] for r in results):
+ return False
+ return True
+
+
+def main():
+ # Create test cases
+ print("Generating test cases...")
+ test_cases = create_test_cases()
+ total_chars = sum(len(t) for t in test_cases)
+ print(
+ f"Created {len(test_cases)} test cases, total size: {total_chars:,} characters"
+ )
+
+ # Verify output consistency
+ print("\nVerifying output consistency...")
+ if verify_outputs(test_cases):
+ print("✓ All implementations produce identical output")
+ else:
+ print("✗ Warning: Implementations produce different outputs!")
+ return
+
+ # Run benchmarks
+ print("\nRunning benchmarks...")
+ iterations = 100
+ results = benchmark_normalizers(test_cases, iterations)
+
+ # Print results
+ print(f"\nResults for {iterations} iterations: ")
+ for name, time_taken in results.items():
+ print(f"{name.capitalize()}: {time_taken:.3f}s")
+
+
+if __name__ == "__main__":
+    main()
diff --git a/examples/assorted_checks/validate_wav.py b/examples/assorted_checks/validate_wav.py
index d925e56..844655a 100644
--- a/examples/assorted_checks/validate_wav.py
+++ b/examples/assorted_checks/validate_wav.py
@@ -1,218 +1,262 @@
+import argparse
+from typing import Any, Dict
+from pathlib import Path
+
import numpy as np
import soundfile as sf
-import argparse
-from pathlib import Path
+from tqdm import tqdm
+
def validate_tts(wav_path: str) -> dict:
"""
- Quick validation checks for TTS-generated audio files to detect common artifacts.
-
- Checks for:
- - Unnatural silence gaps
- - Audio glitches and artifacts
- - Repeated speech segments (stuck/looping)
- - Abrupt changes in speech
- - Audio quality issues
-
- Args:
- wav_path: Path to audio file (wav, mp3, etc)
- Returns:
- Dictionary with validation results
+ Validation checks for TTS-generated audio files to detect common artifacts.
"""
try:
- # Load audio
+ # Load and process audio
audio, sr = sf.read(wav_path)
if len(audio.shape) > 1:
- audio = audio.mean(axis=1) # Convert to mono
-
- # Basic audio stats
+ audio = np.mean(audio, axis=1)
+
duration = len(audio) / sr
- rms = np.sqrt(np.mean(audio**2))
- peak = np.max(np.abs(audio))
- dc_offset = np.mean(audio)
-
- # Calculate clipping stats if we're near peak
- clip_count = np.sum(np.abs(audio) >= 0.99)
- clip_percent = (clip_count / len(audio)) * 100
- if clip_percent > 0:
- clip_stats = f" ({clip_percent:.2e} ratio near peak)"
- else:
- clip_stats = " (no samples near peak)"
-
- # Convert to dB for analysis
- eps = np.finfo(float).eps
- db = 20 * np.log10(np.abs(audio) + eps)
-
issues = []
-
- # Check if audio is too short (likely failed generation)
- if duration < 0.1: # Less than 100ms
- issues.append("WARNING: Audio is suspiciously short - possible failed generation")
-
- # 1. Check for basic audio quality
- if peak >= 1.0:
- # Calculate percentage of samples that are clipping
- clip_count = np.sum(np.abs(audio) >= 0.99)
- clip_percent = (clip_count / len(audio)) * 100
-
- if clip_percent > 1.0: # Only warn if more than 1% of samples clip
- issues.append(f"WARNING: Significant clipping detected ({clip_percent:.2e}% of samples)")
- elif clip_percent > 0.01: # Add info if more than 0.01% but less than 1%
- issues.append(f"INFO: Minor peak limiting detected ({clip_percent:.2e}% of samples) - likely intentional normalization")
-
- if rms < 0.01:
+
+ # Basic quality checks
+ abs_audio = np.abs(audio)
+ stats = {
+ "rms": float(np.sqrt(np.mean(audio**2))),
+ "peak": float(np.max(abs_audio)),
+ "dc_offset": float(np.mean(audio)),
+ }
+
+ clip_count = np.sum(abs_audio >= 0.99)
+ clip_percent = (clip_count / len(audio)) * 100
+
+ if duration < 0.1:
+ issues.append(
+ "WARNING: Audio is suspiciously short - possible failed generation"
+ )
+
+ if stats["peak"] >= 1.0:
+ if clip_percent > 1.0:
+ issues.append(
+ f"WARNING: Significant clipping detected ({clip_percent:.2e}% of samples)"
+ )
+ elif clip_percent > 0.01:
+ issues.append(
+ f"INFO: Minor peak limiting detected ({clip_percent:.2e}% of samples)"
+ )
+
+ if stats["rms"] < 0.01:
issues.append("WARNING: Audio is very quiet - possible failed generation")
- if abs(dc_offset) > 0.1: # DC offset is particularly bad for speech
- issues.append(f"WARNING: High DC offset ({dc_offset:.3f}) - may cause audio artifacts")
-
- # 2. Check for long silence gaps (potential TTS failures)
+
+ if abs(stats["dc_offset"]) > 0.1:
+ issues.append(f"WARNING: High DC offset ({stats['dc_offset']:.3f})")
+
+ # Check for long silence gaps
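+        # Scan 2 s windows (skipping the first 0.2 s); a window counts as silence when its
+        # mean level is under -45 dB and 90% of its samples sit below that threshold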
+ eps = np.finfo(float).eps
+ db = 20 * np.log10(abs_audio + eps)
silence_threshold = -45 # dB
- min_silence = 2.0 # Only detect silences longer than 2 seconds
+ min_silence = 2.0 # seconds
window_size = int(min_silence * sr)
silence_count = 0
last_silence = -1
-
- # Skip the first 0.2s for silence detection (avoid false positives at start)
- start_idx = int(0.2 * sr)
- for i in range(start_idx, len(db) - window_size, window_size):
- window = db[i:i+window_size]
+
+ start_idx = int(0.2 * sr) # Skip first 0.2s
+ for i in tqdm(
+ range(start_idx, len(db) - window_size, window_size),
+ desc="Checking for silence",
+ ):
+ window = db[i : i + window_size]
if np.mean(window) < silence_threshold:
- # Verify the entire window is mostly silence
silent_ratio = np.mean(window < silence_threshold)
- if silent_ratio > 0.9: # 90% of the window should be below threshold
- if last_silence == -1 or (i/sr - last_silence) > 2.0: # Only count silences more than 2s apart
+ if silent_ratio > 0.9:
+ if last_silence == -1 or (i / sr - last_silence) > 2.0:
silence_count += 1
- last_silence = i/sr
- issues.append(f"WARNING: Long silence detected at {i/sr:.2f}s (duration: {min_silence:.1f}s)")
-
- if silence_count > 2: # Only warn if there are multiple long silences
- issues.append(f"WARNING: Multiple long silences found ({silence_count} total) - possible generation issue")
-
- # 3. Check for extreme audio artifacts (changes too rapid for natural speech)
- # Use a longer window to avoid flagging normal phoneme transitions
- window_size = int(0.02 * sr) # 20ms window
- db_smooth = np.convolve(db, np.ones(window_size)/window_size, 'same')
- db_diff = np.abs(np.diff(db_smooth))
-
- # Much higher threshold to only catch truly unnatural changes
- artifact_threshold = 40 # dB
- min_duration = int(0.01 * sr) # Minimum 10ms duration
-
- # Find regions where the smoothed dB change is extreme
- artifact_points = np.where(db_diff > artifact_threshold)[0]
-
- if len(artifact_points) > 0:
- # Group artifacts that are very close together
- grouped_artifacts = []
- current_group = [artifact_points[0]]
-
- for i in range(1, len(artifact_points)):
- if (artifact_points[i] - current_group[-1]) < min_duration:
- current_group.append(artifact_points[i])
- else:
- if len(current_group) * (1/sr) >= 0.01: # Only keep groups lasting >= 10ms
- grouped_artifacts.append(current_group)
- current_group = [artifact_points[i]]
-
- if len(current_group) * (1/sr) >= 0.01:
- grouped_artifacts.append(current_group)
-
- # Report only the most severe artifacts
- for group in grouped_artifacts[:2]: # Report up to 2 worst artifacts
- center_idx = group[len(group)//2]
- db_change = db_diff[center_idx]
- if db_change > 45: # Only report very extreme changes
- issues.append(
- f"WARNING: Possible audio artifact at {center_idx/sr:.2f}s "
- f"({db_change:.1f}dB change over {len(group)/sr*1000:.0f}ms)"
- )
-
- # 4. Check for repeated speech segments (stuck/looping)
- # Check both short and long sentence durations at audiobook speed (150-160 wpm)
- for chunk_duration in [5.0, 10.0]: # 5s (~12 words) and 10s (~25 words) at ~audiobook speed
+ last_silence = i / sr
+ issues.append(
+ f"WARNING: Long silence detected at {i/sr:.2f}s (duration: {min_silence:.1f}s)"
+ )
+
+ if silence_count > 2:
+ issues.append(
+ f"WARNING: Multiple long silences found ({silence_count} total)"
+ )
+
+ # Detect audio artifacts
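+        # Flag sample-to-sample jumps that are both >10x the local (~5 ms) average
+        # difference and >0.1 in absolute amplitude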
+ diff = np.diff(audio)
+ abs_diff = np.abs(diff)
+ window_size = min(int(0.005 * sr), 256)
+ window = np.ones(window_size) / window_size
+ local_avg_diff = np.convolve(abs_diff, window, mode="same")
+
+ spikes = (abs_diff > (10 * local_avg_diff)) & (abs_diff > 0.1)
+ artifact_indices = np.nonzero(spikes)[0]
+
+ artifacts = []
+ if len(artifact_indices) > 0:
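+            # Merge spikes separated by less than ~5 ms into a single candidate artifact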
+ gaps = np.diff(artifact_indices)
+ min_gap = int(0.005 * sr)
+ break_points = np.nonzero(gaps > min_gap)[0] + 1
+ groups = np.split(artifact_indices, break_points)
+
+ for group in groups:
+ if len(group) >= 5:
+ severity = np.max(abs_diff[group])
+ if severity > 0.2:
+ center_idx = group[len(group) // 2]
+ artifacts.append(
+ {
+ "time": float(
+ center_idx / sr
+ ), # Ensure float for consistent timing
+ "severity": float(severity),
+ }
+ )
+ issues.append(
+ f"WARNING: Audio discontinuity at {center_idx/sr:.3f}s "
+ f"(severity: {severity:.3f})"
+ )
+
+ # Check for repeated speech segments
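+        # Compare adjacent windows of 0.5-10 s (20% hop) and flag pairs whose
+        # correlation exceeds 0.92 as likely repeated/looping speech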
+ for chunk_duration in tqdm(
+ [0.5, 2.5, 5.0, 10.0], desc="Checking for repeated speech"
+ ):
chunk_size = int(chunk_duration * sr)
- overlap = int(0.2 * chunk_size) # 20% overlap between chunks
-
- for i in range(0, len(audio) - 2*chunk_size, overlap):
- chunk1 = audio[i:i+chunk_size]
- chunk2 = audio[i+chunk_size:i+2*chunk_size]
-
- # Ignore chunks that are mostly silence
+ overlap = int(0.2 * chunk_size)
+
+ for i in range(0, len(audio) - 2 * chunk_size, overlap):
+ chunk1 = audio[i : i + chunk_size]
+ chunk2 = audio[i + chunk_size : i + 2 * chunk_size]
+
if np.mean(np.abs(chunk1)) < 0.01 or np.mean(np.abs(chunk2)) < 0.01:
continue
-
+
try:
- correlation = np.corrcoef(chunk1, chunk2)[0,1]
- if not np.isnan(correlation) and correlation > 0.92: # Lower threshold for sentence-length chunks
+ correlation = np.corrcoef(chunk1, chunk2)[0, 1]
+ if not np.isnan(correlation) and correlation > 0.92:
issues.append(
f"WARNING: Possible repeated speech at {i/sr:.1f}s "
f"(~{int(chunk_duration*160/60):d} words, correlation: {correlation:.3f})"
)
- break # Found repetition at this duration, try next duration
+ break
except:
continue
-
- # 5. Check for extreme amplitude discontinuities (common in failed TTS)
- amplitude_envelope = np.abs(audio)
- window_size = sr // 10 # 100ms window for smoother envelope
- smooth_env = np.convolve(amplitude_envelope, np.ones(window_size)/float(window_size), 'same')
- env_diff = np.abs(np.diff(smooth_env))
-
- # Only detect very extreme amplitude changes
- jump_threshold = 0.5 # Much higher threshold
- jumps = np.where(env_diff > jump_threshold)[0]
-
- if len(jumps) > 0:
- # Group jumps that are close together
- grouped_jumps = []
- current_group = [jumps[0]]
-
- for i in range(1, len(jumps)):
- if (jumps[i] - current_group[-1]) < 0.05 * sr: # Group within 50ms
- current_group.append(jumps[i])
- else:
- if len(current_group) >= 3: # Only keep significant discontinuities
- grouped_jumps.append(current_group)
- current_group = [jumps[i]]
-
- if len(current_group) >= 3:
- grouped_jumps.append(current_group)
-
- # Report only the most severe discontinuities
- for group in grouped_jumps[:2]: # Report up to 2 worst cases
- center_idx = group[len(group)//2]
- jump_size = env_diff[center_idx]
- if jump_size > 0.6: # Only report very extreme changes
- issues.append(
- f"WARNING: Possible audio discontinuity at {center_idx/sr:.2f}s "
- f"({jump_size:.2f} amplitude ratio change)"
- )
-
+
return {
"file": wav_path,
"duration": f"{duration:.2f}s",
"sample_rate": sr,
- "peak_amplitude": f"{peak:.3f}{clip_stats}",
- "rms_level": f"{rms:.3f}",
- "dc_offset": f"{dc_offset:.3f}",
+ "peak_amplitude": f"{stats['peak']:.3f}",
+ "rms_level": f"{stats['rms']:.3f}",
+ "dc_offset": f"{stats['dc_offset']:.3f}",
+ "artifact_count": len(artifacts),
+ "artifact_locations": [a["time"] for a in artifacts],
+ "artifact_severities": [a["severity"] for a in artifacts],
"issues": issues,
- "valid": len(issues) == 0
- }
-
- except Exception as e:
- return {
- "file": wav_path,
- "error": str(e),
- "valid": False
+ "valid": len(issues) == 0,
}
+ except Exception as e:
+ return {"file": wav_path, "error": str(e), "valid": False}
+
+
+def generate_analysis_plots(
+ wav_path: str, output_dir: str, validation_result: Dict[str, Any]
+):
+ """
+ Generate analysis plots for audio file with time-aligned visualizations.
+ """
+ import matplotlib.pyplot as plt
+ from scipy.signal import spectrogram
+
+ # Load audio
+ audio, sr = sf.read(wav_path)
+ if len(audio.shape) > 1:
+ audio = np.mean(audio, axis=1)
+
+ # Create figure with shared x-axis
+ fig = plt.figure(figsize=(15, 8))
+ gs = plt.GridSpec(2, 1, height_ratios=[1.2, 0.8], hspace=0.1)
+ ax1 = fig.add_subplot(gs[0])
+ ax2 = fig.add_subplot(gs[1], sharex=ax1)
+
+ # Calculate spectrogram
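+    # 2048-sample Hann windows with 75% overlap balance time and frequency resolution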
+ nperseg = 2048
+ noverlap = 1536
+ f, t, Sxx = spectrogram(
+ audio, sr, nperseg=nperseg, noverlap=noverlap, window="hann", scaling="spectrum"
+ )
+
+ # Plot spectrogram
+ im = ax1.pcolormesh(
+ t,
+ f,
+ 10 * np.log10(Sxx + 1e-10),
+ shading="gouraud",
+ cmap="viridis",
+ vmin=-100,
+ vmax=-20,
+ )
+ ax1.set_ylabel("Frequency [Hz]", fontsize=10)
+ cbar = plt.colorbar(im, ax=ax1, label="dB")
+ ax1.set_title("Spectrogram", pad=10, fontsize=12)
+
+ # Plot waveform with exact time alignment
+ times = np.arange(len(audio)) / sr
+ ax2.plot(times, audio, color="#2E5596", alpha=0.7, linewidth=0.5, label="Audio")
+ ax2.set_ylabel("Amplitude", fontsize=10)
+ ax2.set_xlabel("Time [sec]", fontsize=10)
+ ax2.grid(True, alpha=0.2)
+
+ # Add artifact markers
+ if (
+ "artifact_locations" in validation_result
+ and validation_result["artifact_locations"]
+ ):
+ for loc in validation_result["artifact_locations"]:
+ ax1.axvline(x=loc, color="red", alpha=0.7, linewidth=2)
+ ax2.axvline(
+ x=loc, color="red", alpha=0.7, linewidth=2, label="Detected Artifacts"
+ )
+
+ # Add legend to both plots
+ if len(validation_result["artifact_locations"]) > 0:
+ ax1.plot([], [], color="red", linewidth=2, label="Detected Artifacts")
+ ax1.legend(loc="upper right", fontsize=8)
+ # Only add unique labels to legend
+ handles, labels = ax2.get_legend_handles_labels()
+ unique_labels = dict(zip(labels, handles))
+ ax2.legend(
+ unique_labels.values(),
+ unique_labels.keys(),
+ loc="upper right",
+ fontsize=8,
+ )
+
+ # Set common x limits
+ xlim = (0, len(audio) / sr)
+ ax1.set_xlim(xlim)
+ ax2.set_xlim(xlim)
+ og_filename = Path(wav_path).name.split(".")[0]
+ # Save plot
+ plt.savefig(
+ Path(output_dir) / f"{og_filename}_audio_analysis.png",
+ dpi=300,
+ bbox_inches="tight",
+ )
+ plt.close()
+
+
if __name__ == "__main__":
- parser = argparse.ArgumentParser(description="TTS Output Validator")
- parser.add_argument("wav_file", help="Path to audio file to validate")
- args = parser.parse_args()
-
- result = validate_tts(args.wav_file)
-
+ wav_file = r"C:\Users\jerem\Desktop\Kokoro-FastAPI\examples\assorted_checks\benchmarks\output_audio\chunk_600_tokens.wav"
+ silent = False
+
+ print(f"\n\n Processing:\n\t{wav_file}")
+ result = validate_tts(wav_file)
+ if not silent:
+ wav_root_dir = Path(wav_file).parent
+ generate_analysis_plots(wav_file, wav_root_dir, result)
+
print(f"\nValidating: {result['file']}")
if "error" in result:
print(f"Error: {result['error']}")
@@ -222,7 +266,8 @@ if __name__ == "__main__":
print(f"Peak Amplitude: {result['peak_amplitude']}")
print(f"RMS Level: {result['rms_level']}")
print(f"DC Offset: {result['dc_offset']}")
-
+ print(f"Detected Artifacts: {result['artifact_count']}")
+
if result["issues"]:
print("\nIssues Found:")
for issue in result["issues"]:
diff --git a/examples/assorted_checks/validate_wavs.py b/examples/assorted_checks/validate_wavs.py
index a37c043..ebf114f 100644
--- a/examples/assorted_checks/validate_wavs.py
+++ b/examples/assorted_checks/validate_wavs.py
@@ -1,7 +1,9 @@
import argparse
from pathlib import Path
+
from validate_wav import validate_tts
+
def print_validation_result(result: dict, rel_path: Path):
"""Print full validation details for a single file."""
print(f"\nValidating: {rel_path}")
@@ -13,7 +15,7 @@ def print_validation_result(result: dict, rel_path: Path):
print(f"Peak Amplitude: {result['peak_amplitude']}")
print(f"RMS Level: {result['rms_level']}")
print(f"DC Offset: {result['dc_offset']}")
-
+
if result["issues"]:
print("\nIssues Found:")
for issue in result["issues"]:
@@ -21,25 +23,26 @@ def print_validation_result(result: dict, rel_path: Path):
else:
print("\nNo issues found")
+
def validate_directory(directory: str):
"""Validate all wav files in a directory with detailed output and summary."""
dir_path = Path(directory)
-
+
# Find all wav files (including nested directories)
wav_files = list(dir_path.rglob("*.wav"))
wav_files.extend(dir_path.rglob("*.mp3")) # Also check mp3s
wav_files = sorted(wav_files)
-
+
if not wav_files:
print(f"No .wav or .mp3 files found in {directory}")
return
-
+
print(f"Found {len(wav_files)} files in {directory}")
print("=" * 80)
-
+
# Store results for summary
results = []
-
+
# Detailed validation output
for wav_file in wav_files:
result = validate_tts(str(wav_file))
@@ -47,7 +50,7 @@ def validate_directory(directory: str):
print_validation_result(result, rel_path)
results.append((rel_path, result))
print("=" * 80)
-
+
# Summary with detailed issues
print("\nSUMMARY:")
for rel_path, result in results:
@@ -58,15 +61,18 @@ def validate_directory(directory: str):
issues = result["issues"]
first_issue = issues[0].replace("WARNING: ", "")
if len(issues) > 1:
- print(f"{rel_path}: FAIL - {first_issue} (+{len(issues)-1} more issues)")
+ print(
+ f"{rel_path}: FAIL - {first_issue} (+{len(issues)-1} more issues)"
+ )
else:
print(f"{rel_path}: FAIL - {first_issue}")
else:
print(f"{rel_path}: PASS")
+
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Batch validate TTS wav files")
parser.add_argument("directory", help="Directory containing wav files to validate")
args = parser.parse_args()
-
+
validate_directory(args.directory)
diff --git a/examples/audio_analysis.png b/examples/audio_analysis.png
new file mode 100644
index 0000000..7c3034f
Binary files /dev/null and b/examples/audio_analysis.png differ
diff --git a/examples/openai_streaming_audio.py b/examples/openai_streaming_audio.py
new file mode 100644
index 0000000..ec5c210
--- /dev/null
+++ b/examples/openai_streaming_audio.py
@@ -0,0 +1,49 @@
+#!/usr/bin/env rye run python
+
+import time
+from pathlib import Path
+
+from openai import OpenAI
+
+# gets OPENAI_API_KEY from your environment variables
+openai = OpenAI(base_url="http://localhost:8880/v1", api_key="not-needed-for-local")
+
+speech_file_path = Path(__file__).parent / "speech.mp3"
+
+
+def main() -> None:
+ stream_to_speakers()
+
+ # Create text-to-speech audio file
+ with openai.audio.speech.with_streaming_response.create(
+ model="kokoro",
+ voice="af",
+ input="the quick brown fox jumped over the lazy dogs",
+ ) as response:
+ response.stream_to_file(speech_file_path)
+
+
+
+def stream_to_speakers() -> None:
+ import pyaudio
+
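+    # 16-bit mono PCM at 24 kHz matches the raw "pcm" stream returned by the API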
+ player_stream = pyaudio.PyAudio().open(format=pyaudio.paInt16, channels=1, rate=24000, output=True)
+
+ start_time = time.time()
+
+ with openai.audio.speech.with_streaming_response.create(
+ model="kokoro",
+ voice="af",
+ response_format="pcm", # similar to WAV, but without a header chunk at the start.
+ input="""My dear sir, that is just where you are wrong. That is just where the whole world has gone wrong. We are always getting away from the present moment. Our mental existences, which are immaterial and have no dimensions, are passing along the Time-Dimension with a uniform velocity from the cradle to the grave. Just as we should travel down if we began our existence fifty miles above the earth’s surface""",
+ ) as response:
+ print(f"Time to first byte: {int((time.time() - start_time) * 1000)}ms")
+ for chunk in response.iter_bytes(chunk_size=1024):
+ player_stream.write(chunk)
+
+ print(f"Done in {int((time.time() - start_time) * 1000)}ms.")
+
+
+if __name__ == "__main__":
+ main()
diff --git a/examples/stream_tts_playback.py b/examples/stream_tts_playback.py
new file mode 100644
index 0000000..70999a8
--- /dev/null
+++ b/examples/stream_tts_playback.py
@@ -0,0 +1,125 @@
+#!/usr/bin/env python3
+import requests
+import numpy as np
+import sounddevice as sd
+import time
+import os
+import wave
+
+def play_streaming_tts(text: str, output_file: str = None, voice: str = "af"):
+ """Stream TTS audio and play it back in real-time"""
+
+ print("\nStarting TTS stream request...")
+ start_time = time.time()
+
+ # Initialize variables
+ sample_rate = 24000 # Known sample rate for Kokoro
+ audio_started = False
+ chunk_count = 0
+ total_bytes = 0
+ first_chunk_time = None
+ all_audio_data = bytearray() # Raw PCM audio data
+
+ # Start sounddevice stream with buffer
+ stream = sd.OutputStream(
+ samplerate=sample_rate,
+ channels=1,
+ dtype=np.int16,
+ blocksize=1024, # Buffer size in samples
+ latency='low' # Request low latency
+ )
+ stream.start()
+
+ # Make streaming request to API
+ try:
+ response = requests.post(
+ "http://localhost:8880/v1/audio/speech",
+ json={
+ "model": "kokoro",
+ "input": text,
+ "voice": voice,
+ "response_format": "pcm",
+ "stream": True
+ },
+ stream=True,
+ timeout=1800
+ )
+ response.raise_for_status()
+ print(f"Request started successfully after {time.time() - start_time:.2f}s")
+
+ # Process streaming response with smaller chunks for lower latency
+ for chunk in response.iter_content(chunk_size=512): # 512 bytes = 256 samples at 16-bit
+ if chunk:
+ chunk_count += 1
+ total_bytes += len(chunk)
+
+ # Handle first chunk
+ if not audio_started:
+ first_chunk_time = time.time()
+ print(f"\nReceived first chunk after {first_chunk_time - start_time:.2f}s")
+ print(f"First chunk size: {len(chunk)} bytes")
+ audio_started = True
+
+ # Convert bytes to numpy array and play
+ audio_chunk = np.frombuffer(chunk, dtype=np.int16)
+ stream.write(audio_chunk)
+
+ # Accumulate raw audio data
+ all_audio_data.extend(chunk)
+
+ # Log progress every 10 chunks
+ if chunk_count % 10 == 0:
+ elapsed = time.time() - start_time
+ print(f"Progress: {chunk_count} chunks, {total_bytes/1024:.1f}KB received, {elapsed:.1f}s elapsed")
+
+ # Final stats
+ total_time = time.time() - start_time
+ print(f"\nStream complete:")
+ print(f"Total chunks: {chunk_count}")
+ print(f"Total data: {total_bytes/1024:.1f}KB")
+ print(f"Total time: {total_time:.2f}s")
+ print(f"Average speed: {(total_bytes/1024)/total_time:.1f}KB/s")
+
+ # Save as WAV file
+ if output_file:
+ print(f"\nWriting audio to {output_file}")
+ with wave.open(output_file, 'wb') as wav_file:
+ wav_file.setnchannels(1) # Mono
+ wav_file.setsampwidth(2) # 2 bytes per sample (16-bit)
+ wav_file.setframerate(sample_rate)
+ wav_file.writeframes(all_audio_data)
+ print(f"Saved {len(all_audio_data)} bytes of audio data")
+
+ # Clean up
+ stream.stop()
+ stream.close()
+
+ except requests.exceptions.ConnectionError as e:
+ print(f"Connection error - Is the server running? Error: {str(e)}")
+ stream.stop()
+ stream.close()
+ except Exception as e:
+ print(f"Error during streaming: {str(e)}")
+ stream.stop()
+ stream.close()
+
+def main():
+ # Load sample text from HG Wells
+ script_dir = os.path.dirname(os.path.abspath(__file__))
+ wells_path = os.path.join(script_dir, "assorted_checks/benchmarks/the_time_machine_hg_wells.txt")
+ output_path = os.path.join(script_dir, "output.wav")
+
+ with open(wells_path, "r", encoding="utf-8") as f:
+ full_text = f.read()
+ # Take first few paragraphs
+ text = " ".join(full_text.split("\n\n")[:2])
+
+ print("\nStarting TTS stream playback...")
+ print(f"Text length: {len(text)} characters")
+ print("\nFirst 100 characters:")
+ print(text[:100] + "...")
+
+ play_streaming_tts(text, output_file=output_path)
+
+if __name__ == "__main__":
+ main()
diff --git a/requirements.txt b/requirements.txt
index 284620c..365e005 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -13,7 +13,7 @@ numpy==2.2.1
scipy==1.14.1
# Audio processing
-soundfile==0.12.1
+soundfile==0.13.0
# Text processing
phonemizer==3.3.0