Add clear text button and enhance temporary file management

- Introduced a "Clear Text" button in the web interface for user convenience.
- Updated temporary file management settings in the configuration.
- Added new debug endpoints for system and storage information.
- Improved logging levels for better debugging insights.
This commit is contained in:
remsky 2025-01-29 18:29:02 -07:00
parent 946e322242
commit 2e318051f8
16 changed files with 445 additions and 169 deletions

1
.gitignore vendored
View file

@ -56,6 +56,7 @@ Kokoro-82M/
ui/data/
EXTERNAL_UV_DOCUMENTATION*
app
api/temp_files/
# Docker
Dockerfile*

View file

@ -32,10 +32,12 @@ class Settings(BaseSettings):
cors_origins: list[str] = ["*"] # CORS origins for web player
cors_enabled: bool = True # Whether to enable CORS
# Temp File Settings
# Temp File Settings for WEB Ui
temp_file_dir: str = "api/temp_files" # Directory for temporary audio files (relative to project root)
max_temp_dir_size_mb: int = 2048 # Maximum size of temp directory (2GB)
temp_file_max_age_hours: int = 1 # Remove temp files older than 1 hour
max_temp_dir_age_hours: int = 1 # Remove temp files older than 1 hour
max_temp_dir_count: int = 3 # Maximum number of temp files to keep
class Config:
env_file = ".env"

View file

@ -351,7 +351,7 @@ async def cleanup_temp_files() -> None:
for entry in entries:
if entry.is_file():
stat = await aiofiles.os.stat(entry.path)
max_age = stat.st_mtime + (settings.temp_file_max_age_hours * 3600)
max_age = stat.st_mtime + (settings.max_temp_dir_age_hours * 3600)
if max_age < stat.st_mtime:
try:
await aiofiles.os.remove(entry.path)

View file

@ -18,6 +18,7 @@ from .routers.web_player import router as web_router
from .core.model_config import model_config
from .routers.development import router as dev_router
from .routers.openai_compatible import router as openai_router
from .routers.debug import router as debug_router
from .services.tts_service import TTSService
@ -48,7 +49,7 @@ async def lifespan(app: FastAPI):
"""Lifespan context manager for model initialization"""
from .inference.model_manager import get_manager
from .inference.voice_manager import get_manager as get_voice_manager
from .core.paths import cleanup_temp_files
from .services.temp_manager import cleanup_temp_files
# Clean old temp files on startup
await cleanup_temp_files()
@ -130,6 +131,7 @@ if settings.cors_enabled:
# Include routers
app.include_router(openai_router, prefix="/v1")
app.include_router(dev_router) # Development endpoints
app.include_router(debug_router) # Debug endpoints
if settings.enable_web_player:
app.include_router(web_router, prefix="/web") # Web player static files

134
api/src/routers/debug.py Normal file
View file

@ -0,0 +1,134 @@
from fastapi import APIRouter
import psutil
import threading
import time
from datetime import datetime
try:
import GPUtil
GPU_AVAILABLE = True
except ImportError:
GPU_AVAILABLE = False
router = APIRouter(tags=["debug"])
@router.get("/debug/threads")
async def get_thread_info():
    """Report thread counts and per-thread details for the current process."""
    process = psutil.Process()
    active = threading.enumerate()
    # Snapshot of every Python-level thread: name, ident, liveness, daemon flag.
    details = [
        {
            "name": t.name,
            "id": t.ident,
            "alive": t.is_alive(),
            "daemon": t.daemon,
        }
        for t in active
    ]
    return {
        # OS-level thread count; may exceed the Python-level count.
        "total_threads": process.num_threads(),
        "active_threads": len(active),
        "thread_names": [t.name for t in active],
        "thread_details": details,
        # Resident set size converted from bytes to MiB.
        "memory_mb": process.memory_info().rss / 1024 / 1024,
    }
@router.get("/debug/storage")
async def get_storage_info():
    """Report disk usage (in GiB) for every accessible mounted partition."""
    partitions = psutil.disk_partitions()
    storage_info = []
    for partition in partitions:
        try:
            usage = psutil.disk_usage(partition.mountpoint)
        except OSError:
            # FIX: the original caught only PermissionError, but
            # psutil.disk_usage raises other OSError subclasses as well
            # (e.g. stale or pseudo filesystems); skip any mountpoint we
            # cannot stat instead of failing the whole endpoint.
            continue
        storage_info.append({
            "device": partition.device,
            "mountpoint": partition.mountpoint,
            "fstype": partition.fstype,
            "total_gb": usage.total / (1024**3),
            "used_gb": usage.used / (1024**3),
            "free_gb": usage.free / (1024**3),
            "percent_used": usage.percent
        })
    return {
        "storage_info": storage_info
    }
@router.get("/debug/system")
async def get_system_info():
    """Report CPU, memory, process, network and (when available) GPU stats.

    NOTE(review): ``psutil.cpu_percent(interval=1)`` blocks for the full
    interval. The original sampled twice (overall + per-CPU), stalling this
    async handler ~2 seconds; a single per-CPU sample is taken instead and
    the overall figure derived from it, halving the event-loop stall.
    """
    process = psutil.Process()

    # CPU Info — one blocking 1-second sample shared by both fields.
    per_cpu = psutil.cpu_percent(interval=1, percpu=True)
    cpu_info = {
        "cpu_count": psutil.cpu_count(),
        "cpu_percent": round(sum(per_cpu) / len(per_cpu), 1) if per_cpu else 0.0,
        "per_cpu_percent": per_cpu,
        "load_avg": psutil.getloadavg()  # 1/5/15-minute load averages
    }

    # Memory Info — system-wide virtual + swap, reported in GiB.
    virtual_memory = psutil.virtual_memory()
    swap_memory = psutil.swap_memory()
    memory_info = {
        "virtual": {
            "total_gb": virtual_memory.total / (1024**3),
            "available_gb": virtual_memory.available / (1024**3),
            "used_gb": virtual_memory.used / (1024**3),
            "percent": virtual_memory.percent
        },
        "swap": {
            "total_gb": swap_memory.total / (1024**3),
            "used_gb": swap_memory.used / (1024**3),
            "free_gb": swap_memory.free / (1024**3),
            "percent": swap_memory.percent
        }
    }

    # Process Info — the serving process itself.
    process_info = {
        "pid": process.pid,
        "status": process.status(),
        "create_time": datetime.fromtimestamp(process.create_time()).isoformat(),
        "cpu_percent": process.cpu_percent(),
        "memory_percent": process.memory_percent(),
    }

    # Network Info — open connections for this process + host-wide IO counters.
    network_info = {
        "connections": len(process.net_connections()),
        "network_io": psutil.net_io_counters()._asdict()
    }

    # GPU Info — only when GPUtil imported successfully at module load.
    gpu_info = None
    if GPU_AVAILABLE:
        try:
            gpus = GPUtil.getGPUs()
            gpu_info = [{
                "id": gpu.id,
                "name": gpu.name,
                "load": gpu.load,
                "memory": {
                    "total": gpu.memoryTotal,
                    "used": gpu.memoryUsed,
                    "free": gpu.memoryFree,
                    "percent": (gpu.memoryUsed / gpu.memoryTotal) * 100
                },
                "temperature": gpu.temperature
            } for gpu in gpus]
        except Exception:
            # GPUtil probing can fail at runtime (driver issues); degrade gracefully.
            gpu_info = "GPU information unavailable"

    return {
        "cpu": cpu_info,
        "memory": memory_info,
        "process": process_info,
        "network": network_info,
        "gpu": gpu_info
    }

View file

@ -2,14 +2,71 @@
import os
import tempfile
from typing import Optional
from typing import Optional, List
import aiofiles
from fastapi import HTTPException
from loguru import logger
from ..core.config import settings
from ..core.paths import _scan_directories
async def cleanup_temp_files() -> None:
    """Clean up old temp files.

    Enforces three limits on ``settings.temp_file_dir``, removing the
    oldest files first:
      1. age   — files older than ``max_temp_dir_age_hours``
      2. count — keep at most ``max_temp_dir_count`` files
      3. size  — keep the directory under ``max_temp_dir_size_mb``

    Best-effort: every failure is logged and swallowed so cleanup never
    breaks the caller (invoked at startup and before each temp write).
    """
    import time  # local import: wall-clock reference for file ages

    try:
        if not await aiofiles.os.path.exists(settings.temp_file_dir):
            await aiofiles.os.makedirs(settings.temp_file_dir, exist_ok=True)
            return

        # Gather (path, mtime, size) for every regular file in the temp dir.
        files = []
        total_size = 0
        # Use os.scandir for sync iteration, but aiofiles.os.stat for async stats
        for entry in os.scandir(settings.temp_file_dir):
            if entry.is_file():
                stat = await aiofiles.os.stat(entry.path)
                files.append((entry.path, stat.st_mtime, stat.st_size))
                total_size += stat.st_size

        # Oldest first so the limits drop the stalest files.
        files.sort(key=lambda item: item[1])

        # FIX: use wall-clock time as the age reference. The original used
        # the directory's own mtime, which only tracks the last create or
        # delete inside it, so file ages were computed against the wrong base.
        current_time = time.time()
        max_age = settings.max_temp_dir_age_hours * 3600

        # FIX: track how many files remain. The original compared the
        # constant len(files) against the limit, which deleted EVERY file
        # once the count was exceeded instead of just the excess.
        remaining = len(files)

        for path, mtime, size in files:
            should_delete = False
            # 1. Too old?
            if current_time - mtime > max_age:
                should_delete = True
                logger.info(f"Deleting old temp file: {path}")
            # 2. Still more files than allowed?
            elif remaining > settings.max_temp_dir_count:
                should_delete = True
                logger.info(f"Deleting excess temp file: {path}")
            # 3. Directory still over its size budget?
            elif total_size > settings.max_temp_dir_size_mb * 1024 * 1024:
                should_delete = True
                logger.info(f"Deleting to reduce directory size: {path}")

            if should_delete:
                try:
                    await aiofiles.os.remove(path)
                    total_size -= size
                    remaining -= 1
                    logger.info(f"Deleted temp file: {path}")
                except Exception as e:
                    logger.warning(f"Failed to delete temp file {path}: {e}")
    except Exception as e:
        logger.warning(f"Error during temp file cleanup: {e}")
class TempFileWriter:
@ -27,21 +84,11 @@ class TempFileWriter:
async def __aenter__(self):
"""Async context manager entry"""
# Check temp dir size by scanning
total_size = 0
entries = await _scan_directories([settings.temp_file_dir])
for entry in entries:
stat = await aiofiles.os.stat(os.path.join(settings.temp_file_dir, entry))
total_size += stat.st_size
if total_size >= settings.max_temp_dir_size_mb * 1024 * 1024:
raise HTTPException(
status_code=507,
detail="Temporary storage full. Please try again later."
)
# Clean up old files first
await cleanup_temp_files()
# Create temp file with proper extension
os.makedirs(settings.temp_file_dir, exist_ok=True)
await aiofiles.os.makedirs(settings.temp_file_dir, exist_ok=True)
temp = tempfile.NamedTemporaryFile(
dir=settings.temp_file_dir,
delete=False,

View file

@ -1,31 +0,0 @@
# """Text chunking module for TTS processing"""
# from typing import List, AsyncGenerator
# async def fallback_split(text: str, max_chars: int = 400) -> List[str]:
# """Emergency length control - only used if chunks are too long"""
# words = text.split()
# chunks = []
# current = []
# current_len = 0
# for word in words:
# # Always include at least one word per chunk
# if not current:
# current.append(word)
# current_len = len(word)
# continue
# # Check if adding word would exceed limit
# if current_len + len(word) + 1 <= max_chars:
# current.append(word)
# current_len += len(word) + 1
# else:
# chunks.append(" ".join(current))
# current = [word]
# current_len = len(word)
# if current:
# chunks.append(" ".join(current))
# return chunks

View file

@ -50,7 +50,7 @@ def process_text_chunk(text: str, language: str = "a") -> List[int]:
async def yield_chunk(text: str, tokens: List[int], chunk_count: int) -> Tuple[str, List[int]]:
"""Yield a chunk with consistent logging."""
logger.info(f"Yielding chunk {chunk_count}: '{text[:50]}...' ({len(tokens)} tokens)")
logger.debug(f"Yielding chunk {chunk_count}: '{text[:50]}...' ({len(tokens)} tokens)")
return text, tokens
def process_text(text: str, language: str = "a") -> List[int]:
@ -111,7 +111,7 @@ async def smart_split(text: str, max_tokens: int = ABSOLUTE_MAX) -> AsyncGenerat
if current_chunk:
chunk_text = " ".join(current_chunk)
chunk_count += 1
logger.info(f"Yielding chunk {chunk_count}: '{chunk_text[:50]}...' ({current_count} tokens)")
logger.debug(f"Yielding chunk {chunk_count}: '{chunk_text[:50]}...' ({current_count} tokens)")
yield chunk_text, current_tokens
current_chunk = []
current_tokens = []
@ -144,7 +144,7 @@ async def smart_split(text: str, max_tokens: int = ABSOLUTE_MAX) -> AsyncGenerat
if clause_chunk:
chunk_text = " ".join(clause_chunk)
chunk_count += 1
logger.info(f"Yielding clause chunk {chunk_count}: '{chunk_text[:50]}...' ({clause_count} tokens)")
logger.debug(f"Yielding clause chunk {chunk_count}: '{chunk_text[:50]}...' ({clause_count} tokens)")
yield chunk_text, clause_tokens
clause_chunk = [full_clause]
clause_tokens = tokens
@ -154,7 +154,7 @@ async def smart_split(text: str, max_tokens: int = ABSOLUTE_MAX) -> AsyncGenerat
if clause_chunk:
chunk_text = " ".join(clause_chunk)
chunk_count += 1
logger.info(f"Yielding final clause chunk {chunk_count}: '{chunk_text[:50]}...' ({clause_count} tokens)")
logger.debug(f"Yielding final clause chunk {chunk_count}: '{chunk_text[:50]}...' ({clause_count} tokens)")
yield chunk_text, clause_tokens
# Regular sentence handling

11
api/tests/debug.http Normal file
View file

@ -0,0 +1,11 @@
### Get Thread Information
GET http://localhost:8880/debug/threads
Accept: application/json
### Get Storage Information
GET http://localhost:8880/debug/storage
Accept: application/json
### Get System Information
GET http://localhost:8880/debug/system
Accept: application/json

View file

@ -1,46 +0,0 @@
"""Tests for text chunking service"""
from unittest.mock import patch
import pytest
from api.src.services.text_processing import chunker
@pytest.fixture(autouse=True)
def mock_settings():
    """Patch the chunker's settings object for every test in this module."""
    patcher = patch("api.src.services.text_processing.chunker.settings")
    mocked = patcher.start()
    mocked.max_chunk_size = 300
    try:
        yield mocked
    finally:
        # Teardown mirrors the context-manager exit of the original.
        patcher.stop()
def test_split_text():
    """Three terminated sentences split into exactly three chunks."""
    pieces = list(chunker.split_text("First sentence. Second sentence! Third sentence?"))
    assert pieces == ["First sentence.", "Second sentence!", "Third sentence?"]
def test_split_text_empty():
    """An empty string yields no chunks at all."""
    assert not list(chunker.split_text(""))
def test_split_text_single_sentence():
    """A lone sentence comes back unchanged as a single chunk."""
    result = list(chunker.split_text("Just one sentence."))
    assert result == ["Just one sentence."]
def test_split_text_with_custom_chunk_size():
    """A 15-char limit splits on clause boundaries into three short chunks."""
    text = "First part, second part, third part."
    pieces = list(chunker.split_text(text, max_chunk=15))
    assert pieces == ["First part,", "second part,", "third part."]

View file

@ -36,6 +36,7 @@ dependencies = [
"matplotlib>=3.10.0",
"semchunk>=3.0.1",
"mutagen>=1.47.0",
"psutil>=6.1.1",
]
[project.optional-dependencies]

17
uv.lock generated
View file

@ -1015,6 +1015,7 @@ dependencies = [
{ name = "numpy" },
{ name = "openai" },
{ name = "phonemizer" },
{ name = "psutil" },
{ name = "pydantic" },
{ name = "pydantic-settings" },
{ name = "pydub" },
@ -1068,6 +1069,7 @@ requires-dist = [
{ name = "openai", specifier = ">=1.59.6" },
{ name = "openai", marker = "extra == 'test'", specifier = ">=1.59.6" },
{ name = "phonemizer", specifier = "==3.3.0" },
{ name = "psutil", specifier = ">=6.1.1" },
{ name = "pydantic", specifier = "==2.10.4" },
{ name = "pydantic-settings", specifier = "==2.7.0" },
{ name = "pydub", specifier = ">=0.25.1" },
@ -2111,6 +2113,21 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/fd/b2/ab07b09e0f6d143dfb839693aa05765257bceaa13d03bf1a696b78323e7a/protobuf-5.29.3-py3-none-any.whl", hash = "sha256:0a18ed4a24198528f2333802eb075e59dea9d679ab7a6c5efb017a59004d849f", size = 172550 },
]
[[package]]
name = "psutil"
version = "6.1.1"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/1f/5a/07871137bb752428aa4b659f910b399ba6f291156bdea939be3e96cae7cb/psutil-6.1.1.tar.gz", hash = "sha256:cf8496728c18f2d0b45198f06895be52f36611711746b7f30c464b422b50e2f5", size = 508502 }
wheels = [
{ url = "https://files.pythonhosted.org/packages/61/99/ca79d302be46f7bdd8321089762dd4476ee725fce16fc2b2e1dbba8cac17/psutil-6.1.1-cp36-abi3-macosx_10_9_x86_64.whl", hash = "sha256:fc0ed7fe2231a444fc219b9c42d0376e0a9a1a72f16c5cfa0f68d19f1a0663e8", size = 247511 },
{ url = "https://files.pythonhosted.org/packages/0b/6b/73dbde0dd38f3782905d4587049b9be64d76671042fdcaf60e2430c6796d/psutil-6.1.1-cp36-abi3-macosx_11_0_arm64.whl", hash = "sha256:0bdd4eab935276290ad3cb718e9809412895ca6b5b334f5a9111ee6d9aff9377", size = 248985 },
{ url = "https://files.pythonhosted.org/packages/17/38/c319d31a1d3f88c5b79c68b3116c129e5133f1822157dd6da34043e32ed6/psutil-6.1.1-cp36-abi3-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b6e06c20c05fe95a3d7302d74e7097756d4ba1247975ad6905441ae1b5b66003", size = 284488 },
{ url = "https://files.pythonhosted.org/packages/9c/39/0f88a830a1c8a3aba27fededc642da37613c57cbff143412e3536f89784f/psutil-6.1.1-cp36-abi3-manylinux_2_12_x86_64.manylinux2010_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:97f7cb9921fbec4904f522d972f0c0e1f4fabbdd4e0287813b21215074a0f160", size = 287477 },
{ url = "https://files.pythonhosted.org/packages/47/da/99f4345d4ddf2845cb5b5bd0d93d554e84542d116934fde07a0c50bd4e9f/psutil-6.1.1-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:33431e84fee02bc84ea36d9e2c4a6d395d479c9dd9bba2376c1f6ee8f3a4e0b3", size = 289017 },
{ url = "https://files.pythonhosted.org/packages/38/53/bd755c2896f4461fd4f36fa6a6dcb66a88a9e4b9fd4e5b66a77cf9d4a584/psutil-6.1.1-cp37-abi3-win32.whl", hash = "sha256:eaa912e0b11848c4d9279a93d7e2783df352b082f40111e078388701fd479e53", size = 250602 },
{ url = "https://files.pythonhosted.org/packages/7b/d7/7831438e6c3ebbfa6e01a927127a6cb42ad3ab844247f3c5b96bea25d73d/psutil-6.1.1-cp37-abi3-win_amd64.whl", hash = "sha256:f35cfccb065fff93529d2afb4a2e89e363fe63ca1e4a5da22b603a85833c2649", size = 254444 },
]
[[package]]
name = "pycparser"
version = "2.22"

View file

@ -84,6 +84,9 @@
</div>
</div>
<div class="button-group">
<button id="clear-btn" class="clear-btn">
Clear Text
</button>
<button id="generate-btn">
<span class="btn-text">Generate Speech</span>
<span class="loader"></span>

View file

@ -16,7 +16,8 @@ export class App {
autoplayToggle: document.getElementById('autoplay-toggle'),
formatSelect: document.getElementById('format-select'),
status: document.getElementById('status'),
cancelBtn: document.getElementById('cancel-btn')
cancelBtn: document.getElementById('cancel-btn'),
clearBtn: document.getElementById('clear-btn')
};
this.initialize();
@ -60,6 +61,12 @@ export class App {
this.showStatus('Generation cancelled', 'info');
});
// Clear text button
this.elements.clearBtn.addEventListener('click', () => {
this.elements.textInput.value = '';
this.elements.textInput.focus();
});
// Handle page unload
window.addEventListener('beforeunload', () => {
this.audioService.cleanup();
@ -74,15 +81,34 @@ export class App {
this.elements.downloadBtn.style.display = 'flex';
});
// Handle buffer errors
this.audioService.addEventListener('bufferError', () => {
this.showStatus('Processing... (Download will be available when complete)', 'info');
});
// Handle completion
this.audioService.addEventListener('complete', () => {
this.setGenerating(false);
// Show preparing status
this.showStatus('Preparing file...', 'info');
// Trigger coffee steam animation
const steamElement = document.querySelector('.cup .steam');
if (steamElement) {
// Remove and re-add the element to restart animation
const parent = steamElement.parentNode;
const clone = steamElement.cloneNode(true);
parent.removeChild(steamElement);
parent.appendChild(clone);
}
});
// Handle download ready
this.audioService.addEventListener('downloadReady', () => {
this.showStatus('Generation complete', 'success');
setTimeout(() => {
this.showStatus('Generation complete', 'success');
}, 500); // Small delay to ensure "Preparing file..." is visible
});
// Handle audio end
@ -175,20 +201,23 @@ export class App {
downloadAudio() {
const downloadUrl = this.audioService.getDownloadUrl();
if (!downloadUrl) return;
if (!downloadUrl) {
console.warn('No download URL available');
return;
}
console.log('Starting download from:', downloadUrl);
const format = this.elements.formatSelect.value;
const voice = this.voiceService.getSelectedVoiceString();
const timestamp = new Date().toISOString().replace(/[:.]/g, '-');
// Create download link
const a = document.createElement('a');
a.href = downloadUrl;
a.download = `${voice}_${timestamp}.${format}`;
document.body.appendChild(a);
a.click();
document.body.removeChild(a);
URL.revokeObjectURL(downloadUrl);
}
}

View file

@ -5,31 +5,26 @@ export class AudioService {
this.audio = null;
this.controller = null;
this.eventListeners = new Map();
this.chunks = [];
this.minimumPlaybackSize = 50000; // 50KB minimum before playback
this.textLength = 0;
this.shouldAutoplay = false;
this.CHARS_PER_CHUNK = 300; // Estimated chars per chunk
this.serverDownloadPath = null; // Server-side download path
this.pendingOperations = []; // Queue for buffer operations
}
async streamAudio(text, voice, speed, onProgress) {
try {
console.log('AudioService: Starting stream...', { text, voice, speed });
// Only abort if there's an active controller
if (this.controller) {
this.controller.abort();
this.controller = null;
}
// Create new controller before cleanup to prevent race conditions
this.controller = new AbortController();
// Clean up previous audio state
this.cleanup();
onProgress?.(0, 1); // Reset progress to 0
this.chunks = [];
this.textLength = text.length;
this.shouldAutoplay = document.getElementById('autoplay-toggle').checked;
@ -52,7 +47,10 @@ export class AudioService {
signal: this.controller.signal
});
console.log('AudioService: Got response', { status: response.status });
console.log('AudioService: Got response', {
status: response.status,
headers: Object.fromEntries(response.headers.entries())
});
if (!response.ok) {
const error = await response.json();
@ -68,12 +66,16 @@ export class AudioService {
}
}
async setupAudioStream(stream, response, onProgress, estimatedTotalSize) {
async setupAudioStream(stream, response, onProgress, estimatedChunks) {
this.audio = new Audio();
this.mediaSource = new MediaSource();
this.audio.src = URL.createObjectURL(this.mediaSource);
// Set up ended event handler
// Monitor for audio element errors
this.audio.addEventListener('error', (e) => {
console.error('Audio error:', this.audio.error);
});
this.audio.addEventListener('ended', () => {
this.dispatchEvent('ended');
});
@ -82,7 +84,13 @@ export class AudioService {
this.mediaSource.addEventListener('sourceopen', async () => {
try {
this.sourceBuffer = this.mediaSource.addSourceBuffer('audio/mpeg');
await this.processStream(stream, response, onProgress, estimatedTotalSize);
this.sourceBuffer.mode = 'sequence';
this.sourceBuffer.addEventListener('updateend', () => {
this.processNextOperation();
});
await this.processStream(stream, response, onProgress, estimatedChunks);
resolve();
} catch (error) {
reject(error);
@ -96,41 +104,88 @@ export class AudioService {
let hasStartedPlaying = false;
let receivedChunks = 0;
// Check for download path in response headers
const downloadPath = response.headers.get('X-Download-Path');
if (downloadPath) {
this.serverDownloadPath = downloadPath;
}
try {
while (true) {
const {value, done} = await reader.read();
if (done) {
// Get final download path from header
const downloadPath = response.headers.get('X-Download-Path');
if (downloadPath) {
// Prepend /v1 since the router is mounted there
this.serverDownloadPath = `/v1${downloadPath}`;
console.log('Download path received:', this.serverDownloadPath);
// Log all headers to see what we're getting
console.log('All response headers:', Object.fromEntries(response.headers.entries()));
} else {
console.warn('No X-Download-Path header found in response');
}
if (this.mediaSource.readyState === 'open') {
this.mediaSource.endOfStream();
}
// Ensure we show 100% at completion
// Signal completion
onProgress?.(estimatedChunks, estimatedChunks);
this.dispatchEvent('complete');
this.dispatchEvent('downloadReady');
setTimeout(() => {
this.dispatchEvent('downloadReady');
}, 800);
return;
}
this.chunks.push(value);
receivedChunks++;
await this.appendChunk(value);
// Update progress based on received chunks
onProgress?.(receivedChunks, estimatedChunks);
// Start playback if we have enough chunks
if (!hasStartedPlaying && receivedChunks >= 1) {
hasStartedPlaying = true;
if (this.shouldAutoplay) {
// Small delay to ensure buffer is ready
setTimeout(() => this.play(), 100);
try {
// Check for audio errors before proceeding
if (this.audio.error) {
console.error('Audio error detected:', this.audio.error);
continue; // Skip this chunk if audio is in error state
}
// Only remove old data if we're hitting quota errors
if (this.sourceBuffer.buffered.length > 0) {
const currentTime = this.audio.currentTime;
const start = this.sourceBuffer.buffered.start(0);
const end = this.sourceBuffer.buffered.end(0);
// Only remove if we have a lot of historical data
if (currentTime - start > 30) {
const removeEnd = Math.max(start, currentTime - 15);
if (removeEnd > start) {
await this.removeBufferRange(start, removeEnd);
}
}
}
await this.appendChunk(value);
if (!hasStartedPlaying && this.sourceBuffer.buffered.length > 0) {
hasStartedPlaying = true;
if (this.shouldAutoplay) {
setTimeout(() => this.play(), 100);
}
}
} catch (error) {
if (error.name === 'QuotaExceededError') {
// If we hit quota, try more aggressive cleanup
if (this.sourceBuffer.buffered.length > 0) {
const currentTime = this.audio.currentTime;
const start = this.sourceBuffer.buffered.start(0);
const removeEnd = Math.max(start, currentTime - 5);
if (removeEnd > start) {
await this.removeBufferRange(start, removeEnd);
// Retry append after removing data
try {
await this.appendChunk(value);
} catch (retryError) {
console.warn('Buffer error after cleanup:', retryError);
}
}
}
} else {
console.warn('Buffer error:', error);
}
}
}
@ -141,23 +196,77 @@ export class AudioService {
}
}
async appendChunk(chunk) {
async removeBufferRange(start, end) {
// Double check that end is greater than start
if (end <= start) {
console.warn('Invalid buffer remove range:', {start, end});
return;
}
return new Promise((resolve) => {
const appendChunk = () => {
this.sourceBuffer.appendBuffer(chunk);
this.sourceBuffer.addEventListener('updateend', resolve, { once: true });
const doRemove = () => {
try {
this.sourceBuffer.remove(start, end);
} catch (e) {
console.warn('Error removing buffer:', e);
}
resolve();
};
if (!this.sourceBuffer.updating) {
appendChunk();
if (this.sourceBuffer.updating) {
this.sourceBuffer.addEventListener('updateend', () => {
doRemove();
}, { once: true });
} else {
this.sourceBuffer.addEventListener('updateend', appendChunk, { once: true });
doRemove();
}
});
}
// Queue an audio chunk for appending to the SourceBuffer. Appends are
// serialized through pendingOperations because a SourceBuffer rejects
// appendBuffer() while a previous update is still in flight; the
// 'updateend' listener installed in sourceopen drains the queue.
async appendChunk(chunk) {
// Don't append if audio is in error state
if (this.audio.error) {
console.warn('Skipping chunk append due to audio error');
return;
}
return new Promise((resolve, reject) => {
const operation = { chunk, resolve, reject };
this.pendingOperations.push(operation);
// Kick the queue immediately if the buffer is idle; otherwise the
// pending 'updateend' event will trigger processNextOperation.
// NOTE(review): processNextOperation resolves this promise right after
// appendBuffer() is issued, not on 'updateend' — confirm callers don't
// depend on the append having fully completed.
if (!this.sourceBuffer.updating) {
this.processNextOperation();
}
});
}
// Drain one queued append operation. Invoked from the SourceBuffer's
// 'updateend' handler and directly from appendChunk when the buffer is idle.
processNextOperation() {
if (this.sourceBuffer.updating || this.pendingOperations.length === 0) {
return;
}
// Don't process if audio is in error state
if (this.audio.error) {
console.warn('Skipping operation due to audio error');
return;
}
const operation = this.pendingOperations.shift();
try {
this.sourceBuffer.appendBuffer(operation.chunk);
// Resolve immediately; completion is signalled separately via 'updateend'.
operation.resolve();
} catch (error) {
operation.reject(error);
// Only continue processing if it's not a fatal error
// NOTE(review): on InvalidStateError the remaining queued operations are
// never resolved or rejected, so their promises hang — confirm intended.
if (error.name !== 'InvalidStateError') {
this.processNextOperation();
}
}
}
play() {
if (this.audio && this.audio.readyState >= 2) {
if (this.audio && this.audio.readyState >= 2 && !this.audio.error) {
const playPromise = this.audio.play();
if (playPromise) {
playPromise.catch(error => {
@ -178,7 +287,7 @@ export class AudioService {
}
seek(time) {
if (this.audio) {
if (this.audio && !this.audio.error) {
const wasPlaying = !this.audio.paused;
this.audio.currentTime = time;
if (wasPlaying) {
@ -239,7 +348,6 @@ export class AudioService {
this.controller = null;
}
// Full cleanup of all resources
if (this.audio) {
this.audio.pause();
this.audio.src = '';
@ -256,18 +364,14 @@ export class AudioService {
this.mediaSource = null;
this.sourceBuffer = null;
this.chunks = [];
this.textLength = 0;
this.serverDownloadPath = null;
this.pendingOperations = [];
// Force a hard refresh of the page to ensure clean state
window.location.reload();
}
cleanup() {
// Clean up audio elements
if (this.audio) {
// Remove all event listeners
this.eventListeners.forEach((listeners, event) => {
listeners.forEach(callback => {
this.audio.removeEventListener(event, callback);
@ -289,28 +393,16 @@ export class AudioService {
this.mediaSource = null;
this.sourceBuffer = null;
this.chunks = [];
this.textLength = 0;
this.serverDownloadPath = null;
this.pendingOperations = [];
}
getDownloadUrl() {
// Check for server-side download link first
const downloadPath = this.serverDownloadPath;
if (downloadPath) {
return downloadPath;
}
// Fall back to client-side blob URL
if (!this.audio || !this.sourceBuffer || this.chunks.length === 0) return null;
// Get the buffered data from MediaSource
const buffered = this.sourceBuffer.buffered;
if (buffered.length === 0) return null;
// Create blob from the original chunks
const blob = new Blob(this.chunks, { type: 'audio/mpeg' });
return URL.createObjectURL(blob);
return URL.createObjectURL(blob);
getDownloadUrl() {
if (!this.serverDownloadPath) {
console.warn('No download path available');
return null;
}
return this.serverDownloadPath;
}
}

View file

@ -276,4 +276,18 @@ button:disabled {
.loading .btn-text {
display: none;
}
/* Secondary "Clear Text" button: outlined, low-emphasis styling.
   !important is used throughout — presumably to override shared button
   rules defined earlier in this stylesheet (base rules not visible here;
   confirm before removing). */
.clear-btn {
background: transparent !important;
border: 1px solid var(--border) !important;
color: var(--text-light) !important;
padding: 0.5rem 1rem !important;
flex: 0 !important; /* Don't expand like other buttons */
}
/* Hover: subtle tint only — suppress the lift/shadow other buttons get. */
.clear-btn:hover {
background: rgba(99, 102, 241, 0.1) !important;
transform: none !important;
box-shadow: none !important;
}