Kokoro-FastAPI/api/src/services/audio.py

"""Audio conversion service"""

import math
from typing import Optional

import numpy as np
from loguru import logger

from ..core.config import settings
from ..inference.base import AudioChunk
from .streaming_audio_writer import StreamingAudioWriter


class AudioNormalizer:
"""Handles audio normalization state for a single stream"""
2025-01-09 18:41:44 -07:00
2025-01-04 17:54:54 -07:00
def __init__(self):
self.chunk_trim_ms = settings.gap_trim_ms
self.sample_rate = 24000 # Sample rate of the audio
self.samples_to_trim = int(self.chunk_trim_ms * self.sample_rate / 1000)
self.samples_to_pad_start= int(50 * self.sample_rate / 1000)
    def find_first_last_non_silent(
        self,
        audio_data: np.ndarray,
        chunk_text: str,
        speed: float,
        silence_threshold_db: int = -45,
        is_last_chunk: bool = False,
    ) -> tuple[int, int]:
        """Finds the indices of the first and last non-silent samples in audio data.

        Args:
            audio_data: Input audio data as numpy array
            chunk_text: The text sent to the model to generate the resulting speech
            speed: The speaking speed of the voice
            silence_threshold_db: How quiet audio has to be to be considered silent
            is_last_chunk: Whether this is the last chunk

        Returns:
            A tuple with the start and end indices of the non-silent portion
        """
        pad_multiplier = 1
        split_character = chunk_text.strip()
        if len(split_character) > 0:
            split_character = split_character[-1]
            if split_character in settings.dynamic_gap_trim_padding_char_multiplier:
                pad_multiplier = settings.dynamic_gap_trim_padding_char_multiplier[
                    split_character
                ]

        if not is_last_chunk:
            samples_to_pad_end = max(
                int(
                    (settings.dynamic_gap_trim_padding_ms * self.sample_rate * pad_multiplier)
                    / 1000
                )
                - self.samples_to_pad_start,
                0,
            )
        else:
            samples_to_pad_end = self.samples_to_pad_start

        # Convert dBFS threshold to an absolute amplitude for this dtype
        amplitude_threshold = np.iinfo(audio_data.dtype).max * (
            10 ** (silence_threshold_db / 20)
        )

        # Scan inward from both ends for the first samples above the threshold
        non_silent_index_start, non_silent_index_end = None, None
        for i in range(len(audio_data)):
            if audio_data[i] > amplitude_threshold:
                non_silent_index_start = i
                break
        for i in range(len(audio_data) - 1, -1, -1):
            if audio_data[i] > amplitude_threshold:
                non_silent_index_end = i
                break

        # Handle the case where the entire audio is silent
        if non_silent_index_start is None or non_silent_index_end is None:
            return 0, len(audio_data)

        return (
            max(non_silent_index_start - self.samples_to_pad_start, 0),
            min(
                non_silent_index_end + math.ceil(samples_to_pad_end / speed),
                len(audio_data),
            ),
        )
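
    # For intuition: with int16 audio, np.iinfo(np.int16).max is 32767, so the
    # default -45 dBFS threshold corresponds to an amplitude of roughly
    # 32767 * 10 ** (-45 / 20) ~= 184; samples above that level are treated as
    # non-silent. (Illustrative arithmetic only.)
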
    def normalize(self, audio_data: np.ndarray) -> np.ndarray:
        """Convert audio data to int16 range

        Args:
            audio_data: Input audio data as numpy array

        Returns:
            Normalized audio data
        """
        if audio_data.dtype != np.int16:
            # Scale directly to int16 range with clipping
            return np.clip(audio_data * 32767, -32768, 32767).astype(np.int16)
        return audio_data
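
    # Example: float input is assumed to be in [-1.0, 1.0], so 0.5 maps to
    # int(0.5 * 32767) = 16383 and -1.0 to -32767; out-of-range values are
    # clipped to [-32768, 32767]. int16 input passes through unchanged.

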
class AudioService:
"""Service for audio format conversions with streaming support"""
2025-01-09 18:41:44 -07:00
2025-01-28 13:52:57 -07:00
# Supported formats
SUPPORTED_FORMATS = {"wav", "mp3", "opus", "flac", "aac", "pcm"}
2025-01-28 13:52:57 -07:00
# Default audio format settings balanced for speed and compression
DEFAULT_SETTINGS = {
"mp3": {
"bitrate_mode": "CONSTANT", # Faster than variable bitrate
"compression_level": 0.0, # Balanced compression
},
"opus": {
"compression_level": 0.0, # Good balance for speech
},
"flac": {
"compression_level": 0.0, # Light compression, still fast
2025-01-09 18:41:44 -07:00
},
2025-01-17 21:43:10 -07:00
"aac": {
"bitrate": "192k", # Default AAC bitrate
},
}
2025-01-09 18:41:44 -07:00
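    # For scale: "192k" is 192 kbit/s, i.e. about 24 KB of encoded AAC per
    # second of audio, generally more than enough for 24 kHz speech.
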
    @staticmethod
    async def convert_audio(
        audio_chunk: AudioChunk,
        output_format: str,
        writer: StreamingAudioWriter,
        speed: float = 1,
        chunk_text: str = "",
        is_last_chunk: bool = False,
        trim_audio: bool = True,
        normalizer: Optional[AudioNormalizer] = None,
    ) -> AudioChunk:
        """Convert audio data to specified format with streaming support

        Args:
            audio_chunk: The AudioChunk containing samples to convert
            output_format: Target format (wav, mp3, opus, flac, aac, pcm)
            writer: The StreamingAudioWriter to use
            speed: The speaking speed of the voice
            chunk_text: The text sent to the model to generate the resulting speech
            is_last_chunk: Whether this is the last chunk
            trim_audio: Whether audio should be trimmed
            normalizer: Optional AudioNormalizer instance for consistent normalization

        Returns:
            The AudioChunk with its output field set to the converted bytes
        """
        try:
            # Validate format
            if output_format not in AudioService.SUPPORTED_FORMATS:
                raise ValueError(f"Format {output_format} not supported")

            # Always normalize audio to ensure proper amplitude scaling
            if normalizer is None:
                normalizer = AudioNormalizer()
            audio_chunk.audio = normalizer.normalize(audio_chunk.audio)

            if trim_audio:
                audio_chunk = AudioService.trim_audio(
                    audio_chunk, chunk_text, speed, is_last_chunk, normalizer
                )

            # Write audio data first; initialize chunk_data so it is defined
            # even when the chunk is empty
            chunk_data = b""
            if len(audio_chunk.audio) > 0:
                chunk_data = writer.write_chunk(audio_chunk.audio)

            # Then finalize if this is the last chunk
            if is_last_chunk:
                final_data = writer.write_chunk(finalize=True)
                if final_data:
                    audio_chunk.output = final_data
                return audio_chunk

            if chunk_data:
                audio_chunk.output = chunk_data
            return audio_chunk
        except Exception as e:
            logger.error(f"Error converting audio stream to {output_format}: {str(e)}")
            raise ValueError(
                f"Failed to convert audio stream to {output_format}: {str(e)}"
            )
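
    # Streaming pattern (sketch): call convert_audio once per generated chunk,
    # reusing the same writer and normalizer across calls, and pass
    # is_last_chunk=True on the final call so writer.write_chunk(finalize=True)
    # can flush and close out the encoded stream.
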
    @staticmethod
    def trim_audio(
        audio_chunk: AudioChunk,
        chunk_text: str = "",
        speed: float = 1,
        is_last_chunk: bool = False,
        normalizer: Optional[AudioNormalizer] = None,
    ) -> AudioChunk:
        """Trim silence from start and end

        Args:
            audio_chunk: Input audio chunk
            chunk_text: The text sent to the model to generate the resulting speech
            speed: The speaking speed of the voice
            is_last_chunk: Whether this is the last chunk
            normalizer: Optional AudioNormalizer instance for consistent normalization

        Returns:
            The audio chunk with silence trimmed
        """
        if normalizer is None:
            normalizer = AudioNormalizer()

        audio_chunk.audio = normalizer.normalize(audio_chunk.audio)

        trimmed_samples = 0

        # Trim start and end if enough samples
        if len(audio_chunk.audio) > (2 * normalizer.samples_to_trim):
            audio_chunk.audio = audio_chunk.audio[
                normalizer.samples_to_trim : -normalizer.samples_to_trim
            ]
            trimmed_samples += normalizer.samples_to_trim

        # Find non-silent portion and trim
        start_index, end_index = normalizer.find_first_last_non_silent(
            audio_chunk.audio, chunk_text, speed, is_last_chunk=is_last_chunk
        )
        audio_chunk.audio = audio_chunk.audio[start_index:end_index]
        trimmed_samples += start_index

        # Shift word timestamps back by the trimmed duration (samples / 24 kHz)
        if audio_chunk.word_timestamps is not None:
            for timestamp in audio_chunk.word_timestamps:
                timestamp.start_time -= trimmed_samples / 24000
                timestamp.end_time -= trimmed_samples / 24000

        return audio_chunk
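

# A minimal usage sketch (commented out). It assumes StreamingAudioWriter takes
# a format plus sample rate and that AudioChunk wraps a numpy array via an
# `audio` field -- check streaming_audio_writer.py and inference/base.py for the
# actual signatures before relying on this.
#
#   import asyncio
#
#   async def demo(samples: np.ndarray) -> bytes:
#       writer = StreamingAudioWriter("mp3", sample_rate=24000)  # assumed signature
#       chunk = AudioChunk(audio=samples)  # assumed constructor
#       chunk = await AudioService.convert_audio(
#           chunk, "mp3", writer, chunk_text="Hello world.", is_last_chunk=True
#       )
#       return chunk.output
#
#   asyncio.run(demo(np.zeros(24000, dtype=np.float32)))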