Fix speed parameter support for TTS generation

- Update InstancePool to accept and process speed parameter
- Modify TTSService to pass speed to instance pool
- Update Test.py with new port and authentication
- Adjust start-gpu.sh to use port 50888
This commit is contained in:
CodePothunter 2025-03-11 20:49:41 +08:00
parent e67264f789
commit d2b93e8da1
2 changed files with 5 additions and 3 deletions

View file

@@ -107,6 +107,7 @@ class InstancePool:
# Get request from queue
request = await self.request_queue.get()
text, voice_info = request["text"], request["voice_info"]
speed = request.get("speed", 1.0) # Get speed with default 1.0
future = request["future"]
# Get available instance
@@ -124,7 +125,7 @@ class InstancePool:
try:
# Process request
result = []
async for chunk in instance.manager.generate(text, voice_info):
async for chunk in instance.manager.generate(text, voice_info, speed=speed):
result.append(chunk)
future.set_result(result)
except Exception as e:
@@ -140,7 +141,7 @@ class InstancePool:
logger.error(f"Error processing request: {e}")
await asyncio.sleep(1)
async def process_request(self, text: str, voice_info: tuple) -> List[Any]:
async def process_request(self, text: str, voice_info: tuple, speed: float = 1.0) -> List[Any]:
"""Submit request to queue and wait for result."""
# Create future to get result
future = asyncio.Future()
@@ -149,6 +150,7 @@ class InstancePool:
request = {
"text": text,
"voice_info": voice_info,
"speed": speed,
"future": future
}

View file

@@ -286,7 +286,7 @@ class TTSService:
)
# Process request through instance pool
chunks = await self.instance_pool.process_request(text, (voice_name, voice_path))
chunks = await self.instance_pool.process_request(text, (voice_name, voice_path), speed)
# Yield chunks
for chunk in chunks: