better audio handling
```diff
@@ -49,6 +49,8 @@ docker logs -f frame-stream-player
 
 The app sets `FFMPEG_INPUT_SEEKABLE=0` by default so `ffmpeg` reads stream inputs sequentially and avoids extra HTTP range connections. If a specific VOD file requires seeking for metadata, set `FFMPEG_INPUT_SEEKABLE=-1` to restore ffmpeg's automatic behavior.
 
+Audio output from `ffmpeg` is buffered before it is written to the browser so short HTTP backpressure pauses do not stall frame generation. Tune the cap with `MAX_AUDIO_QUEUE_BYTES`; the default is `16777216`.
+
 ## Tuning
 
 The UI intentionally hides these settings, but the backend still supports them through `POST /api/session`.
```
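For reference, a tuned session request might look like the sketch below. Only the `POST /api/session` endpoint and the `fps`, `quality`, and `width` option names are confirmed by this commit; the host, port, and remaining payload fields are assumptions for illustration.

```js
// Hypothetical sketch of a tuned session request. fps/quality/width mirror
// session.options as read in createPlayback(); other fields are assumed.
const response = await fetch('http://localhost:3000/api/session', {
  method: 'POST',
  headers: { 'Content-Type': 'application/json' },
  body: JSON.stringify({
    url: 'https://example.com/stream.m3u8', // assumed field name
    fps: 15,
    quality: 70,
    width: 960,
  }),
});
const session = await response.json();
```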
```diff
@@ -11,6 +11,7 @@ services:
       NODE_ENV: production
       FFMPEG_LOG_LEVEL: warning
       FFMPEG_INPUT_SEEKABLE: "0"
+      MAX_AUDIO_QUEUE_BYTES: "16777216"
       RECENT_URLS_PATH: /app/data/recent-urls.json
     extra_hosts:
       - "host.docker.internal:host-gateway"
```
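The numeric values are quoted so YAML keeps them as strings, the conventional safe form for compose `environment` entries.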
```diff
@@ -25,6 +25,7 @@ const RECENT_URL_LIMIT = clampInteger(process.env.RECENT_URL_LIMIT, 12, 1, 50);
 const SESSION_TTL_MS = 60 * 60 * 1000;
 const PLAYBACK_READY_TIMEOUT_MS = 15 * 1000;
 const MAX_WS_BUFFER_BYTES = 12 * 1024 * 1024;
+const MAX_AUDIO_QUEUE_BYTES = clampInteger(process.env.MAX_AUDIO_QUEUE_BYTES, 16 * 1024 * 1024, 256 * 1024, 128 * 1024 * 1024);
 const JPEG_SOI = Buffer.from([0xff, 0xd8]);
 const JPEG_EOI = Buffer.from([0xff, 0xd9]);
 
```
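`clampInteger` is defined outside this hunk; a minimal sketch of the behavior its call sites imply (parse an environment variable, fall back to a default, clamp into `[min, max]`) could look like the following. The actual implementation in the repo may differ.

```js
// Minimal sketch, assuming clampInteger(raw, fallback, min, max) parses an
// env var string and bounds the result. Not the repo's actual code.
function clampInteger(raw, fallback, min, max) {
  const parsed = Number.parseInt(raw, 10);
  if (!Number.isFinite(parsed)) {
    return fallback; // unset or non-numeric input
  }
  return Math.min(max, Math.max(min, parsed));
}
```

Under that reading, `MAX_AUDIO_QUEUE_BYTES` defaults to 16 MiB and is clamped to the range 256 KiB to 128 MiB.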
```diff
@@ -340,6 +341,12 @@ function createPlayback(session) {
   const { fps, quality, width } = session.options;
   const label = `playback:${shortId(session.id)}`;
   let audioResponse = null;
+  let audioQueue = [];
+  let audioQueueBytes = 0;
+  let audioQueuePeakBytes = 0;
+  let audioBackpressureCount = 0;
+  let audioDrainCount = 0;
+  let audioDrainPending = false;
   let websocket = null;
   let ffmpeg = null;
   let logger = null;
```
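`audioQueue`, `audioQueueBytes`, and `audioDrainPending` carry the actual buffering state; `audioQueuePeakBytes`, `audioBackpressureCount`, and `audioDrainCount` are diagnostics that only feed the playback-closed log line extended at the end of this commit.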
```diff
@@ -447,7 +454,15 @@ function createPlayback(session) {
 
     const frameStream = ffmpeg.stdio[3];
 
-    ffmpeg.stdout.pipe(audioResponse);
+    ffmpeg.stdout.on('data', handleAudioData);
+    ffmpeg.stdout.on('error', (error) => {
+      logWarn(`audio output error kind=${label} error=${oneLine(error.message)}`);
+      stop('audio_output_error');
+    });
+    frameStream.on('error', (error) => {
+      logWarn(`frame output error kind=${label} error=${oneLine(error.message)}`);
+      stop('frame_output_error');
+    });
     frameStream.on('data', handleFrameData);
 
     ffmpeg.stderr.on('data', (chunk) => {
```
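Design note: replacing `ffmpeg.stdout.pipe(audioResponse)` with a `data` handler matters because `pipe()` pauses the source stream when the destination reports backpressure. ffmpeg emits audio on stdout and JPEG frames on fd 3 from the same process, so a paused stdout can block ffmpeg and stall frame output too; the manual handler keeps stdout flowing and shifts backpressure handling into the bounded queue added below.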
```diff
@@ -467,6 +482,69 @@ function createPlayback(session) {
     });
   }
 
+  function handleAudioData(chunk) {
+    if (closed) {
+      return;
+    }
+
+    if (!audioResponse || audioResponse.writableEnded) {
+      stop('audio_response_closed');
+      return;
+    }
+
+    audioQueue.push(chunk);
+    audioQueueBytes += chunk.length;
+    audioQueuePeakBytes = Math.max(audioQueuePeakBytes, audioQueueBytes);
+
+    if (audioQueueBytes > MAX_AUDIO_QUEUE_BYTES) {
+      logWarn(`audio queue overflow kind=${label} bytes=${audioQueueBytes} maxBytes=${MAX_AUDIO_QUEUE_BYTES}`);
+      stop('audio_queue_overflow');
+      return;
+    }
+
+    flushAudioQueue();
+  }
+
+  function flushAudioQueue() {
+    if (closed || audioDrainPending || !audioResponse || audioResponse.writableEnded) {
+      return;
+    }
+
+    while (audioQueue.length > 0) {
+      const chunk = audioQueue.shift();
+      audioQueueBytes -= chunk.length;
+
+      let canContinue = false;
+
+      try {
+        canContinue = audioResponse.write(chunk);
+      } catch (error) {
+        logWarn(`audio response write failed kind=${label} error=${oneLine(error.message)}`);
+        stop('audio_response_write_error');
+        return;
+      }
+
+      if (!canContinue) {
+        audioBackpressureCount += 1;
+        waitForAudioDrain();
+        return;
+      }
+    }
+  }
+
+  function waitForAudioDrain() {
+    if (audioDrainPending || !audioResponse || audioResponse.writableEnded) {
+      return;
+    }
+
+    audioDrainPending = true;
+    audioResponse.once('drain', () => {
+      audioDrainPending = false;
+      audioDrainCount += 1;
+      flushAudioQueue();
+    });
+  }
+
   function handleFrameData(chunk) {
     frameBuffer = Buffer.concat([frameBuffer, chunk]);
 
```
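The three functions above are the standard Node write/drain pattern with an application-level byte cap (note the `audioDrainPending` guard in `flushAudioQueue`, without which new chunks would bypass the queue and pile up in the socket's internal buffer instead). A self-contained sketch of the same pattern, runnable on its own against a deliberately slow `Writable` (all names here are illustrative, not the repo's):

```js
// Standalone sketch of the bounded write/drain pattern, fed faster than the
// sink can absorb so write() returns false and the queue fills.
const { Writable } = require('node:stream');

const MAX_QUEUE_BYTES = 256 * 1024;
const queue = [];
let queueBytes = 0;
let drainPending = false;

// Sink that signals backpressure past ~16 KiB and takes 5 ms per chunk.
const sink = new Writable({
  highWaterMark: 16 * 1024,
  write(chunk, _encoding, callback) {
    setTimeout(callback, 5);
  },
});

function enqueue(chunk) {
  queue.push(chunk);
  queueBytes += chunk.length;
  if (queueBytes > MAX_QUEUE_BYTES) {
    throw new Error(`queue overflow: ${queueBytes} > ${MAX_QUEUE_BYTES}`);
  }
  flush();
}

function flush() {
  if (drainPending) {
    return; // a drain is outstanding; keep the data queued
  }
  while (queue.length > 0) {
    const chunk = queue.shift();
    queueBytes -= chunk.length;
    if (!sink.write(chunk)) {
      waitForDrain(); // stop writing until the sink drains
      return;
    }
  }
}

function waitForDrain() {
  if (drainPending) {
    return;
  }
  drainPending = true;
  sink.once('drain', () => {
    drainPending = false;
    flush();
  });
}

// Feed 32 chunks of 4 KiB in one tick; most end up queued, then drain out.
for (let i = 0; i < 32; i += 1) {
  enqueue(Buffer.alloc(4 * 1024, i));
}
console.log(`queued ${queueBytes} bytes while waiting for drain`);
```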
```diff
@@ -530,6 +608,8 @@ function createPlayback(session) {
     clearReadyTimer();
     releaseSource();
     playbacks.delete(session.id);
+    audioQueue = [];
+    audioQueueBytes = 0;
 
     if (audioResponse && !audioResponse.writableEnded) {
       audioResponse.end();
```
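Clearing the queue before ending the response deliberately drops any unplayed audio: once `stop()` runs the session is over, so there is no point flushing the remainder to the browser.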
```diff
@@ -546,7 +626,7 @@ function createPlayback(session) {
       websocket.close(websocketCode, websocketReason);
     }
 
-    logInfo(`playback closed kind=${label} reason=${stopReason} frames=${frameIndex} skippedFrames=${skippedFrames}`);
+    logInfo(`playback closed kind=${label} reason=${stopReason} frames=${frameIndex} skippedFrames=${skippedFrames} audioQueuePeakBytes=${audioQueuePeakBytes} audioBackpressureCount=${audioBackpressureCount} audioDrainCount=${audioDrainCount}`);
   }
 
   function clearReadyTimer() {
```
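With hypothetical values, the extended line reads like `playback closed kind=playback:ab12cd reason=audio_response_closed frames=1843 skippedFrames=2 audioQueuePeakBytes=393216 audioBackpressureCount=5 audioDrainCount=5`: the peak shows how close the session came to `MAX_AUDIO_QUEUE_BYTES`, and matching backpressure/drain counts indicate every stall recovered before shutdown.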