Mirror of https://github.com/livepeer/lpms (synced 2026-04-22 15:57:25 +08:00).
Commit: Handle AV_NOPTS_VALUE inputs.
This commit is contained in:
@@ -11,11 +11,43 @@ static int lpms_send_packet(struct input_ctx *ictx, AVCodecContext *dec, AVPacke
|
||||
return ret;
|
||||
}
|
||||
|
||||
// Best-guess PTS increment for one decoded video frame, expressed in the
// stream's time_base units. Preference order: the decoder-reported frame
// duration, one interval of the stream's real frame rate, then a 1-tick
// fallback so callers always get a positive step.
static int64_t decoded_video_pts_step(struct input_ctx *ictx, AVFrame *frame)
{
    // Prefer the duration carried on the frame itself when it is usable.
    if (frame && frame->duration > 0) return frame->duration;

    // Otherwise derive one frame interval from the stream's real frame rate.
    AVStream *stream = NULL;
    if (ictx && ictx->ic && ictx->vi >= 0) stream = ictx->ic->streams[ictx->vi];
    if (stream && stream->r_frame_rate.num > 0 && stream->r_frame_rate.den > 0) {
        int64_t tick = av_rescale_q(1, av_inv_q(stream->r_frame_rate), stream->time_base);
        if (tick > 0) return tick;
    }

    // Last resort: advance by a single time_base tick.
    return 1;
}
|
||||
|
||||
// Fix malformed decode timestamps (missing/regressive PTS) so downstream stages
|
||||
// receive a stable, non-AV_NOPTS_VALUE video timeline.
|
||||
static void fix_video_pts(struct input_ctx *ictx, AVFrame *frame)
|
||||
{
|
||||
int64_t pts = frame->pts;
|
||||
int synthesized = 0;
|
||||
if (pts == AV_NOPTS_VALUE) pts = frame->best_effort_timestamp;
|
||||
if (pts == AV_NOPTS_VALUE) {
|
||||
pts = decoded_video_pts_step(ictx, frame);
|
||||
if (ictx->last_video_pts != AV_NOPTS_VALUE) pts += ictx->last_video_pts;
|
||||
synthesized = 1;
|
||||
}
|
||||
if (ictx->last_video_pts != AV_NOPTS_VALUE && pts <= ictx->last_video_pts) {
|
||||
int64_t step = synthesized ? decoded_video_pts_step(ictx, frame) : 1;
|
||||
pts = ictx->last_video_pts + step;
|
||||
}
|
||||
frame->pts = pts;
|
||||
ictx->last_video_pts = pts;
|
||||
}
|
||||
|
||||
static int lpms_receive_frame(struct input_ctx *ictx, AVCodecContext *dec, AVFrame *frame)
|
||||
{
|
||||
int ret = avcodec_receive_frame(dec, frame);
|
||||
if (dec != ictx->vc) return ret;
|
||||
if (!ret && frame && !is_flush_frame(frame)) {
|
||||
fix_video_pts(ictx, frame);
|
||||
ictx->pkt_diff--; // decrease buffer count for non-sentinel video frames
|
||||
if (ictx->flushing) ictx->sentinel_count = 0;
|
||||
}
|
||||
@@ -328,6 +360,7 @@ int open_input(input_params *params, struct input_ctx *ctx)
|
||||
if (!ctx->last_frame_v) LPMS_ERR(open_input_err, "Unable to alloc last_frame_v");
|
||||
ctx->last_frame_a = av_frame_alloc();
|
||||
if (!ctx->last_frame_a) LPMS_ERR(open_input_err, "Unable to alloc last_frame_a");
|
||||
ctx->last_video_pts = AV_NOPTS_VALUE;
|
||||
|
||||
return 0;
|
||||
|
||||
|
||||
@@ -36,6 +36,8 @@ struct input_ctx {
|
||||
#define SENTINEL_MAX 8
|
||||
uint16_t sentinel_count;
|
||||
|
||||
int64_t last_video_pts; // Resets after each segment
|
||||
|
||||
// Packet held while decoder is blocked and needs to drain
|
||||
AVPacket *blocked_pkt;
|
||||
|
||||
|
||||
+18
-16
@@ -2543,8 +2543,8 @@ func TestTranscoder_LargeOutputs(t *testing.T) {
|
||||
close(closeCh)
|
||||
assert.Nil(err)
|
||||
assert.Equal(120, res.Decoded.Frames)
|
||||
assert.Equal(116, res.Encoded[0].Frames) // ffmpeg probably drops missing timestamp frames
|
||||
assert.Equal(56, res.Encoded[1].Frames)
|
||||
assert.Equal(120, res.Encoded[0].Frames) // passthrough
|
||||
assert.Equal(60, res.Encoded[1].Frames) // 30fps, 2 second input
|
||||
cmd := `
|
||||
# check input properties to ensure they still have the weird timestamps
|
||||
ffprobe -of csv -hide_banner -show_entries frame=pts_time,pkt_dts_time,media_type,pict_type $1/../data/missing-dts.ts 2>&1 | grep video > input.out
|
||||
@@ -2675,8 +2675,6 @@ func TestTranscoder_LargeOutputs(t *testing.T) {
|
||||
|
||||
|
||||
# check output
|
||||
ls -lha
|
||||
#ffprobe -of csv -hide_banner -show_entries frame=pts_time,pkt_dts_time,media_type,pict_type out-30fps.ts
|
||||
ffprobe -of csv -hide_banner -show_entries frame=pts_time,pkt_dts_time,media_type,pict_type out-30fps.ts 2>&1 | grep video > output.out
|
||||
cat <<- 'EOF2' > expected-output.out
|
||||
frame,video,25994.033333,25994.033333,I,
|
||||
@@ -2697,9 +2695,10 @@ func TestTranscoder_LargeOutputs(t *testing.T) {
|
||||
frame,video,25994.533333,25994.533333,B
|
||||
frame,video,25994.566667,25994.566667,B
|
||||
frame,video,25994.600000,25994.600000,P,
|
||||
frame,video,25994.666667,25994.666667,P,
|
||||
frame,video,25994.633333,25994.633333,P,
|
||||
frame,video,25994.666667,25994.666667,B,
|
||||
frame,video,25994.700000,25994.700000,B,
|
||||
frame,video,25994.733333,25994.733333,B,
|
||||
frame,video,25994.733333,25994.733333,P,
|
||||
frame,video,25994.766667,25994.766667,B,
|
||||
frame,video,25994.800000,25994.800000,P,
|
||||
frame,video,25994.833333,25994.833333,B,
|
||||
@@ -2710,30 +2709,33 @@ func TestTranscoder_LargeOutputs(t *testing.T) {
|
||||
frame,video,25995.000000,25995.000000,P,
|
||||
frame,video,25995.033333,25995.033333,B,
|
||||
frame,video,25995.066667,25995.066667,B,
|
||||
frame,video,25995.100000,25995.100000,P,
|
||||
frame,video,25995.133333,25995.133333,B,
|
||||
frame,video,25995.166667,25995.166667,P,
|
||||
frame,video,25995.200000,25995.200000,B,
|
||||
frame,video,25995.233333,25995.233333,B,
|
||||
frame,video,25995.266667,25995.266667,B,
|
||||
frame,video,25995.300000,25995.300000,B,
|
||||
frame,video,25995.333333,25995.333333,P,
|
||||
frame,video,25995.300000,25995.300000,P,
|
||||
frame,video,25995.333333,25995.333333,B,
|
||||
frame,video,25995.366667,25995.366667,B,
|
||||
frame,video,25995.400000,25995.400000,B,
|
||||
frame,video,25995.433333,25995.433333,B,
|
||||
frame,video,25995.466667,25995.466667,P,
|
||||
frame,video,25995.433333,25995.433333,P,
|
||||
frame,video,25995.466667,25995.466667,B,
|
||||
frame,video,25995.500000,25995.500000,B,
|
||||
frame,video,25995.533333,25995.533333,B,
|
||||
frame,video,25995.566667,25995.566667,B,
|
||||
frame,video,25995.600000,25995.600000,P,
|
||||
frame,video,25995.566667,25995.566667,P,
|
||||
frame,video,25995.600000,25995.600000,B,
|
||||
frame,video,25995.633333,25995.633333,B,
|
||||
frame,video,25995.666667,25995.666667,B,
|
||||
frame,video,25995.700000,25995.700000,P,
|
||||
frame,video,25995.733333,25995.733333,B,
|
||||
frame,video,25995.766667,25995.766667,P,
|
||||
frame,video,25995.766667,25995.766667,B,
|
||||
frame,video,25995.800000,25995.800000,B,
|
||||
frame,video,25995.833333,25995.833333,B,
|
||||
frame,video,25995.833333,25995.833333,P,
|
||||
frame,video,25995.866667,25995.866667,B,
|
||||
frame,video,25995.900000,25995.900000,P,
|
||||
frame,video,25995.900000,25995.900000,B,
|
||||
frame,video,25995.933333,25995.933333,B,
|
||||
frame,video,25995.966667,N/A,B,
|
||||
frame,video,25995.966667,N/A,P,
|
||||
frame,video,25996.000000,N/A,P,
|
||||
EOF2
|
||||
diff -u expected-output.out output.out
|
||||
|
||||
@@ -161,6 +161,7 @@ int transcode_init(struct transcode_thread *h, input_params *inp,
|
||||
int nb_outputs = h->nb_outputs;
|
||||
|
||||
if (!inp) LPMS_ERR(transcode_cleanup, "Missing input params")
|
||||
ictx->last_video_pts = AV_NOPTS_VALUE;
|
||||
|
||||
AVDictionary **demuxer_opts = NULL;
|
||||
if (inp->demuxer.opts) demuxer_opts = &inp->demuxer.opts;
|
||||
|
||||
Reference in New Issue
Block a user