summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authordalecurtis@chromium.org <dalecurtis@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>2014-08-02 07:35:55 +0000
committerdalecurtis@chromium.org <dalecurtis@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>2014-08-02 07:35:55 +0000
commita093e53ac4c5df37c17ee95c1ba456133bd4f013 (patch)
treed8c73a62d5c7369b845920e51af96047706f63c0
parent6b2a85c2577f7b4488ff1f2c804d57db2ea92537 (diff)
downloadchromium_src-a093e53ac4c5df37c17ee95c1ba456133bd4f013.zip
chromium_src-a093e53ac4c5df37c17ee95c1ba456133bd4f013.tar.gz
chromium_src-a093e53ac4c5df37c17ee95c1ba456133bd4f013.tar.bz2
Add support for partial append window end trimming.
Facilitates gapless playback across mp3 and aac. Relying on splice frames and crossfading doesn't work in cases where the prior segment's signal diverges to null-padding values too quickly relative to the newly appended segment.

BUG=395899
TEST=new unittests. llama-demo works.

Review URL: https://codereview.chromium.org/414603002

git-svn-id: svn://svn.chromium.org/chrome/trunk/src@287174 0039d316-1c4b-4281-b951-d872f2087c98
-rw-r--r--media/base/audio_discard_helper.cc63
-rw-r--r--media/base/audio_discard_helper.h28
-rw-r--r--media/base/audio_discard_helper_unittest.cc80
-rw-r--r--media/filters/chunk_demuxer_unittest.cc22
-rw-r--r--media/filters/frame_processor.cc92
-rw-r--r--media/filters/frame_processor.h5
-rw-r--r--media/filters/frame_processor_unittest.cc13
7 files changed, 236 insertions, 67 deletions
diff --git a/media/base/audio_discard_helper.cc b/media/base/audio_discard_helper.cc
index 303ee79..06e7a31 100644
--- a/media/base/audio_discard_helper.cc
+++ b/media/base/audio_discard_helper.cc
@@ -29,7 +29,8 @@ AudioDiscardHelper::AudioDiscardHelper(int sample_rate, size_t decoder_delay)
timestamp_helper_(sample_rate_),
discard_frames_(0),
last_input_timestamp_(kNoTimestamp()),
- delayed_discard_(false) {
+ delayed_discard_(false),
+ delayed_end_discard_(0) {
DCHECK_GT(sample_rate_, 0);
}
@@ -99,6 +100,9 @@ bool AudioDiscardHelper::ProcessBuffers(
const size_t frames_to_discard = std::min(discard_frames_, decoded_frames);
discard_frames_ -= frames_to_discard;
+ DVLOG(1) << "Initial discard of " << frames_to_discard << " out of "
+ << decoded_frames << " frames.";
+
// If everything would be discarded, indicate a new buffer is required.
if (frames_to_discard == decoded_frames) {
// For simplicity disallow cases where a buffer with discard padding is
@@ -112,6 +116,24 @@ bool AudioDiscardHelper::ProcessBuffers(
decoded_buffer->TrimStart(frames_to_discard);
}
+ // Process any delayed end discard from the previous buffer.
+ if (delayed_end_discard_ > 0) {
+ DCHECK_GT(decoder_delay_, 0u);
+
+ const size_t discard_index = decoder_delay_ - delayed_end_discard_;
+ DCHECK_LT(discard_index, decoder_delay_);
+
+ const size_t decoded_frames = decoded_buffer->frame_count();
+ DCHECK_LT(delayed_end_discard_, decoded_frames);
+
+ DVLOG(1) << "Delayed end discard of " << delayed_end_discard_ << " out of "
+ << decoded_frames << " frames starting at " << discard_index;
+
+ decoded_buffer->TrimRange(discard_index,
+ discard_index + delayed_end_discard_);
+ delayed_end_discard_ = 0;
+ }
+
// Handle front discard padding.
if (current_discard_padding.first > base::TimeDelta()) {
const size_t decoded_frames = decoded_buffer->frame_count();
@@ -153,6 +175,9 @@ bool AudioDiscardHelper::ProcessBuffers(
DCHECK(!discard_frames_);
discard_frames_ = start_frames_to_discard - frames_to_discard;
+ DVLOG(1) << "Front discard of " << frames_to_discard << " out of "
+ << decoded_frames << " frames starting at " << discard_start;
+
// If everything would be discarded, indicate a new buffer is required.
if (frames_to_discard == decoded_frames) {
// The buffer should not have been marked with end discard if the front
@@ -168,24 +193,42 @@ bool AudioDiscardHelper::ProcessBuffers(
// Handle end discard padding.
if (current_discard_padding.second > base::TimeDelta()) {
- // Limit end discarding to when there is no |decoder_delay_|, otherwise it's
- // non-trivial determining where to start discarding end frames.
- CHECK(!decoder_delay_);
-
const size_t decoded_frames = decoded_buffer->frame_count();
- const size_t end_frames_to_discard =
+ size_t end_frames_to_discard =
TimeDeltaToFrames(current_discard_padding.second);
+ if (decoder_delay_) {
+ // Delayed end discard only works if the decoder delay is less than a
+ // single buffer.
+ DCHECK_LT(decoder_delay_, original_frame_count);
+
+ // If the discard is >= the decoder delay, trim everything we can off the
+ // end of this buffer and the rest from the start of the next.
+ if (end_frames_to_discard >= decoder_delay_) {
+ DCHECK(!discard_frames_);
+ discard_frames_ = decoder_delay_;
+ end_frames_to_discard -= decoder_delay_;
+ } else {
+ DCHECK(!delayed_end_discard_);
+ std::swap(delayed_end_discard_, end_frames_to_discard);
+ }
+ }
+
if (end_frames_to_discard > decoded_frames) {
DLOG(ERROR) << "Encountered invalid discard padding value.";
return false;
}
- // If everything would be discarded, indicate a new buffer is required.
- if (end_frames_to_discard == decoded_frames)
- return false;
+ if (end_frames_to_discard > 0) {
+ DVLOG(1) << "End discard of " << end_frames_to_discard << " out of "
+ << decoded_frames;
- decoded_buffer->TrimEnd(end_frames_to_discard);
+ // If everything would be discarded, indicate a new buffer is required.
+ if (end_frames_to_discard == decoded_frames)
+ return false;
+
+ decoded_buffer->TrimEnd(end_frames_to_discard);
+ }
} else {
DCHECK(current_discard_padding.second == base::TimeDelta());
}
diff --git a/media/base/audio_discard_helper.h b/media/base/audio_discard_helper.h
index deeb45f..ded404f 100644
--- a/media/base/audio_discard_helper.h
+++ b/media/base/audio_discard_helper.h
@@ -31,8 +31,6 @@ class MEDIA_EXPORT AudioDiscardHelper {
// corresponding to the first encoded buffer is output. These frames are not
// represented in the encoded data stream and instead are an artifact of how
// most MP3 decoders work. See http://lame.sourceforge.net/tech-FAQ.txt
- //
- // NOTE: End discard is only supported when there is no |decoder_delay|.
AudioDiscardHelper(int sample_rate, size_t decoder_delay);
~AudioDiscardHelper();
@@ -63,16 +61,42 @@ class MEDIA_EXPORT AudioDiscardHelper {
}
private:
+ // The sample rate of the decoded audio samples. Used by TimeDeltaToFrames()
+ // and the timestamp helper.
const int sample_rate_;
+
+ // Some codecs output extra samples during the first decode. In order to trim
+ // DiscardPadding correctly the helper must know the offset into the decoded
+ // buffers at which real samples start.
const size_t decoder_delay_;
+
+ // Used to regenerate sample accurate timestamps for decoded buffers. The
+ // timestamp of the first encoded buffer seen by ProcessBuffers() is used as
+ // the base timestamp.
AudioTimestampHelper timestamp_helper_;
+ // The number of frames to discard from the front of the next buffer. Can be
+ // set by Reset() and added to by a front DiscardPadding larger than its
+ // associated buffer.
size_t discard_frames_;
+
+ // The last encoded buffer timestamp seen by ProcessBuffers() or kNoTimestamp
+ // if no buffers have been seen thus far. Used to issue warnings for buffer
+ // sequences with non-monotonic timestamps.
base::TimeDelta last_input_timestamp_;
+ // Certain codecs require two encoded buffers before they'll output the first
+ // decoded buffer. In this case DiscardPadding must be carried over from the
+ // previous encoded buffer. Enabled automatically if an encoded buffer is
+ // given to ProcessBuffers() with a NULL decoded buffer.
bool delayed_discard_;
DecoderBuffer::DiscardPadding delayed_discard_padding_;
+ // When |decoder_delay_| > 0, the number of frames which should be discarded
+ // from the next buffer. The index at which to start discarding is calculated
+ // by subtracting |delayed_end_discard_| from |decoder_delay_|.
+ size_t delayed_end_discard_;
+
DISALLOW_IMPLICIT_CONSTRUCTORS(AudioDiscardHelper);
};
diff --git a/media/base/audio_discard_helper_unittest.cc b/media/base/audio_discard_helper_unittest.cc
index 1ea0cc6..8919530 100644
--- a/media/base/audio_discard_helper_unittest.cc
+++ b/media/base/audio_discard_helper_unittest.cc
@@ -325,18 +325,40 @@ TEST(AudioDiscardHelperTest, InitialDiscardAndDiscardPaddingAndDecoderDelay) {
encoded_buffer->set_discard_padding(
std::make_pair(kDuration / 2, base::TimeDelta()));
- // All of the first buffer should be discarded.
+ // All of the first buffer should be discarded, half from the initial delay and
+ // another half from the front discard padding.
+ //
+ // Encoded Discard Delay
+ // |--------| |---------| |----|
+ // |AAAAAAAA| --> |....|AAAA| --> |AAAA| -------> NULL
+ // |--------| |---------| |----|
+ // Decoded Discard Front Padding
+ //
ASSERT_FALSE(discard_helper.ProcessBuffers(encoded_buffer, decoded_buffer));
ASSERT_TRUE(discard_helper.initialized());
- // Processing another buffer (with the same discard padding) should discard
- // the back half of the buffer since kDecoderDelay is half a buffer.
- encoded_buffer->set_timestamp(kTimestamp + kDuration);
+ // Processing another buffer that has front discard set to half the buffer's
+ // duration should discard the back half of the buffer since kDecoderDelay is
+ // half a buffer. The end padding should not be discarded until another
+ // buffer is processed. kDuration / 4 is chosen for the end discard since it
+ // will force the end discard to start after position zero within the next
+ // decoded buffer.
+ //
+ // Encoded Discard Front Padding (from B)
+ // |--------| |---------| |----|
+ // |BBBBBBBB| --> |AAAA|BBBB| ----------> |AAAA|
+ // |--------| |---------| |----|
+ // Decoded
+ // (includes carryover from A)
+ //
+ encoded_buffer->set_timestamp(encoded_buffer->timestamp() + kDuration);
+ encoded_buffer->set_discard_padding(
+ std::make_pair(kDuration / 2, kDuration / 4));
decoded_buffer = CreateDecodedBuffer(kTestFrames);
ASSERT_FLOAT_EQ(0.0f, ExtractDecodedData(decoded_buffer, 0));
ASSERT_NEAR(kDecoderDelay * kDataStep,
ExtractDecodedData(decoded_buffer, kDecoderDelay),
- kDataStep * 1000);
+ kDataStep / 1000);
ASSERT_TRUE(discard_helper.ProcessBuffers(encoded_buffer, decoded_buffer));
EXPECT_EQ(kTimestamp, decoded_buffer->timestamp());
EXPECT_EQ(kDuration / 2, decoded_buffer->duration());
@@ -344,6 +366,54 @@ TEST(AudioDiscardHelperTest, InitialDiscardAndDiscardPaddingAndDecoderDelay) {
// Verify it was actually the latter half of the buffer that was removed.
ASSERT_FLOAT_EQ(0.0f, ExtractDecodedData(decoded_buffer, 0));
+
+ // Verify the end discard padding is carried over to the next buffer. Use
+ // kDuration / 2 for the end discard padding so that the next buffer has its
+ // start entirely discarded.
+ //
+ // Encoded Discard End Padding (from B)
+ // |--------| |---------| |-------|
+ // |CCCCCCCC| --> |BBBB|CCCC| ----------> |BB|CCCC|
+ // |--------| |---------| |-------|
+ // Decoded
+ // (includes carryover from B)
+ //
+ encoded_buffer->set_timestamp(encoded_buffer->timestamp() + kDuration);
+ encoded_buffer->set_discard_padding(
+ std::make_pair(base::TimeDelta(), kDuration / 2));
+ decoded_buffer = CreateDecodedBuffer(kTestFrames);
+ ASSERT_TRUE(discard_helper.ProcessBuffers(encoded_buffer, decoded_buffer));
+ EXPECT_EQ(kTimestamp + kDuration / 2, decoded_buffer->timestamp());
+ EXPECT_EQ(3 * kDuration / 4, decoded_buffer->duration());
+ EXPECT_EQ(3 * kTestFrames / 4, decoded_buffer->frame_count());
+
+ // Verify it was actually the second quarter of the buffer that was removed.
+ const int kDiscardFrames = kTestFrames / 4;
+ ASSERT_FLOAT_EQ(0.0f, ExtractDecodedData(decoded_buffer, 0));
+ ASSERT_FLOAT_EQ(
+ kDiscardFrames * 2 * kDataStep,
+ ExtractDecodedData(decoded_buffer, kDecoderDelay - kDiscardFrames));
+
+ // One last test to ensure carryover discard from the start works.
+ //
+ // Encoded Discard End Padding (from C)
+ // |--------| |---------| |----|
+ // |DDDDDDDD| --> |CCCC|DDDD| ----------> |DDDD|
+ // |--------| |---------| |----|
+ // Decoded
+ // (includes carryover from C)
+ //
+ encoded_buffer->set_timestamp(encoded_buffer->timestamp() + kDuration);
+ encoded_buffer->set_discard_padding(DecoderBuffer::DiscardPadding());
+ decoded_buffer = CreateDecodedBuffer(kTestFrames);
+ ASSERT_FLOAT_EQ(0.0f, ExtractDecodedData(decoded_buffer, 0));
+ ASSERT_TRUE(discard_helper.ProcessBuffers(encoded_buffer, decoded_buffer));
+ EXPECT_EQ(kTimestamp + kDuration / 2 + 3 * kDuration / 4,
+ decoded_buffer->timestamp());
+ EXPECT_EQ(kDuration / 2, decoded_buffer->duration());
+ EXPECT_EQ(kTestFrames / 2, decoded_buffer->frame_count());
+ ASSERT_FLOAT_EQ(kTestFrames / 2 * kDataStep,
+ ExtractDecodedData(decoded_buffer, 0));
}
TEST(AudioDiscardHelperTest, DelayedDiscardInitialDiscardAndDiscardPadding) {
diff --git a/media/filters/chunk_demuxer_unittest.cc b/media/filters/chunk_demuxer_unittest.cc
index 0bb3ecc..4ac5616 100644
--- a/media/filters/chunk_demuxer_unittest.cc
+++ b/media/filters/chunk_demuxer_unittest.cc
@@ -458,6 +458,9 @@ class ChunkDemuxerTest : public ::testing::Test {
<< " All text blocks must be keyframes";
}
+ if (track_number == kAudioTrackNum)
+ ASSERT_TRUE(block_info.flags & kWebMFlagKeyframe);
+
blocks->push_back(block_info);
}
}
@@ -1370,19 +1373,19 @@ TEST_F(ChunkDemuxerTest, InitSegmentSetsNeedRandomAccessPointFlag) {
ASSERT_TRUE(audio_stream && video_stream && text_stream);
AppendMuxedCluster(
- MuxedStreamInfo(kAudioTrackNum, "0 23K"),
+ MuxedStreamInfo(kAudioTrackNum, "23K"),
MuxedStreamInfo(kVideoTrackNum, "0 30K"),
MuxedStreamInfo(kTextTrackNum, "25K 40K"));
CheckExpectedRanges(kSourceId, "{ [23,46) }");
AppendInitSegment(HAS_TEXT | HAS_AUDIO | HAS_VIDEO);
AppendMuxedCluster(
- MuxedStreamInfo(kAudioTrackNum, "46 69K"),
+ MuxedStreamInfo(kAudioTrackNum, "46K 69K"),
MuxedStreamInfo(kVideoTrackNum, "60 90K"),
MuxedStreamInfo(kTextTrackNum, "80K 90K"));
CheckExpectedRanges(kSourceId, "{ [23,92) }");
- CheckExpectedBuffers(audio_stream, "23 69");
+ CheckExpectedBuffers(audio_stream, "23 46 69");
CheckExpectedBuffers(video_stream, "30 90");
CheckExpectedBuffers(text_stream, "25 40 80 90");
}
@@ -2501,7 +2504,7 @@ TEST_F(ChunkDemuxerTest, GetBufferedRanges_AudioVideoText) {
// Append audio & video data
AppendMuxedCluster(
- MuxedStreamInfo(kAudioTrackNum, "0K 23"),
+ MuxedStreamInfo(kAudioTrackNum, "0K 23K"),
MuxedStreamInfo(kVideoTrackNum, "0K 33"));
// Verify that a text track with no cues does not result in an empty buffered
@@ -2510,7 +2513,7 @@ TEST_F(ChunkDemuxerTest, GetBufferedRanges_AudioVideoText) {
// Add some text cues.
AppendMuxedCluster(
- MuxedStreamInfo(kAudioTrackNum, "100K 123"),
+ MuxedStreamInfo(kAudioTrackNum, "100K 123K"),
MuxedStreamInfo(kVideoTrackNum, "100K 133"),
MuxedStreamInfo(kTextTrackNum, "100K 200K"));
@@ -3356,7 +3359,7 @@ TEST_F(ChunkDemuxerTest, AppendWindow_Audio) {
//
// The first 50ms of the range should be truncated since it overlaps
// the start of the append window.
- CheckExpectedRanges(kSourceId, "{ [50,270) }");
+ CheckExpectedRanges(kSourceId, "{ [50,280) }");
// The "50P" buffer is the "0" buffer marked for complete discard. The next
// "50" buffer is the "30" buffer marked with 20ms of start discard.
@@ -3369,7 +3372,7 @@ TEST_F(ChunkDemuxerTest, AppendWindow_Audio) {
AppendSingleStreamCluster(
kSourceId, kAudioTrackNum,
"360K 390K 420K 450K 480K 510K 540K 570K 600K 630K");
- CheckExpectedRanges(kSourceId, "{ [50,270) [360,630) }");
+ CheckExpectedRanges(kSourceId, "{ [50,280) [360,650) }");
}
TEST_F(ChunkDemuxerTest, AppendWindow_AudioOverlapStartAndEnd) {
@@ -3382,9 +3385,8 @@ TEST_F(ChunkDemuxerTest, AppendWindow_AudioOverlapStartAndEnd) {
// Append a cluster that starts before and ends after the append window.
AppendSingleStreamCluster(kSourceId, kAudioTrackNum, "0K");
- // Verify that everything is dropped in this case. No partial append should
- // be generated.
- CheckExpectedRanges(kSourceId, "{ }");
+ // Verify the append is clipped to the append window.
+ CheckExpectedRanges(kSourceId, "{ [10,20) }");
}
TEST_F(ChunkDemuxerTest, AppendWindow_WebMFile_AudioOnly) {
diff --git a/media/filters/frame_processor.cc b/media/filters/frame_processor.cc
index a10ed50..6b6c8c3 100644
--- a/media/filters/frame_processor.cc
+++ b/media/filters/frame_processor.cc
@@ -332,21 +332,11 @@ bool FrameProcessor::HandlePartialAppendWindowTrimming(
const scoped_refptr<StreamParserBuffer>& buffer) {
DCHECK(buffer->duration() > base::TimeDelta());
DCHECK_EQ(DemuxerStream::AUDIO, buffer->type());
+ DCHECK(buffer->IsKeyframe());
const base::TimeDelta frame_end_timestamp =
buffer->timestamp() + buffer->duration();
- // Ignore any buffers which start after |append_window_start| or end after
- // |append_window_end|. For simplicity, even those that start before
- // |append_window_start|.
- if (buffer->timestamp() > append_window_start ||
- frame_end_timestamp > append_window_end) {
- // TODO(dalecurtis): Partial append window trimming could also be done
- // around |append_window_end|, but is not necessary since splice frames
- // cover overlaps there.
- return false;
- }
-
// If the buffer is entirely before |append_window_start|, save it as preroll
// for the first buffer which overlaps |append_window_start|.
if (buffer->timestamp() < append_window_start &&
@@ -355,49 +345,74 @@ bool FrameProcessor::HandlePartialAppendWindowTrimming(
return false;
}
- // There's nothing to be done if we have no preroll and the buffer starts on
- // the append window start.
- if (buffer->timestamp() == append_window_start && !audio_preroll_buffer_)
+ // If the buffer is entirely after |append_window_end| there's nothing to do.
+ if (buffer->timestamp() >= append_window_end)
return false;
- // See if a partial discard can be done around |append_window_start|.
- DCHECK(buffer->timestamp() <= append_window_start);
- DCHECK(buffer->IsKeyframe());
- DVLOG(1) << "Truncating buffer which overlaps append window start."
- << " presentation_timestamp " << buffer->timestamp().InSecondsF()
- << " append_window_start " << append_window_start.InSecondsF();
+ DCHECK(buffer->timestamp() >= append_window_start ||
+ frame_end_timestamp > append_window_start);
+
+ bool processed_buffer = false;
- // If this isn't the first buffer discarded by the append window, try to use
- // the last buffer discarded for preroll. This ensures that the partially
- // trimmed buffer can be correctly decoded.
+ // If we have a preroll buffer see if we can attach it to the first buffer
+ // overlapping or after |append_window_start|.
if (audio_preroll_buffer_) {
- // We only want to use the preroll buffer if it directly precedes (less than
- // one sample apart) the current buffer.
+ // We only want to use the preroll buffer if it directly precedes (less
+ // than one sample apart) the current buffer.
const int64 delta = std::abs((audio_preroll_buffer_->timestamp() +
audio_preroll_buffer_->duration() -
buffer->timestamp()).InMicroseconds());
if (delta < sample_duration_.InMicroseconds()) {
+ DVLOG(1) << "Attaching audio preroll buffer ["
+ << audio_preroll_buffer_->timestamp().InSecondsF() << ", "
+ << (audio_preroll_buffer_->timestamp() +
+ audio_preroll_buffer_->duration()).InSecondsF() << ") to "
+ << buffer->timestamp().InSecondsF();
buffer->SetPrerollBuffer(audio_preroll_buffer_);
+ processed_buffer = true;
} else {
// TODO(dalecurtis): Add a MEDIA_LOG() for when this is dropped unused.
}
audio_preroll_buffer_ = NULL;
}
- // Decrease the duration appropriately. We only need to shorten the buffer if
- // it overlaps |append_window_start|.
+ // See if a partial discard can be done around |append_window_start|.
if (buffer->timestamp() < append_window_start) {
+ DVLOG(1) << "Truncating buffer which overlaps append window start."
+ << " presentation_timestamp " << buffer->timestamp().InSecondsF()
+ << " frame_end_timestamp " << frame_end_timestamp.InSecondsF()
+ << " append_window_start " << append_window_start.InSecondsF();
+
+ // Mark the overlapping portion of the buffer for discard.
buffer->set_discard_padding(std::make_pair(
append_window_start - buffer->timestamp(), base::TimeDelta()));
+
+ // Adjust the timestamp of this buffer forward to |append_window_start| and
+ // decrease the duration to compensate.
+ buffer->set_timestamp(append_window_start);
+ buffer->SetDecodeTimestamp(append_window_start);
buffer->set_duration(frame_end_timestamp - append_window_start);
+ processed_buffer = true;
}
- // Adjust the timestamp of this buffer forward to |append_window_start|. The
- // timestamps are always set, even if |buffer|'s timestamp is already set to
- // |append_window_start|, to ensure the preroll buffer is setup correctly.
- buffer->set_timestamp(append_window_start);
- buffer->SetDecodeTimestamp(append_window_start);
- return true;
+ // See if a partial discard can be done around |append_window_end|.
+ if (frame_end_timestamp > append_window_end) {
+ DVLOG(1) << "Truncating buffer which overlaps append window end."
+ << " presentation_timestamp " << buffer->timestamp().InSecondsF()
+ << " frame_end_timestamp " << frame_end_timestamp.InSecondsF()
+ << " append_window_end " << append_window_end.InSecondsF();
+
+ // Mark the overlapping portion of the buffer for discard.
+ buffer->set_discard_padding(
+ std::make_pair(buffer->discard_padding().first,
+ frame_end_timestamp - append_window_end));
+
+ // Decrease the duration of the buffer to remove the discarded portion.
+ buffer->set_duration(append_window_end - buffer->timestamp());
+ processed_buffer = true;
+ }
+
+ return processed_buffer;
}
bool FrameProcessor::ProcessFrame(
@@ -564,7 +579,7 @@ bool FrameProcessor::ProcessFrame(
// 9. Let frame end timestamp equal the sum of presentation timestamp and
// frame duration.
- const base::TimeDelta frame_end_timestamp =
+ base::TimeDelta frame_end_timestamp =
presentation_timestamp + frame_duration;
// 10. If presentation timestamp is less than appendWindowStart, then set
@@ -582,8 +597,9 @@ bool FrameProcessor::ProcessFrame(
HandlePartialAppendWindowTrimming(append_window_start,
append_window_end,
frame)) {
- // If |frame| was shortened a discontinuity may exist, so treat the next
- // frames appended as if they were the beginning of a new media segment.
+ // If |frame| was front-trimmed a discontinuity may exist, so treat the
+ // next frames appended as if they were the beginning of a new media
+ // segment.
if (frame->timestamp() != presentation_timestamp && !sequence_mode_)
*new_media_segment = true;
@@ -593,9 +609,7 @@ bool FrameProcessor::ProcessFrame(
// frame duration and reduces spurious discontinuity detection.
decode_timestamp = frame->GetDecodeTimestamp();
presentation_timestamp = frame->timestamp();
-
- // The end timestamp of the frame should be unchanged.
- DCHECK(frame_end_timestamp == presentation_timestamp + frame->duration());
+ frame_end_timestamp = frame->timestamp() + frame->duration();
}
if (presentation_timestamp < append_window_start ||
diff --git a/media/filters/frame_processor.h b/media/filters/frame_processor.h
index 0067b78..0cd24f1 100644
--- a/media/filters/frame_processor.h
+++ b/media/filters/frame_processor.h
@@ -114,7 +114,10 @@ class MEDIA_EXPORT FrameProcessor {
// |append_window_start| will be marked for post-decode discard. Further, if
// |audio_preroll_buffer_| exists and abuts |buffer|, it will be set as
// preroll on |buffer| and |audio_preroll_buffer_| will be cleared. If the
- // preroll buffer does not abut |buffer|, it will be discarded, but not used.
+ // preroll buffer does not abut |buffer|, it will be discarded unused.
+ //
+ // Likewise, if |buffer| overlaps |append_window_end|, the portion of |buffer|
+ // after |append_window_end| will be marked for post-decode discard.
//
// If |buffer| lies entirely before |append_window_start|, and thus would
// normally be discarded, |audio_preroll_buffer_| will be set to |buffer| and
diff --git a/media/filters/frame_processor_unittest.cc b/media/filters/frame_processor_unittest.cc
index f4cde5a..d6cabf0 100644
--- a/media/filters/frame_processor_unittest.cc
+++ b/media/filters/frame_processor_unittest.cc
@@ -607,6 +607,19 @@ TEST_P(FrameProcessorTest, AppendWindowFilterWithInexactPreroll) {
CheckReadsThenReadStalls(audio_.get(), "0P 0:9.75 10:20");
}
+TEST_P(FrameProcessorTest, AppendWindowFilterWithInexactPreroll_2) {
+ InSequence s;
+ AddTestTracks(HAS_AUDIO);
+ new_media_segment_ = true;
+ if (GetParam())
+ frame_processor_->SetSequenceMode(true);
+ SetTimestampOffset(-frame_duration_);
+ EXPECT_CALL(callbacks_, PossibleDurationIncrease(frame_duration_ * 2));
+ ProcessFrames("0K 10.25K 20K", "");
+ CheckExpectedRangesByTimestamp(audio_.get(), "{ [0,20) }");
+ CheckReadsThenReadStalls(audio_.get(), "0P 0:10.25 10:20");
+}
+
TEST_P(FrameProcessorTest, AllowNegativeFramePTSAndDTSBeforeOffsetAdjustment) {
InSequence s;
AddTestTracks(HAS_AUDIO);