diff options
author | scherkus@chromium.org <scherkus@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98> | 2014-05-03 00:05:58 +0000 |
---|---|---|
committer | scherkus@chromium.org <scherkus@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98> | 2014-05-03 00:05:58 +0000 |
commit | 6d4f03e98b018fb5c1b69ea42bf4e8396dc8284c (patch) | |
tree | 81be5241e99e9563eccd6dc855d4dd60b810c4af /media/filters | |
parent | ad73f546fffb603f3c08a23b3aa09659cc8951a4 (diff) | |
download | chromium_src-6d4f03e98b018fb5c1b69ea42bf4e8396dc8284c.zip chromium_src-6d4f03e98b018fb5c1b69ea42bf4e8396dc8284c.tar.gz chromium_src-6d4f03e98b018fb5c1b69ea42bf4e8396dc8284c.tar.bz2 |
Introduce AudioClock to improve playback time calculations.
The previous method for calculating playback time assumed all data
currently buffered contained audio data scaled at the same rate. In
reality, when the playback rate changes we enter a brief period where
audio data scaled at different rates is buffered. The end result was
that the media clock would jump backwards/forwards, introducing playback
jank.
BUG=367343
Review URL: https://codereview.chromium.org/256163005
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@267982 0039d316-1c4b-4281-b951-d872f2087c98
Diffstat (limited to 'media/filters')
-rw-r--r-- | media/filters/audio_clock.cc | 135 | ||||
-rw-r--r-- | media/filters/audio_clock.h | 76 | ||||
-rw-r--r-- | media/filters/audio_clock_unittest.cc | 177 | ||||
-rw-r--r-- | media/filters/audio_renderer_impl.cc | 88 | ||||
-rw-r--r-- | media/filters/audio_renderer_impl.h | 10 | ||||
-rw-r--r-- | media/filters/audio_renderer_impl_unittest.cc | 26 |
6 files changed, 449 insertions, 63 deletions
diff --git a/media/filters/audio_clock.cc b/media/filters/audio_clock.cc new file mode 100644 index 0000000..0454e85 --- /dev/null +++ b/media/filters/audio_clock.cc @@ -0,0 +1,135 @@ +// Copyright 2014 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "media/filters/audio_clock.h" + +#include "base/logging.h" +#include "media/base/buffers.h" + +namespace media { + +AudioClock::AudioClock(int sample_rate) + : sample_rate_(sample_rate), last_endpoint_timestamp_(kNoTimestamp()) { +} + +AudioClock::~AudioClock() { +} + +void AudioClock::WroteAudio(int frames, + int delay_frames, + float playback_rate, + base::TimeDelta timestamp) { + CHECK_GT(playback_rate, 0); + CHECK(timestamp != kNoTimestamp()); + DCHECK_GE(frames, 0); + DCHECK_GE(delay_frames, 0); + + if (last_endpoint_timestamp_ == kNoTimestamp()) + PushBufferedAudio(delay_frames, 0, kNoTimestamp()); + + TrimBufferedAudioToMatchDelay(delay_frames); + PushBufferedAudio(frames, playback_rate, timestamp); + + last_endpoint_timestamp_ = timestamp; +} + +void AudioClock::WroteSilence(int frames, int delay_frames) { + DCHECK_GE(frames, 0); + DCHECK_GE(delay_frames, 0); + + if (last_endpoint_timestamp_ == kNoTimestamp()) + PushBufferedAudio(delay_frames, 0, kNoTimestamp()); + + TrimBufferedAudioToMatchDelay(delay_frames); + PushBufferedAudio(frames, 0, kNoTimestamp()); +} + +base::TimeDelta AudioClock::CurrentMediaTimestamp() const { + int silence_frames = 0; + for (size_t i = 0; i < buffered_audio_.size(); ++i) { + // Account for silence ahead of the buffer closest to being played. + if (buffered_audio_[i].playback_rate == 0) { + silence_frames += buffered_audio_[i].frames; + continue; + } + + // Multiply by playback rate as frames represent time-scaled audio. 
+ return buffered_audio_[i].endpoint_timestamp - + base::TimeDelta::FromMicroseconds( + ((buffered_audio_[i].frames * buffered_audio_[i].playback_rate) + + silence_frames) / + sample_rate_ * base::Time::kMicrosecondsPerSecond); + } + + // Either: + // 1) AudioClock is uninitialized and we'll return kNoTimestamp() + // 2) All previously buffered audio has been replaced by silence, + // meaning media time is now at the last endpoint + return last_endpoint_timestamp_; +} + +void AudioClock::TrimBufferedAudioToMatchDelay(int delay_frames) { + if (buffered_audio_.empty()) + return; + + size_t i = buffered_audio_.size() - 1; + while (true) { + if (buffered_audio_[i].frames <= delay_frames) { + // Reached the end before accounting for all of |delay_frames|. This + // means we haven't written enough audio data yet to account for hardware + // delay. In this case, do nothing. + if (i == 0) + return; + + // Keep accounting for |delay_frames|. + delay_frames -= buffered_audio_[i].frames; + --i; + continue; + } + + // All of |delay_frames| has been accounted for: adjust amount of frames + // left in current buffer. All preceding elements with index < |i| should + // be considered played out and hence discarded. + buffered_audio_[i].frames = delay_frames; + break; + } + + // At this point |i| points at what will be the new head of |buffered_audio_| + // however if it contains no audio it should be removed as well. + if (buffered_audio_[i].frames == 0) + ++i; + + buffered_audio_.erase(buffered_audio_.begin(), buffered_audio_.begin() + i); +} + +void AudioClock::PushBufferedAudio(int frames, + float playback_rate, + base::TimeDelta endpoint_timestamp) { + if (playback_rate == 0) + DCHECK(endpoint_timestamp == kNoTimestamp()); + + if (frames == 0) + return; + + // Avoid creating extra elements where possible. 
+ if (!buffered_audio_.empty() && + buffered_audio_.back().playback_rate == playback_rate) { + buffered_audio_.back().frames += frames; + buffered_audio_.back().endpoint_timestamp = endpoint_timestamp; + return; + } + + buffered_audio_.push_back( + BufferedAudio(frames, playback_rate, endpoint_timestamp)); +} + +AudioClock::BufferedAudio::BufferedAudio(int frames, + float playback_rate, + base::TimeDelta endpoint_timestamp) + : frames(frames), + playback_rate(playback_rate), + endpoint_timestamp(endpoint_timestamp) { +} + +} // namespace media diff --git a/media/filters/audio_clock.h b/media/filters/audio_clock.h new file mode 100644 index 0000000..a0d8212 --- /dev/null +++ b/media/filters/audio_clock.h @@ -0,0 +1,76 @@ +// Copyright 2014 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef MEDIA_FILTERS_AUDIO_CLOCK_H_ +#define MEDIA_FILTERS_AUDIO_CLOCK_H_ + +#include <deque> + +#include "base/time/time.h" +#include "media/base/media_export.h" + +namespace media { + +// Models a queue of buffered audio in a playback pipeline for use with +// estimating the amount of delay in wall clock time. Takes changes in playback +// rate into account to handle scenarios where multiple rates may be present in +// a playback pipeline with large delay. +class MEDIA_EXPORT AudioClock { + public: + explicit AudioClock(int sample_rate); + ~AudioClock(); + + // |frames| amount of audio data scaled to |playback_rate| was written. + // |delay_frames| is the current amount of hardware delay. + // |timestamp| is the endpoint media timestamp of the audio data written. + void WroteAudio(int frames, + int delay_frames, + float playback_rate, + base::TimeDelta timestamp); + + // |frames| amount of silence was written. + // |delay_frames| is the current amount of hardware delay. 
+ void WroteSilence(int frames, int delay_frames); + + // Calculates the current media timestamp taking silence and changes in + // playback rate into account. + base::TimeDelta CurrentMediaTimestamp() const; + + // Returns the last endpoint timestamp provided to WroteAudio(). + base::TimeDelta last_endpoint_timestamp() const { + return last_endpoint_timestamp_; + } + + private: + void TrimBufferedAudioToMatchDelay(int delay_frames); + void PushBufferedAudio(int frames, + float playback_rate, + base::TimeDelta endpoint_timestamp); + + const int sample_rate_; + + // Initially set to kNoTimestamp(), otherwise is the last endpoint timestamp + // delivered to WroteAudio(). A copy is kept outside of |buffered_audio_| to + // handle the case where all of |buffered_audio_| has been replaced with + // silence. + base::TimeDelta last_endpoint_timestamp_; + + struct BufferedAudio { + BufferedAudio(int frames, + float playback_rate, + base::TimeDelta endpoint_timestamp); + + int frames; + float playback_rate; + base::TimeDelta endpoint_timestamp; + }; + + std::deque<BufferedAudio> buffered_audio_; + + DISALLOW_COPY_AND_ASSIGN(AudioClock); +}; + +} // namespace media + +#endif // MEDIA_FILTERS_AUDIO_CLOCK_H_ diff --git a/media/filters/audio_clock_unittest.cc b/media/filters/audio_clock_unittest.cc new file mode 100644 index 0000000..a924a24 --- /dev/null +++ b/media/filters/audio_clock_unittest.cc @@ -0,0 +1,177 @@ +// Copyright 2014 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#include "media/base/audio_timestamp_helper.h" +#include "media/base/buffers.h" +#include "media/filters/audio_clock.h" +#include "testing/gtest/include/gtest/gtest.h" + +namespace media { + +class AudioClockTest : public testing::Test { + public: + AudioClockTest() + : sample_rate_(10), + timestamp_helper_(sample_rate_), + clock_(sample_rate_) { + timestamp_helper_.SetBaseTimestamp(base::TimeDelta()); + } + + virtual ~AudioClockTest() {} + + void WroteAudio(int frames, int delay_frames, float playback_rate) { + timestamp_helper_.AddFrames(static_cast<int>(frames * playback_rate)); + clock_.WroteAudio( + frames, delay_frames, playback_rate, timestamp_helper_.GetTimestamp()); + } + + void WroteSilence(int frames, int delay_frames) { + clock_.WroteSilence(frames, delay_frames); + } + + int CurrentMediaTimestampInMilliseconds() { + return clock_.CurrentMediaTimestamp().InMilliseconds(); + } + + int LastEndpointTimestampInMilliseconds() { + return clock_.last_endpoint_timestamp().InMilliseconds(); + } + + const int sample_rate_; + AudioTimestampHelper timestamp_helper_; + AudioClock clock_; + + private: + DISALLOW_COPY_AND_ASSIGN(AudioClockTest); +}; + +TEST_F(AudioClockTest, TimestampsStartAtNoTimestamp) { + EXPECT_EQ(kNoTimestamp(), clock_.CurrentMediaTimestamp()); + EXPECT_EQ(kNoTimestamp(), clock_.last_endpoint_timestamp()); +} + +TEST_F(AudioClockTest, Playback) { + // The first time we write data we should expect a negative time matching the + // current delay. + WroteAudio(10, 20, 1.0); + EXPECT_EQ(-2000, CurrentMediaTimestampInMilliseconds()); + EXPECT_EQ(1000, LastEndpointTimestampInMilliseconds()); + + // The media time should keep advancing as we write data. 
+ WroteAudio(10, 20, 1.0); + EXPECT_EQ(-1000, CurrentMediaTimestampInMilliseconds()); + EXPECT_EQ(2000, LastEndpointTimestampInMilliseconds()); + + WroteAudio(10, 20, 1.0); + EXPECT_EQ(0, CurrentMediaTimestampInMilliseconds()); + EXPECT_EQ(3000, LastEndpointTimestampInMilliseconds()); + + WroteAudio(10, 20, 1.0); + EXPECT_EQ(1000, CurrentMediaTimestampInMilliseconds()); + EXPECT_EQ(4000, LastEndpointTimestampInMilliseconds()); + + // Introduce a rate change to slow down time. Current time will keep advancing + // by one second until it hits the slowed down audio. + WroteAudio(10, 20, 0.5); + EXPECT_EQ(2000, CurrentMediaTimestampInMilliseconds()); + EXPECT_EQ(4500, LastEndpointTimestampInMilliseconds()); + + WroteAudio(10, 20, 0.5); + EXPECT_EQ(3000, CurrentMediaTimestampInMilliseconds()); + EXPECT_EQ(5000, LastEndpointTimestampInMilliseconds()); + + WroteAudio(10, 20, 0.5); + EXPECT_EQ(4000, CurrentMediaTimestampInMilliseconds()); + EXPECT_EQ(5500, LastEndpointTimestampInMilliseconds()); + + WroteAudio(10, 20, 0.5); + EXPECT_EQ(4500, CurrentMediaTimestampInMilliseconds()); + EXPECT_EQ(6000, LastEndpointTimestampInMilliseconds()); + + // Introduce a rate change to speed up time. Current time will keep advancing + // by half a second until it hits the sped up audio. + WroteAudio(10, 20, 2); + EXPECT_EQ(5000, CurrentMediaTimestampInMilliseconds()); + EXPECT_EQ(8000, LastEndpointTimestampInMilliseconds()); + + WroteAudio(10, 20, 2); + EXPECT_EQ(5500, CurrentMediaTimestampInMilliseconds()); + EXPECT_EQ(10000, LastEndpointTimestampInMilliseconds()); + + WroteAudio(10, 20, 2); + EXPECT_EQ(6000, CurrentMediaTimestampInMilliseconds()); + EXPECT_EQ(12000, LastEndpointTimestampInMilliseconds()); + + WroteAudio(10, 20, 2); + EXPECT_EQ(8000, CurrentMediaTimestampInMilliseconds()); + EXPECT_EQ(14000, LastEndpointTimestampInMilliseconds()); + + // Write silence to simulate reaching end of stream. 
+ WroteSilence(10, 20); + EXPECT_EQ(10000, CurrentMediaTimestampInMilliseconds()); + EXPECT_EQ(14000, LastEndpointTimestampInMilliseconds()); + + WroteSilence(10, 20); + EXPECT_EQ(12000, CurrentMediaTimestampInMilliseconds()); + EXPECT_EQ(14000, LastEndpointTimestampInMilliseconds()); + + WroteSilence(10, 20); + EXPECT_EQ(14000, CurrentMediaTimestampInMilliseconds()); + EXPECT_EQ(14000, LastEndpointTimestampInMilliseconds()); + + // At this point media time should stop increasing. + WroteSilence(10, 20); + EXPECT_EQ(14000, CurrentMediaTimestampInMilliseconds()); + EXPECT_EQ(14000, LastEndpointTimestampInMilliseconds()); +} + +TEST_F(AudioClockTest, AlternatingAudioAndSilence) { + // Buffer #1: [0, 1000) + WroteAudio(10, 20, 1.0); + EXPECT_EQ(-2000, CurrentMediaTimestampInMilliseconds()); + + // Buffer #2: 1000ms of silence + WroteSilence(10, 20); + EXPECT_EQ(-1000, CurrentMediaTimestampInMilliseconds()); + + // Buffer #3: [1000, 2000), buffer #1 is at front + WroteAudio(10, 20, 1.0); + EXPECT_EQ(0, CurrentMediaTimestampInMilliseconds()); + + // Buffer #4: 1000ms of silence, time shouldn't advance + WroteSilence(10, 20); + EXPECT_EQ(0, CurrentMediaTimestampInMilliseconds()); + + // Buffer #5: [2000, 3000), buffer #3 is at front + WroteAudio(10, 20, 1.0); + EXPECT_EQ(1000, CurrentMediaTimestampInMilliseconds()); +} + +TEST_F(AudioClockTest, ZeroDelay) { + // The first time we write data we should expect the first timestamp + // immediately. + WroteAudio(10, 0, 1.0); + EXPECT_EQ(0, CurrentMediaTimestampInMilliseconds()); + EXPECT_EQ(1000, LastEndpointTimestampInMilliseconds()); + + // Ditto for all subsequent buffers. + WroteAudio(10, 0, 1.0); + EXPECT_EQ(1000, CurrentMediaTimestampInMilliseconds()); + EXPECT_EQ(2000, LastEndpointTimestampInMilliseconds()); + + WroteAudio(10, 0, 1.0); + EXPECT_EQ(2000, CurrentMediaTimestampInMilliseconds()); + EXPECT_EQ(3000, LastEndpointTimestampInMilliseconds()); + + // Ditto for silence. 
+ WroteSilence(10, 0); + EXPECT_EQ(3000, CurrentMediaTimestampInMilliseconds()); + EXPECT_EQ(3000, LastEndpointTimestampInMilliseconds()); + + WroteSilence(10, 0); + EXPECT_EQ(3000, CurrentMediaTimestampInMilliseconds()); + EXPECT_EQ(3000, LastEndpointTimestampInMilliseconds()); +} + +} // namespace media diff --git a/media/filters/audio_renderer_impl.cc b/media/filters/audio_renderer_impl.cc index bb80661..2b82162 100644 --- a/media/filters/audio_renderer_impl.cc +++ b/media/filters/audio_renderer_impl.cc @@ -20,6 +20,7 @@ #include "media/base/audio_splicer.h" #include "media/base/bind_to_current_loop.h" #include "media/base/demuxer_stream.h" +#include "media/filters/audio_clock.h" #include "media/filters/decrypting_demuxer_stream.h" namespace media { @@ -57,8 +58,6 @@ AudioRendererImpl::AudioRendererImpl( pending_read_(false), received_end_of_stream_(false), rendered_end_of_stream_(false), - audio_time_buffered_(kNoTimestamp()), - current_time_(kNoTimestamp()), underflow_disabled_(false), preroll_aborted_(false), weak_factory_(this) { @@ -169,8 +168,7 @@ void AudioRendererImpl::ResetDecoderDone() { DCHECK_EQ(state_, kPaused); DCHECK(!flush_cb_.is_null()); - audio_time_buffered_ = kNoTimestamp(); - current_time_ = kNoTimestamp(); + audio_clock_.reset(new AudioClock(audio_parameters_.sample_rate())); received_end_of_stream_ = false; rendered_end_of_stream_ = false; preroll_aborted_ = false; @@ -288,6 +286,8 @@ void AudioRendererImpl::Initialize(DemuxerStream* stream, hardware_config_->GetHighLatencyBufferSize()); } + audio_clock_.reset(new AudioClock(audio_parameters_.sample_rate())); + audio_buffer_stream_.Initialize( stream, false, @@ -565,27 +565,33 @@ bool AudioRendererImpl::IsBeforePrerollTime( int AudioRendererImpl::Render(AudioBus* audio_bus, int audio_delay_milliseconds) { const int requested_frames = audio_bus->frames(); - base::TimeDelta current_time = kNoTimestamp(); - base::TimeDelta max_time = kNoTimestamp(); base::TimeDelta playback_delay = 
base::TimeDelta::FromMilliseconds( audio_delay_milliseconds); - + const int delay_frames = static_cast<int>(playback_delay.InSecondsF() * + audio_parameters_.sample_rate()); int frames_written = 0; + base::Closure time_cb; base::Closure underflow_cb; { base::AutoLock auto_lock(lock_); // Ensure Stop() hasn't destroyed our |algorithm_| on the pipeline thread. - if (!algorithm_) + if (!algorithm_) { + audio_clock_->WroteSilence(requested_frames, delay_frames); return 0; + } float playback_rate = algorithm_->playback_rate(); - if (playback_rate == 0) + if (playback_rate == 0) { + audio_clock_->WroteSilence(requested_frames, delay_frames); return 0; + } // Mute audio by returning 0 when not playing. - if (state_ != kPlaying) + if (state_ != kPlaying) { + audio_clock_->WroteSilence(requested_frames, delay_frames); return 0; + } // We use the following conditions to determine end of playback: // 1) Algorithm can not fill the audio callback buffer @@ -602,8 +608,15 @@ int AudioRendererImpl::Render(AudioBus* audio_bus, // 3) We are in the kPlaying state // // Otherwise the buffer has data we can send to the device. - const base::TimeDelta time_before_filling = algorithm_->GetTime(); - frames_written = algorithm_->FillBuffer(audio_bus, requested_frames); + const base::TimeDelta media_timestamp_before_filling = + audio_clock_->CurrentMediaTimestamp(); + if (algorithm_->frames_buffered() > 0) { + frames_written = algorithm_->FillBuffer(audio_bus, requested_frames); + audio_clock_->WroteAudio( + frames_written, delay_frames, playback_rate, algorithm_->GetTime()); + } + audio_clock_->WroteSilence(requested_frames - frames_written, delay_frames); + if (frames_written == 0) { const base::TimeTicks now = now_cb_.Run(); @@ -628,55 +641,24 @@ int AudioRendererImpl::Render(AudioBus* audio_bus, weak_factory_.GetWeakPtr())); } - // Adjust the delay according to playback rate. 
- base::TimeDelta adjusted_playback_delay = base::TimeDelta::FromMicroseconds( - ceil(playback_delay.InMicroseconds() * playback_rate)); - - // The |audio_time_buffered_| is the ending timestamp of the last frame - // buffered at the audio device. |playback_delay| is the amount of time - // buffered at the audio device. The current time can be computed by their - // difference. - if (audio_time_buffered_ != kNoTimestamp()) { - base::TimeDelta previous_time = current_time_; - current_time_ = audio_time_buffered_ - adjusted_playback_delay; - - // Time can change in one of two ways: - // 1) The time of the audio data at the audio device changed, or - // 2) The playback delay value has changed - // - // We only want to set |current_time| (and thus execute |time_cb_|) if - // time has progressed and we haven't signaled end of stream yet. - // - // Why? The current latency of the system results in getting the last call - // to FillBuffer() later than we'd like, which delays firing the 'ended' - // event, which delays the looping/trigging performance of short sound - // effects. - // - // TODO(scherkus): revisit this and switch back to relying on playback - // delay after we've revamped our audio IPC subsystem. - if (current_time_ > previous_time && !rendered_end_of_stream_) { - current_time = current_time_; - } - } else if (frames_written > 0) { - // Nothing has been buffered yet, so use the first buffer's timestamp. - DCHECK(time_before_filling != kNoTimestamp()); - current_time_ = current_time = - time_before_filling - adjusted_playback_delay; + // We only want to execute |time_cb_| if time has progressed and we haven't + // signaled end of stream yet. 
+ if (media_timestamp_before_filling != + audio_clock_->CurrentMediaTimestamp() && + !rendered_end_of_stream_) { + time_cb = base::Bind(time_cb_, + audio_clock_->CurrentMediaTimestamp(), + audio_clock_->last_endpoint_timestamp()); } - // The call to FillBuffer() on |algorithm_| has increased the amount of - // buffered audio data. Update the new amount of time buffered. - max_time = algorithm_->GetTime(); - audio_time_buffered_ = max_time; - if (frames_written > 0) { UpdateEarliestEndTime_Locked( frames_written, playback_delay, now_cb_.Run()); } } - if (current_time != kNoTimestamp() && max_time != kNoTimestamp()) - time_cb_.Run(current_time, max_time); + if (!time_cb.is_null()) + time_cb.Run(); if (!underflow_cb.is_null()) underflow_cb.Run(); diff --git a/media/filters/audio_renderer_impl.h b/media/filters/audio_renderer_impl.h index f6f41f1..4e8ad01 100644 --- a/media/filters/audio_renderer_impl.h +++ b/media/filters/audio_renderer_impl.h @@ -37,11 +37,12 @@ class SingleThreadTaskRunner; namespace media { -class AudioBus; class AudioBufferConverter; +class AudioBus; +class AudioClock; +class AudioHardwareConfig; class AudioSplicer; class DecryptingDemuxerStream; -class AudioHardwareConfig; class MEDIA_EXPORT AudioRendererImpl : public AudioRenderer, @@ -246,10 +247,7 @@ class MEDIA_EXPORT AudioRendererImpl bool received_end_of_stream_; bool rendered_end_of_stream_; - // The timestamp of the last frame (i.e. furthest in the future) buffered as - // well as the current time that takes current playback delay into account. 
- base::TimeDelta audio_time_buffered_; - base::TimeDelta current_time_; + scoped_ptr<AudioClock> audio_clock_; base::TimeDelta preroll_timestamp_; diff --git a/media/filters/audio_renderer_impl_unittest.cc b/media/filters/audio_renderer_impl_unittest.cc index 05fb4de..8a9af82 100644 --- a/media/filters/audio_renderer_impl_unittest.cc +++ b/media/filters/audio_renderer_impl_unittest.cc @@ -968,15 +968,33 @@ TEST_F(AudioRendererImplTest, TimeUpdatesOnFirstBuffer) { EXPECT_TRUE(ConsumeBufferedData(kFramesToConsume, NULL)); WaitForPendingRead(); - // Ensure we received a time update for the first buffer and it's zero. + // ConsumeBufferedData() uses an audio delay of zero, so ensure we received + // a time update that's equal to |kFramesToConsume| from above. timestamp_helper.SetBaseTimestamp(base::TimeDelta()); - EXPECT_EQ(timestamp_helper.base_timestamp(), last_time_update()); timestamp_helper.AddFrames(kFramesToConsume); + EXPECT_EQ(timestamp_helper.GetTimestamp(), last_time_update()); - // ConsumeBufferedData() uses an audio delay of zero, so the next buffer - // should have a timestamp equal to the duration of |kFramesToConsume|. + // The next time update should match the remaining frames_buffered(). + timestamp_helper.AddFrames(frames_buffered()); EXPECT_TRUE(ConsumeBufferedData(frames_buffered(), NULL)); EXPECT_EQ(timestamp_helper.GetTimestamp(), last_time_update()); } +TEST_F(AudioRendererImplTest, ImmediateEndOfStream) { + Initialize(); + { + SCOPED_TRACE("Preroll()"); + WaitableMessageLoopEvent event; + renderer_->Preroll(base::TimeDelta(), event.GetPipelineStatusCB()); + WaitForPendingRead(); + DeliverEndOfStream(); + event.RunAndWaitForStatus(PIPELINE_OK); + } + Play(); + + // Read a single frame. We shouldn't be able to satisfy it. + EXPECT_FALSE(ConsumeBufferedData(1, NULL)); + WaitForEnded(); +} + } // namespace media |