author     vrk@chromium.org <vrk@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>   2012-04-06 03:17:44 +0000
committer  vrk@chromium.org <vrk@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>   2012-04-06 03:17:44 +0000
commit     e4fc09e874d7a7fc87dc1565452d32985949a3cf (patch)
tree       8a8a5f596294a3db7c6aa3998b8562a4b24e60e9 /media
parent     f2ebbf06167ad4ff8cb23109b3652c8c4b7ff5f7 (diff)
Merge AudioRendererImpl and AudioRendererBase; add NullAudioSink
This CL removes AudioRendererImpl and replaces it with AudioRendererBase.
NullAudioRenderer is also removed and replaced with NullAudioSink. Also, a
subtle bug is fixed in AudioRendererBase to allow for smooth video playback
when running Chrome with the --disable-audio flag.

BUG=119549,116645
TEST=media_unittests; playing video on Chrome/content_shell with and without
the --disable-audio flag should look identical

Review URL: http://codereview.chromium.org/9826023

git-svn-id: svn://svn.chromium.org/chrome/trunk/src@131089 0039d316-1c4b-4281-b951-d872f2087c98
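At a glance, the wiring change for callers is small: sites that previously added a NullAudioRenderer to the pipeline's filter collection now construct the merged AudioRendererBase with an audio sink. A minimal sketch of the pattern this CL introduces (mirroring the player_x11 and integration-test call sites in the diff below; |collection| is the pipeline's FilterCollection):

    #include "media/audio/null_audio_sink.h"
    #include "media/filters/audio_renderer_base.h"

    // Before this CL:
    //   collection->AddAudioRenderer(new media::NullAudioRenderer());
    // After this CL -- with --disable-audio (or no audio device) the decoded
    // audio is "played" into a NullAudioSink, so the audio clock still
    // advances and video playback stays smooth:
    collection->AddAudioRenderer(
        new media::AudioRendererBase(new media::NullAudioSink()));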
Diffstat (limited to 'media')
-rw-r--r--  media/audio/audio_util.cc                        |  32
-rw-r--r--  media/audio/audio_util.h                         |   4
-rw-r--r--  media/audio/null_audio_sink.cc                   | 110
-rw-r--r--  media/audio/null_audio_sink.h                    |  65
-rw-r--r--  media/base/audio_renderer_sink.h                 |  10
-rw-r--r--  media/base/pipeline.cc                           |   2
-rw-r--r--  media/filters/audio_renderer_base.cc             | 302
-rw-r--r--  media/filters/audio_renderer_base.h              | 125
-rw-r--r--  media/filters/audio_renderer_base_unittest.cc    |  81
-rw-r--r--  media/filters/null_audio_renderer.cc             |  95
-rw-r--r--  media/filters/null_audio_renderer.h              |  65
-rw-r--r--  media/filters/pipeline_integration_test_base.cc  |   5
-rw-r--r--  media/media.gyp                                  |   4
-rw-r--r--  media/tools/player_wtl/movie.cc                  |   6
-rw-r--r--  media/tools/player_x11/player_x11.cc             |   6
15 files changed, 589 insertions(+), 323 deletions(-)
diff --git a/media/audio/audio_util.cc b/media/audio/audio_util.cc
index c15e845..01d71ee 100644
--- a/media/audio/audio_util.cc
+++ b/media/audio/audio_util.cc
@@ -427,6 +427,38 @@ ChannelLayout GetAudioInputHardwareChannelLayout(const std::string& device_id) {
#endif
}
+// Computes a buffer size based on the given |sample_rate|. Must be used in
+// conjunction with AUDIO_PCM_LINEAR.
+size_t GetHighLatencyOutputBufferSize(int sample_rate) {
+ // TODO(vrk/crogers): The buffer size that this function computes is probably
+ // overly conservative. However, reducing the buffer size to 2048-8192 bytes
+ // caused crbug.com/108396. This computation should be revisited while making
+ // sure crbug.com/108396 doesn't happen again.
+
+ // The minimum number of samples in a hardware packet.
+ // This value is selected so that we can handle down to 5khz sample rate.
+ static const size_t kMinSamplesPerHardwarePacket = 1024;
+
+ // The maximum number of samples in a hardware packet.
+ // This value is selected so that we can handle up to 192khz sample rate.
+ static const size_t kMaxSamplesPerHardwarePacket = 64 * 1024;
+
+ // This constant governs the hardware audio buffer size; this value should be
+ // chosen carefully.
+ // This value is selected so that we have 8192 samples for 48khz streams.
+ static const size_t kMillisecondsPerHardwarePacket = 170;
+
+ // Select the number of samples that can provide at least
+ // |kMillisecondsPerHardwarePacket| worth of audio data.
+ size_t samples = kMinSamplesPerHardwarePacket;
+ while (samples <= kMaxSamplesPerHardwarePacket &&
+ samples * base::Time::kMillisecondsPerSecond <
+ sample_rate * kMillisecondsPerHardwarePacket) {
+ samples *= 2;
+ }
+ return samples;
+}
+
// When transferring data in the shared memory, first word is size of data
// in bytes. Actual data starts immediately after it.
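A quick sanity check of the doubling loop above (my arithmetic, not part of the CL): for a 48 kHz stream the loop keeps doubling while samples * 1000 < 48000 * 170 = 8,160,000, i.e. while samples < 8160, so it stops at 8192 -- matching the "8192 samples for 48khz streams" comment. A hedged usage sketch:

    // Assumed call sites; the function is declared MEDIA_EXPORT in audio_util.h below.
    size_t frames = media::GetHighLatencyOutputBufferSize(48000);      // 8192
    // 44.1 kHz: 44100 * 170 = 7,497,000, first doubling >= 7497 is 8192.
    size_t cd_frames = media::GetHighLatencyOutputBufferSize(44100);   // 8192
    // 5 kHz: 5000 * 170 = 850,000 < 1024 * 1000, so the minimum 1024 is returned.
    size_t low_frames = media::GetHighLatencyOutputBufferSize(5000);   // 1024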
diff --git a/media/audio/audio_util.h b/media/audio/audio_util.h
index 2fccc1e..df5683f 100644
--- a/media/audio/audio_util.h
+++ b/media/audio/audio_util.h
@@ -107,6 +107,10 @@ MEDIA_EXPORT size_t GetAudioHardwareBufferSize();
MEDIA_EXPORT ChannelLayout GetAudioInputHardwareChannelLayout(
const std::string& device_id);
+// Computes a buffer size based on the given |sample_rate|. Must be used in
+// conjunction with AUDIO_PCM_LINEAR.
+MEDIA_EXPORT size_t GetHighLatencyOutputBufferSize(int sample_rate);
+
// Functions that handle data buffer passed between processes in the shared
// memory. Called on both IPC sides.
diff --git a/media/audio/null_audio_sink.cc b/media/audio/null_audio_sink.cc
new file mode 100644
index 0000000..e8cd4eb
--- /dev/null
+++ b/media/audio/null_audio_sink.cc
@@ -0,0 +1,110 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/audio/null_audio_sink.h"
+
+#include "base/bind.h"
+#include "base/threading/platform_thread.h"
+
+namespace media {
+
+NullAudioSink::NullAudioSink()
+ : initialized_(false),
+ playback_rate_(0.0),
+ playing_(false),
+ callback_(NULL),
+ thread_("NullAudioThread") {
+}
+
+NullAudioSink::~NullAudioSink() {
+ DCHECK(!thread_.IsRunning());
+ for (size_t i = 0; i < audio_data_.size(); ++i)
+ delete [] audio_data_[i];
+}
+
+void NullAudioSink::Start() {
+ if (!thread_.Start())
+ return;
+
+ thread_.message_loop()->PostTask(FROM_HERE, base::Bind(
+ &NullAudioSink::FillBufferTask, this));
+}
+
+void NullAudioSink::Stop() {
+ SetPlaying(false);
+ thread_.Stop();
+}
+
+void NullAudioSink::Play() {
+ SetPlaying(true);
+}
+
+void NullAudioSink::Pause(bool /* flush */) {
+ SetPlaying(false);
+}
+
+void NullAudioSink::SetPlaybackRate(float rate) {
+ base::AutoLock auto_lock(lock_);
+ playback_rate_ = rate;
+}
+
+bool NullAudioSink::SetVolume(double volume) {
+ // Audio is always muted.
+ return volume == 0.0;
+}
+
+void NullAudioSink::GetVolume(double* volume) {
+ // Audio is always muted.
+ *volume = 0.0;
+}
+
+void NullAudioSink::SetPlaying(bool is_playing) {
+ base::AutoLock auto_lock(lock_);
+ playing_ = is_playing;
+}
+
+void NullAudioSink::Initialize(const AudioParameters& params,
+ RenderCallback* callback) {
+ DCHECK(!initialized_);
+ params_ = params;
+
+ audio_data_.reserve(params.channels());
+ for (int i = 0; i < params.channels(); ++i) {
+ float* channel_data = new float[params.frames_per_buffer()];
+ audio_data_.push_back(channel_data);
+ }
+
+ callback_ = callback;
+ initialized_ = true;
+}
+
+void NullAudioSink::FillBufferTask() {
+ base::AutoLock auto_lock(lock_);
+
+ base::TimeDelta delay;
+ // Only consume buffers when actually playing.
+ if (playing_) {
+ DCHECK_GT(playback_rate_, 0.0f);
+ int requested_frames = params_.frames_per_buffer();
+ int frames_received = callback_->Render(audio_data_, requested_frames, 0);
+ int frames_per_millisecond =
+ params_.sample_rate() / base::Time::kMillisecondsPerSecond;
+
+ // Calculate our sleep duration, taking playback rate into consideration.
+ delay = base::TimeDelta::FromMilliseconds(
+ frames_received / (frames_per_millisecond * playback_rate_));
+ } else {
+ // If paused, sleep for 10 milliseconds before polling again.
+ delay = base::TimeDelta::FromMilliseconds(10);
+ }
+
+ // Sleep for at least one millisecond so we don't spin the CPU.
+ MessageLoop::current()->PostDelayedTask(
+ FROM_HERE,
+ base::Bind(&NullAudioSink::FillBufferTask, this),
+ std::max(delay, base::TimeDelta::FromMilliseconds(1)));
+}
+
+} // namespace media
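A quick check of the sleep computation in FillBufferTask() (example numbers of mine, assuming the 48 kHz / 8192-frame parameters that AudioRendererBase passes in later in this CL): frames_per_millisecond = 48000 / 1000 = 48, so a full 8192-frame Render() at playback rate 1.0 yields a delay of 8192 / 48, about 170 ms. In other words, the sink throws data away at roughly real-time speed, which is what keeps the pipeline clock -- and therefore video -- advancing under --disable-audio.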
diff --git a/media/audio/null_audio_sink.h b/media/audio/null_audio_sink.h
new file mode 100644
index 0000000..32245eb
--- /dev/null
+++ b/media/audio/null_audio_sink.h
@@ -0,0 +1,65 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEDIA_AUDIO_NULL_AUDIO_SINK_H_
+#define MEDIA_AUDIO_NULL_AUDIO_SINK_H_
+
+// NullAudioSink effectively uses an extra thread to "throw away" the
+// audio data at a rate resembling normal playback speed. It's just like
+// decoding to /dev/null!
+//
+// NullAudioSink can also be used in situations where the client has no
+// audio device or we haven't written an audio implementation for a particular
+// platform yet.
+
+#include <vector>
+
+#include "base/threading/thread.h"
+#include "media/base/audio_renderer_sink.h"
+
+namespace media {
+
+class MEDIA_EXPORT NullAudioSink
+ : NON_EXPORTED_BASE(public AudioRendererSink) {
+ public:
+ NullAudioSink();
+ virtual ~NullAudioSink();
+
+ // AudioRendererSink implementation.
+ virtual void Initialize(const AudioParameters& params,
+ RenderCallback* callback) OVERRIDE;
+ virtual void Start() OVERRIDE;
+ virtual void Stop() OVERRIDE;
+ virtual void Pause(bool flush) OVERRIDE;
+ virtual void Play() OVERRIDE;
+ virtual void SetPlaybackRate(float rate) OVERRIDE;
+ virtual bool SetVolume(double volume) OVERRIDE;
+ virtual void GetVolume(double* volume) OVERRIDE;
+
+ private:
+ // Audio thread task that periodically calls Render() on |callback_| to
+ // consume audio data.
+ void FillBufferTask();
+
+ void SetPlaying(bool is_playing);
+
+ // Per-channel buffers passed to |callback_|'s Render() to advance playback.
+ std::vector<float*> audio_data_;
+
+ AudioParameters params_;
+ bool initialized_;
+ float playback_rate_;
+ bool playing_;
+ RenderCallback* callback_;
+
+ // Separate thread used to throw away data.
+ base::Thread thread_;
+ base::Lock lock_;
+
+ DISALLOW_COPY_AND_ASSIGN(NullAudioSink);
+};
+
+} // namespace media
+
+#endif // MEDIA_AUDIO_NULL_AUDIO_SINK_H_
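For completeness, a hypothetical standalone use of the sink (normally AudioRendererBase::Initialize() does this wiring; |callback| here is an assumed AudioRendererSink::RenderCallback*, and the parameter values are only illustrative):

    media::AudioParameters params(
        media::AudioParameters::AUDIO_PCM_LINEAR, CHANNEL_LAYOUT_STEREO,
        48000, 16, 8192);
    scoped_refptr<media::NullAudioSink> sink(new media::NullAudioSink());
    sink->Initialize(params, callback);
    sink->Start();
    sink->SetPlaybackRate(1.0f);
    sink->Play();          // FillBufferTask() now polls Render() roughly every 170 ms.
    sink->Pause(false);    // Stops consuming; the task polls every 10 ms instead.
    sink->Stop();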
diff --git a/media/base/audio_renderer_sink.h b/media/base/audio_renderer_sink.h
index 01eb185..49ddbf5 100644
--- a/media/base/audio_renderer_sink.h
+++ b/media/base/audio_renderer_sink.h
@@ -28,9 +28,9 @@ class AudioRendererSink
// continuous stream). That actual number of frames is passed to host
// together with PCM audio data and host is free to use or ignore it.
// TODO(crogers): use base::Callback instead.
- virtual size_t Render(const std::vector<float*>& audio_data,
- size_t number_of_frames,
- size_t audio_delay_milliseconds) = 0;
+ virtual int Render(const std::vector<float*>& audio_data,
+ int number_of_frames,
+ int audio_delay_milliseconds) = 0;
// Signals an error has occurred.
virtual void OnRenderError() = 0;
@@ -58,6 +58,10 @@ class AudioRendererSink
// Resumes playback after calling Pause().
virtual void Play() = 0;
+ // Called to inform the sink of a change in playback rate. Override if
+ // subclass needs the playback rate.
+ virtual void SetPlaybackRate(float rate) {};
+
// Sets the playback volume, with range [0.0, 1.0] inclusive.
// Returns |true| on success.
virtual bool SetVolume(double volume) = 0;
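Since the unit test below mocks only the AudioRendererSink half of this interface, here is a minimal sketch of the callback half under the new int-based Render() signature (illustrative only; AudioRendererBase is the real RenderCallback implementation added in this CL):

    // Assumes <cstring> and <vector> are available.
    class SilenceCallback : public media::AudioRendererSink::RenderCallback {
     public:
      // Fill every channel with zeros and report how many frames were "rendered".
      virtual int Render(const std::vector<float*>& audio_data,
                         int number_of_frames,
                         int audio_delay_milliseconds) OVERRIDE {
        for (size_t ch = 0; ch < audio_data.size(); ++ch)
          memset(audio_data[ch], 0, sizeof(float) * number_of_frames);
        return number_of_frames;
      }
      virtual void OnRenderError() OVERRIDE {}
    };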
diff --git a/media/base/pipeline.cc b/media/base/pipeline.cc
index dd0822e..699aba7 100644
--- a/media/base/pipeline.cc
+++ b/media/base/pipeline.cc
@@ -436,7 +436,7 @@ base::TimeDelta Pipeline::GetDuration() const {
}
void Pipeline::OnAudioTimeUpdate(base::TimeDelta time,
- base::TimeDelta max_time) {
+ base::TimeDelta max_time) {
DCHECK(time <= max_time);
DCHECK(IsRunning());
base::AutoLock auto_lock(lock_);
diff --git a/media/filters/audio_renderer_base.cc b/media/filters/audio_renderer_base.cc
index eda4d9e..21d9bc5 100644
--- a/media/filters/audio_renderer_base.cc
+++ b/media/filters/audio_renderer_base.cc
@@ -4,23 +4,27 @@
#include "media/filters/audio_renderer_base.h"
-#include <algorithm>
-#include <string>
+#include <math.h>
#include "base/bind.h"
#include "base/callback.h"
#include "base/callback_helpers.h"
#include "base/logging.h"
#include "media/base/filter_host.h"
+#include "media/audio/audio_util.h"
namespace media {
-AudioRendererBase::AudioRendererBase()
+AudioRendererBase::AudioRendererBase(media::AudioRendererSink* sink)
: state_(kUninitialized),
pending_read_(false),
received_end_of_stream_(false),
rendered_end_of_stream_(false),
bytes_per_frame_(0),
+ bytes_per_second_(0),
+ stopped_(false),
+ sink_(sink),
+ is_initialized_(false),
read_cb_(base::Bind(&AudioRendererBase::DecodedAudioReady,
base::Unretained(this))) {
}
@@ -32,25 +36,53 @@ AudioRendererBase::~AudioRendererBase() {
}
void AudioRendererBase::Play(const base::Closure& callback) {
- base::AutoLock auto_lock(lock_);
- DCHECK_EQ(kPaused, state_);
- state_ = kPlaying;
- callback.Run();
-}
+ {
+ base::AutoLock auto_lock(lock_);
+ DCHECK_EQ(kPaused, state_);
+ state_ = kPlaying;
+ callback.Run();
+ }
-void AudioRendererBase::Pause(const base::Closure& callback) {
- base::AutoLock auto_lock(lock_);
- DCHECK(state_ == kPlaying || state_ == kUnderflow || state_ == kRebuffering);
- pause_cb_ = callback;
- state_ = kPaused;
+ if (stopped_)
+ return;
- // Pause only when we've completed our pending read.
- if (!pending_read_) {
- pause_cb_.Run();
- pause_cb_.Reset();
+ if (GetPlaybackRate() != 0.0f) {
+ DoPlay();
} else {
+ DoPause();
+ }
+}
+
+void AudioRendererBase::DoPlay() {
+ earliest_end_time_ = base::Time::Now();
+ DCHECK(sink_.get());
+ sink_->Play();
+}
+
+void AudioRendererBase::Pause(const base::Closure& callback) {
+ {
+ base::AutoLock auto_lock(lock_);
+ DCHECK(state_ == kPlaying || state_ == kUnderflow ||
+ state_ == kRebuffering);
+ pause_cb_ = callback;
state_ = kPaused;
+
+ // Pause only when we've completed our pending read.
+ if (!pending_read_) {
+ pause_cb_.Run();
+ pause_cb_.Reset();
+ }
}
+
+ if (stopped_)
+ return;
+
+ DoPause();
+}
+
+void AudioRendererBase::DoPause() {
+ DCHECK(sink_.get());
+ sink_->Pause(false);
}
void AudioRendererBase::Flush(const base::Closure& callback) {
@@ -58,7 +90,12 @@ void AudioRendererBase::Flush(const base::Closure& callback) {
}
void AudioRendererBase::Stop(const base::Closure& callback) {
- OnStop();
+ if (!stopped_) {
+ DCHECK(sink_.get());
+ sink_->Stop();
+
+ stopped_ = true;
+ }
{
base::AutoLock auto_lock(lock_);
state_ = kStopped;
@@ -82,12 +119,24 @@ void AudioRendererBase::Seek(base::TimeDelta time, const PipelineStatusCB& cb) {
seek_timestamp_ = time;
// Throw away everything and schedule our reads.
- last_fill_buffer_time_ = base::TimeDelta();
+ audio_time_buffered_ = base::TimeDelta();
received_end_of_stream_ = false;
rendered_end_of_stream_ = false;
// |algorithm_| will request more reads.
algorithm_->FlushBuffers();
+
+ if (stopped_)
+ return;
+
+ DoSeek();
+}
+
+void AudioRendererBase::DoSeek() {
+ earliest_end_time_ = base::Time::Now();
+
+ // Pause and flush the stream when we seek to a new location.
+ sink_->Pause(true);
}
void AudioRendererBase::Initialize(const scoped_refptr<AudioDecoder>& decoder,
@@ -120,16 +169,32 @@ void AudioRendererBase::Initialize(const scoped_refptr<AudioDecoder>& decoder,
bool config_ok = algorithm_->ValidateConfig(channels, sample_rate,
bits_per_channel);
- if (config_ok)
- algorithm_->Initialize(channels, sample_rate, bits_per_channel, 0.0f, cb);
-
- // Give the subclass an opportunity to initialize itself.
- if (!config_ok || !OnInitialize(bits_per_channel, channel_layout,
- sample_rate)) {
+ if (!config_ok || is_initialized_) {
init_cb.Run(PIPELINE_ERROR_INITIALIZATION_FAILED);
return;
}
+ if (config_ok)
+ algorithm_->Initialize(channels, sample_rate, bits_per_channel, 0.0f, cb);
+
+ // We use the AUDIO_PCM_LINEAR flag because AUDIO_PCM_LOW_LATENCY
+ // does not currently support all the sample-rates that we require.
+ // Please see: http://code.google.com/p/chromium/issues/detail?id=103627
+ // for more details.
+ audio_parameters_ = AudioParameters(
+ AudioParameters::AUDIO_PCM_LINEAR, channel_layout, sample_rate,
+ bits_per_channel, GetHighLatencyOutputBufferSize(sample_rate));
+
+ bytes_per_second_ = audio_parameters_.GetBytesPerSecond();
+
+ DCHECK(sink_.get());
+ DCHECK(!is_initialized_);
+
+ sink_->Initialize(audio_parameters_, this);
+
+ sink_->Start();
+ is_initialized_ = true;
+
// Finally, execute the start callback.
state_ = kPaused;
init_cb.Run(PIPELINE_OK);
@@ -152,6 +217,12 @@ void AudioRendererBase::ResumeAfterUnderflow(bool buffer_more_audio) {
}
}
+void AudioRendererBase::SetVolume(float volume) {
+ if (stopped_)
+ return;
+ sink_->SetVolume(volume);
+}
+
void AudioRendererBase::DecodedAudioReady(scoped_refptr<Buffer> buffer) {
base::AutoLock auto_lock(lock_);
DCHECK(state_ == kPaused || state_ == kSeeking || state_ == kPlaying ||
@@ -203,12 +274,115 @@ void AudioRendererBase::DecodedAudioReady(scoped_refptr<Buffer> buffer) {
}
}
+void AudioRendererBase::SignalEndOfStream() {
+ DCHECK(received_end_of_stream_);
+ if (!rendered_end_of_stream_) {
+ rendered_end_of_stream_ = true;
+ host()->NotifyEnded();
+ }
+}
+
+void AudioRendererBase::ScheduleRead_Locked() {
+ lock_.AssertAcquired();
+ if (pending_read_ || state_ == kPaused)
+ return;
+ pending_read_ = true;
+ decoder_->Read(read_cb_);
+}
+
+void AudioRendererBase::SetPlaybackRate(float playback_rate) {
+ DCHECK_LE(0.0f, playback_rate);
+
+ if (!stopped_) {
+ // Notify sink of new playback rate.
+ sink_->SetPlaybackRate(playback_rate);
+
+ // We have two cases here:
+ // Play: GetPlaybackRate() == 0.0 && playback_rate != 0.0
+ // Pause: GetPlaybackRate() != 0.0 && playback_rate == 0.0
+ if (GetPlaybackRate() == 0.0f && playback_rate != 0.0f) {
+ DoPlay();
+ } else if (GetPlaybackRate() != 0.0f && playback_rate == 0.0f) {
+ // Pause is easy, we can always pause.
+ DoPause();
+ }
+ }
+
+ base::AutoLock auto_lock(lock_);
+ algorithm_->SetPlaybackRate(playback_rate);
+}
+
+float AudioRendererBase::GetPlaybackRate() {
+ base::AutoLock auto_lock(lock_);
+ return algorithm_->playback_rate();
+}
+
+bool AudioRendererBase::IsBeforeSeekTime(const scoped_refptr<Buffer>& buffer) {
+ return (state_ == kSeeking) && buffer && !buffer->IsEndOfStream() &&
+ (buffer->GetTimestamp() + buffer->GetDuration()) < seek_timestamp_;
+}
+
+int AudioRendererBase::Render(const std::vector<float*>& audio_data,
+ int number_of_frames,
+ int audio_delay_milliseconds) {
+ if (stopped_ || GetPlaybackRate() == 0.0f) {
+ // Output silence if stopped or the playback rate is zero.
+ for (size_t i = 0; i < audio_data.size(); ++i)
+ memset(audio_data[i], 0, sizeof(float) * number_of_frames);
+ return 0;
+ }
+
+ // Adjust the playback delay.
+ base::TimeDelta request_delay =
+ base::TimeDelta::FromMilliseconds(audio_delay_milliseconds);
+
+ // Finally we need to adjust the delay according to playback rate.
+ if (GetPlaybackRate() != 1.0f) {
+ request_delay = base::TimeDelta::FromMicroseconds(
+ static_cast<int64>(ceil(request_delay.InMicroseconds() *
+ GetPlaybackRate())));
+ }
+
+ int bytes_per_frame = audio_parameters_.GetBytesPerFrame();
+
+ const int buf_size = number_of_frames * bytes_per_frame;
+ scoped_array<uint8> buf(new uint8[buf_size]);
+
+ int frames_filled = FillBuffer(buf.get(), number_of_frames, request_delay);
+ int bytes_filled = frames_filled * bytes_per_frame;
+ DCHECK_LE(bytes_filled, buf_size);
+ UpdateEarliestEndTime(bytes_filled, request_delay, base::Time::Now());
+
+ // Deinterleave each audio channel.
+ int channels = audio_data.size();
+ for (int channel_index = 0; channel_index < channels; ++channel_index) {
+ media::DeinterleaveAudioChannel(buf.get(),
+ audio_data[channel_index],
+ channels,
+ channel_index,
+ bytes_per_frame / channels,
+ frames_filled);
+
+ // If FillBuffer() didn't give us enough data then zero out the remainder.
+ if (frames_filled < number_of_frames) {
+ int frames_to_zero = number_of_frames - frames_filled;
+ memset(audio_data[channel_index] + frames_filled,
+ 0,
+ sizeof(float) * frames_to_zero);
+ }
+ }
+ return frames_filled;
+}
+
uint32 AudioRendererBase::FillBuffer(uint8* dest,
uint32 requested_frames,
const base::TimeDelta& playback_delay) {
- // The timestamp of the last buffer written during the last call to
- // FillBuffer().
- base::TimeDelta last_fill_buffer_time;
+ // The |audio_time_buffered_| is the ending timestamp of the last frame
+ // buffered at the audio device. |playback_delay| is the amount of time
+ // buffered at the audio device. The current time can be computed by their
+ // difference.
+ base::TimeDelta current_time = audio_time_buffered_ - playback_delay;
+
size_t frames_written = 0;
base::Closure underflow_cb;
{
@@ -232,10 +406,6 @@ uint32 AudioRendererBase::FillBuffer(uint8* dest,
return zeros_to_write / bytes_per_frame_;
}
- // Save a local copy of last fill buffer time and reset the member.
- last_fill_buffer_time = last_fill_buffer_time_;
- last_fill_buffer_time_ = base::TimeDelta();
-
// Use three conditions to determine the end of playback:
// 1. Algorithm needs more audio data.
// 2. We've received an end of stream buffer.
@@ -251,7 +421,9 @@ uint32 AudioRendererBase::FillBuffer(uint8* dest,
// 3. Have not received an end of stream buffer.
if (algorithm_->NeedsMoreData()) {
if (received_end_of_stream_) {
- OnRenderEndOfStream();
+ // TODO(enal): schedule callback instead of polling.
+ if (base::Time::Now() >= earliest_end_time_)
+ SignalEndOfStream();
} else if (state_ == kPlaying) {
state_ = kUnderflow;
underflow_cb = underflow_cb_;
@@ -260,17 +432,17 @@ uint32 AudioRendererBase::FillBuffer(uint8* dest,
// Otherwise fill the buffer.
frames_written = algorithm_->FillBuffer(dest, requested_frames);
}
-
- // Get the current time.
- last_fill_buffer_time_ = algorithm_->GetTime();
}
- // Update the pipeline's time if it was set last time.
- base::TimeDelta new_current_time = last_fill_buffer_time - playback_delay;
- if (last_fill_buffer_time.InMicroseconds() > 0 &&
- (last_fill_buffer_time != last_fill_buffer_time_ ||
- new_current_time > host()->GetTime())) {
- time_cb_.Run(new_current_time, last_fill_buffer_time);
+ base::TimeDelta previous_time_buffered = audio_time_buffered_;
+ // The call to FillBuffer() on |algorithm_| has increased the amount of
+ // buffered audio data. Update the new amount of time buffered.
+ audio_time_buffered_ = algorithm_->GetTime();
+
+ if (previous_time_buffered.InMicroseconds() > 0 &&
+ (previous_time_buffered != audio_time_buffered_ ||
+ current_time > host()->GetTime())) {
+ time_cb_.Run(current_time, audio_time_buffered_);
}
if (!underflow_cb.is_null())
@@ -279,35 +451,33 @@ uint32 AudioRendererBase::FillBuffer(uint8* dest,
return frames_written;
}
-void AudioRendererBase::SignalEndOfStream() {
- DCHECK(received_end_of_stream_);
- if (!rendered_end_of_stream_) {
- rendered_end_of_stream_ = true;
- host()->NotifyEnded();
+void AudioRendererBase::UpdateEarliestEndTime(int bytes_filled,
+ base::TimeDelta request_delay,
+ base::Time time_now) {
+ if (bytes_filled != 0) {
+ base::TimeDelta predicted_play_time = ConvertToDuration(bytes_filled);
+ float playback_rate = GetPlaybackRate();
+ if (playback_rate != 1.0f) {
+ predicted_play_time = base::TimeDelta::FromMicroseconds(
+ static_cast<int64>(ceil(predicted_play_time.InMicroseconds() *
+ playback_rate)));
+ }
+ earliest_end_time_ =
+ std::max(earliest_end_time_,
+ time_now + request_delay + predicted_play_time);
}
}
-void AudioRendererBase::ScheduleRead_Locked() {
- lock_.AssertAcquired();
- if (pending_read_ || state_ == kPaused)
- return;
- pending_read_ = true;
- decoder_->Read(read_cb_);
-}
-
-void AudioRendererBase::SetPlaybackRate(float playback_rate) {
- base::AutoLock auto_lock(lock_);
- algorithm_->SetPlaybackRate(playback_rate);
-}
-
-float AudioRendererBase::GetPlaybackRate() {
- base::AutoLock auto_lock(lock_);
- return algorithm_->playback_rate();
+base::TimeDelta AudioRendererBase::ConvertToDuration(int bytes) {
+ if (bytes_per_second_) {
+ return base::TimeDelta::FromMicroseconds(
+ base::Time::kMicrosecondsPerSecond * bytes / bytes_per_second_);
+ }
+ return base::TimeDelta();
}
-bool AudioRendererBase::IsBeforeSeekTime(const scoped_refptr<Buffer>& buffer) {
- return (state_ == kSeeking) && buffer && !buffer->IsEndOfStream() &&
- (buffer->GetTimestamp() + buffer->GetDuration()) < seek_timestamp_;
+void AudioRendererBase::OnRenderError() {
+ host()->DisableAudioRenderer();
}
} // namespace media
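To make the end-of-stream workaround above concrete (example numbers of mine, not from the CL): with 16-bit stereo at 44.1 kHz, bytes_per_second_ = 44100 * 2 * 2 = 176,400. A Render() call that fills 8192 frames writes 32,768 bytes, so ConvertToDuration(32768) = 1,000,000 * 32768 / 176,400 microseconds, about 186 ms. UpdateEarliestEndTime() then pushes |earliest_end_time_| out to now + |request_delay| + 186 ms, and SignalEndOfStream() is deferred until that moment even if |algorithm_| runs dry earlier.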
diff --git a/media/filters/audio_renderer_base.h b/media/filters/audio_renderer_base.h
index 4059622..776bee0 100644
--- a/media/filters/audio_renderer_base.h
+++ b/media/filters/audio_renderer_base.h
@@ -2,19 +2,20 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// AudioRendererBase takes care of the tricky queuing work and provides simple
-// methods for subclasses to peek and poke at audio data. In addition to
-// AudioRenderer interface methods this classes doesn't implement, subclasses
-// must also implement the following methods:
-// OnInitialized
-// OnStop
-// OnRenderEndOfStream
+// Audio rendering unit utilizing an AudioRendererSink to output data.
//
-// The general assumption is that subclasses start a callback-based audio thread
-// which needs to be filled with decoded audio data. AudioDecoderBase provides
-// FillBuffer which handles filling the provided buffer, dequeuing items,
-// scheduling additional reads and updating the clock. In a sense,
-// AudioRendererBase is the producer and the subclass is the consumer.
+// This class lives inside three threads during its lifetime, namely:
+// 1. Render thread.
+// This object is created on the render thread.
+// 2. Pipeline thread
+// Initialize() is called here with the audio format.
+// Play/Pause/Seek also happens here.
+// 3. Audio thread created by the AudioRendererSink.
+// Render() is called here where audio data is decoded into raw PCM data.
+//
+// AudioRendererBase talks to an AudioRendererAlgorithmBase that takes care of
+// queueing audio data and stretching/shrinking audio data when playback rate !=
+// 1.0 or 0.0.
#ifndef MEDIA_FILTERS_AUDIO_RENDERER_BASE_H_
#define MEDIA_FILTERS_AUDIO_RENDERER_BASE_H_
@@ -23,22 +24,29 @@
#include "base/synchronization/lock.h"
#include "media/base/audio_decoder.h"
+#include "media/base/audio_renderer_sink.h"
#include "media/base/buffers.h"
#include "media/base/filters.h"
#include "media/filters/audio_renderer_algorithm_base.h"
namespace media {
-class MEDIA_EXPORT AudioRendererBase : public AudioRenderer {
+class MEDIA_EXPORT AudioRendererBase
+ : public AudioRenderer,
+ NON_EXPORTED_BASE(public media::AudioRendererSink::RenderCallback) {
public:
- AudioRendererBase();
+ // Methods called on Render thread ------------------------------------------
+ // An AudioRendererSink is used as the destination for the rendered audio.
+ explicit AudioRendererBase(media::AudioRendererSink* sink);
virtual ~AudioRendererBase();
+ // Methods called on pipeline thread ----------------------------------------
// Filter implementation.
virtual void Play(const base::Closure& callback) OVERRIDE;
virtual void Pause(const base::Closure& callback) OVERRIDE;
virtual void Flush(const base::Closure& callback) OVERRIDE;
virtual void Stop(const base::Closure& callback) OVERRIDE;
+ virtual void SetPlaybackRate(float rate) OVERRIDE;
virtual void Seek(base::TimeDelta time, const PipelineStatusCB& cb) OVERRIDE;
// AudioRenderer implementation.
@@ -48,26 +56,13 @@ class MEDIA_EXPORT AudioRendererBase : public AudioRenderer {
const TimeCB& time_cb) OVERRIDE;
virtual bool HasEnded() OVERRIDE;
virtual void ResumeAfterUnderflow(bool buffer_more_audio) OVERRIDE;
+ virtual void SetVolume(float volume) OVERRIDE;
- protected:
+ private:
+ friend class AudioRendererBaseTest;
FRIEND_TEST_ALL_PREFIXES(AudioRendererBaseTest, EndOfStream);
FRIEND_TEST_ALL_PREFIXES(AudioRendererBaseTest, Underflow_EndOfStream);
- // Subclasses should return true if they were able to initialize, false
- // otherwise.
- virtual bool OnInitialize(int bits_per_channel,
- ChannelLayout channel_layout,
- int sample_rate) = 0;
-
- // Called by Stop(). Subclasses should perform any necessary cleanup during
- // this time, such as stopping any running threads.
- virtual void OnStop() = 0;
-
- // Method called by FillBuffer() when it finds that it reached end of stream.
- // FillBuffer() cannot immediately signal end of stream event because browser
- // may have buffered data.
- virtual void OnRenderEndOfStream() = 0;
-
// Callback from the audio decoder delivering decoded audio samples.
void DecodedAudioReady(scoped_refptr<Buffer> buffer);
@@ -88,27 +83,39 @@ class MEDIA_EXPORT AudioRendererBase : public AudioRenderer {
// should the filled buffer be played. If FillBuffer() is called as the audio
// hardware plays the buffer, then |playback_delay| should be zero.
//
- // FillBuffer() calls OnRenderEndOfStream() when it reaches end of stream.
- // It is responsibility of derived class to provide implementation of
- // OnRenderEndOfStream() that calls SignalEndOfStream() when all the hardware
- // buffers become empty (i.e. when all the data written to the device has
- // been played).
+ // FillBuffer() calls SignalEndOfStream() when it reaches end of stream.
//
// Safe to call on any thread.
uint32 FillBuffer(uint8* dest,
uint32 requested_frames,
const base::TimeDelta& playback_delay);
- // Called by OnRenderEndOfStream() or some callback scheduled by derived class
- // to signal end of stream.
+ // Called at the end of stream when all the hardware buffers become empty
+ // (i.e. when all the data written to the device has been played).
void SignalEndOfStream();
- // Get/Set the playback rate of |algorithm_|.
- virtual void SetPlaybackRate(float playback_rate) OVERRIDE;
- virtual float GetPlaybackRate();
+ // Get the playback rate of |algorithm_|.
+ float GetPlaybackRate();
- private:
- friend class AudioRendererBaseTest;
+ // Convert number of bytes to duration of time using information about the
+ // number of channels, sample rate and sample bits.
+ base::TimeDelta ConvertToDuration(int bytes);
+
+ // Estimate earliest time when current buffer can stop playing.
+ void UpdateEarliestEndTime(int bytes_filled,
+ base::TimeDelta request_delay,
+ base::Time time_now);
+
+ // Methods called on pipeline thread ----------------------------------------
+ void DoPlay();
+ void DoPause();
+ void DoSeek();
+
+ // media::AudioRendererSink::RenderCallback implementation.
+ virtual int Render(const std::vector<float*>& audio_data,
+ int number_of_frames,
+ int audio_delay_milliseconds) OVERRIDE;
+ virtual void OnRenderError() OVERRIDE;
// Helper method that schedules an asynchronous read from the decoder and
// increments |pending_reads_|.
@@ -148,9 +155,9 @@ class MEDIA_EXPORT AudioRendererBase : public AudioRenderer {
bool received_end_of_stream_;
bool rendered_end_of_stream_;
- // Audio time at end of last call to FillBuffer().
+ // The timestamp of the last frame (i.e. furthest in the future) buffered.
// TODO(ralphl): Update this value after seeking.
- base::TimeDelta last_fill_buffer_time_;
+ base::TimeDelta audio_time_buffered_;
// Filter callbacks.
base::Closure pause_cb_;
@@ -164,6 +171,36 @@ class MEDIA_EXPORT AudioRendererBase : public AudioRenderer {
uint32 bytes_per_frame_;
+ // Used to calculate audio delay given bytes.
+ uint32 bytes_per_second_;
+
+ // A flag that indicates whether Stop() has been called on this filter.
+ bool stopped_;
+
+ // The sink (destination) for rendered audio.
+ scoped_refptr<media::AudioRendererSink> sink_;
+
+ // Set to true when OnInitialize() is called.
+ bool is_initialized_;
+
+ // We're supposed to know the amount of audio data the OS or hardware has
+ // buffered, but that is not always the case -- on my Linux box
+ // AudioBuffersState::hardware_delay_bytes never reaches 0.
+ //
+ // As a result we cannot use it to find out when the stream ends. If we just
+ // ignore buffered data we will notify the host that the stream ended before
+ // it actually did; I've seen it happen ~140ms too early when playing a
+ // ~150ms file.
+ //
+ // Instead of trying to invent an OS-specific solution for each and every OS
+ // we support, use a simple workaround: every time we fill the buffer we
+ // remember when it should stop playing, and do not assume that the buffer is
+ // empty until that time. The workaround is not bulletproof, as we don't know
+ // exactly when that particular data will start playing, but it is much
+ // better than nothing.
+ base::Time earliest_end_time_;
+
+ AudioParameters audio_parameters_;
+
AudioDecoder::ReadCB read_cb_;
DISALLOW_COPY_AND_ASSIGN(AudioRendererBase);
diff --git a/media/filters/audio_renderer_base_unittest.cc b/media/filters/audio_renderer_base_unittest.cc
index 9880e5c..ee756f1 100644
--- a/media/filters/audio_renderer_base_unittest.cc
+++ b/media/filters/audio_renderer_base_unittest.cc
@@ -16,10 +16,26 @@ using ::testing::_;
using ::testing::AnyNumber;
using ::testing::Invoke;
using ::testing::Return;
-using ::testing::ReturnPointee;
-using ::testing::SaveArg;
+using ::testing::NiceMock;
using ::testing::StrictMock;
+namespace {
+
+class MockAudioSink : public media::AudioRendererSink {
+ public:
+ MOCK_METHOD2(Initialize, void(const media::AudioParameters& params,
+ RenderCallback* callback));
+ MOCK_METHOD0(Start, void());
+ MOCK_METHOD0(Stop, void());
+ MOCK_METHOD1(Pause, void(bool flush));
+ MOCK_METHOD0(Play, void());
+ MOCK_METHOD1(SetPlaybackRate, void(float rate));
+ MOCK_METHOD1(SetVolume, bool(double volume));
+ MOCK_METHOD1(GetVolume, void(double* volume));
+};
+
+} // namespace
+
namespace media {
// Constants for distinguishing between muted audio and playing audio when using
@@ -27,30 +43,11 @@ namespace media {
static uint8 kMutedAudio = 0x00;
static uint8 kPlayingAudio = 0x99;
-// Mocked subclass of AudioRendererBase for testing purposes.
-class MockAudioRendererBase : public AudioRendererBase {
- public:
- MockAudioRendererBase()
- : AudioRendererBase() {}
- virtual ~MockAudioRendererBase() {}
-
- // AudioRenderer implementation.
- MOCK_METHOD1(SetVolume, void(float volume));
-
- // AudioRendererBase implementation.
- MOCK_METHOD3(OnInitialize, bool(int, ChannelLayout, int));
- MOCK_METHOD0(OnStop, void());
- MOCK_METHOD0(OnRenderEndOfStream, void());
-
- private:
- DISALLOW_COPY_AND_ASSIGN(MockAudioRendererBase);
-};
-
class AudioRendererBaseTest : public ::testing::Test {
public:
// Give the decoder some non-garbage media properties.
AudioRendererBaseTest()
- : renderer_(new MockAudioRendererBase()),
+ : renderer_(new AudioRendererBase(new NiceMock<MockAudioSink>())),
decoder_(new MockAudioDecoder()) {
renderer_->set_host(&host_);
@@ -59,13 +56,7 @@ class AudioRendererBaseTest : public ::testing::Test {
.WillByDefault(Invoke(this, &AudioRendererBaseTest::SaveReadCallback));
// Set up audio properties.
- ON_CALL(*decoder_, bits_per_channel())
- .WillByDefault(Return(16));
- ON_CALL(*decoder_, channel_layout())
- .WillByDefault(Return(CHANNEL_LAYOUT_MONO));
- ON_CALL(*decoder_, samples_per_second())
- .WillByDefault(Return(44100));
-
+ SetSupportedAudioDecoderProperties();
EXPECT_CALL(*decoder_, bits_per_channel())
.Times(AnyNumber());
EXPECT_CALL(*decoder_, channel_layout())
@@ -75,7 +66,6 @@ class AudioRendererBaseTest : public ::testing::Test {
}
virtual ~AudioRendererBaseTest() {
- EXPECT_CALL(*renderer_, OnStop());
renderer_->Stop(NewExpectedClosure());
}
@@ -91,6 +81,24 @@ class AudioRendererBaseTest : public ::testing::Test {
base::Unretained(this));
}
+ void SetSupportedAudioDecoderProperties() {
+ ON_CALL(*decoder_, bits_per_channel())
+ .WillByDefault(Return(16));
+ ON_CALL(*decoder_, channel_layout())
+ .WillByDefault(Return(CHANNEL_LAYOUT_MONO));
+ ON_CALL(*decoder_, samples_per_second())
+ .WillByDefault(Return(44100));
+ }
+
+ void SetUnsupportedAudioDecoderProperties() {
+ ON_CALL(*decoder_, bits_per_channel())
+ .WillByDefault(Return(3));
+ ON_CALL(*decoder_, channel_layout())
+ .WillByDefault(Return(CHANNEL_LAYOUT_UNSUPPORTED));
+ ON_CALL(*decoder_, samples_per_second())
+ .WillByDefault(Return(0));
+ }
+
void OnAudioTimeCallback(
base::TimeDelta current_time, base::TimeDelta max_time) {
CHECK(current_time <= max_time);
@@ -102,8 +110,6 @@ class AudioRendererBaseTest : public ::testing::Test {
}
void Initialize() {
- EXPECT_CALL(*renderer_, OnInitialize(_, _, _))
- .WillOnce(Return(true));
renderer_->Initialize(
decoder_, NewExpectedStatusCB(PIPELINE_OK), NewUnderflowClosure(),
NewAudioTimeClosure());
@@ -218,7 +224,7 @@ class AudioRendererBaseTest : public ::testing::Test {
}
// Fixture members.
- scoped_refptr<MockAudioRendererBase> renderer_;
+ scoped_refptr<AudioRendererBase> renderer_;
scoped_refptr<MockAudioDecoder> decoder_;
StrictMock<MockFilterHost> host_;
AudioDecoder::ReadCB read_cb_;
@@ -234,8 +240,7 @@ class AudioRendererBaseTest : public ::testing::Test {
};
TEST_F(AudioRendererBaseTest, Initialize_Failed) {
- EXPECT_CALL(*renderer_, OnInitialize(_, _, _))
- .WillOnce(Return(false));
+ SetUnsupportedAudioDecoderProperties();
renderer_->Initialize(
decoder_,
NewExpectedStatusCB(PIPELINE_ERROR_INITIALIZATION_FAILED),
@@ -246,8 +251,6 @@ TEST_F(AudioRendererBaseTest, Initialize_Failed) {
}
TEST_F(AudioRendererBaseTest, Initialize_Successful) {
- EXPECT_CALL(*renderer_, OnInitialize(_, _, _))
- .WillOnce(Return(true));
renderer_->Initialize(decoder_, NewExpectedStatusCB(PIPELINE_OK),
NewUnderflowClosure(), NewAudioTimeClosure());
@@ -285,8 +288,6 @@ TEST_F(AudioRendererBaseTest, EndOfStream) {
EXPECT_FALSE(renderer_->HasEnded());
// Drain internal buffer, now we should report ended.
- EXPECT_CALL(*renderer_, OnRenderEndOfStream())
- .WillOnce(Invoke(renderer_.get(), &AudioRendererBase::SignalEndOfStream));
EXPECT_CALL(host_, NotifyEnded());
EXPECT_TRUE(ConsumeBufferedData(bytes_buffered(), NULL));
EXPECT_TRUE(renderer_->HasEnded());
@@ -367,8 +368,6 @@ TEST_F(AudioRendererBaseTest, Underflow_EndOfStream) {
// stop reading after receiving an end of stream buffer. It should have also
// called NotifyEnded() http://crbug.com/106641
DeliverEndOfStream();
- EXPECT_CALL(*renderer_, OnRenderEndOfStream())
- .WillOnce(Invoke(renderer_.get(), &AudioRendererBase::SignalEndOfStream));
EXPECT_CALL(host_, NotifyEnded());
EXPECT_CALL(host_, GetTime()).WillOnce(Return(base::TimeDelta()));
diff --git a/media/filters/null_audio_renderer.cc b/media/filters/null_audio_renderer.cc
deleted file mode 100644
index f7ee27c..0000000
--- a/media/filters/null_audio_renderer.cc
+++ /dev/null
@@ -1,95 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include <algorithm>
-#include <cmath>
-
-#include "base/bind.h"
-#include "base/logging.h"
-#include "base/threading/platform_thread.h"
-#include "media/base/filter_host.h"
-#include "media/filters/null_audio_renderer.h"
-
-namespace media {
-
-// How "long" our buffer should be in terms of milliseconds. In OnInitialize
-// we calculate the size of one second of audio data and use this number to
-// allocate a buffer to pass to FillBuffer.
-static const size_t kBufferSizeInMilliseconds = 100;
-
-NullAudioRenderer::NullAudioRenderer()
- : AudioRendererBase(),
- bytes_per_millisecond_(0),
- buffer_size_(0),
- bytes_per_frame_(0),
- thread_("AudioThread") {
-}
-
-NullAudioRenderer::~NullAudioRenderer() {
- DCHECK(!thread_.IsRunning());
-}
-
-void NullAudioRenderer::SetVolume(float volume) {
- // Do nothing.
-}
-
-bool NullAudioRenderer::OnInitialize(int bits_per_channel,
- ChannelLayout channel_layout,
- int sample_rate) {
- // Calculate our bytes per millisecond value and allocate our buffer.
- int channels = ChannelLayoutToChannelCount(channel_layout);
- int bytes_per_channel = bits_per_channel / 8;
- bytes_per_frame_ = channels * bytes_per_channel;
-
- bytes_per_millisecond_ = (bytes_per_frame_ * sample_rate) /
- base::Time::kMillisecondsPerSecond;
-
- buffer_size_ =
- bytes_per_millisecond_ * kBufferSizeInMilliseconds;
-
- buffer_.reset(new uint8[buffer_size_]);
- DCHECK(buffer_.get());
-
- if (!thread_.Start())
- return false;
-
- thread_.message_loop()->PostTask(FROM_HERE, base::Bind(
- &NullAudioRenderer::FillBufferTask, this));
- return true;
-}
-
-void NullAudioRenderer::OnStop() {
- thread_.Stop();
-}
-
-void NullAudioRenderer::FillBufferTask() {
- base::TimeDelta delay;
-
- // Only consume buffers when actually playing.
- if (GetPlaybackRate() > 0.0f) {
- size_t requested_frames = buffer_size_ / bytes_per_frame_;
- size_t frames = FillBuffer(
- buffer_.get(), requested_frames, base::TimeDelta());
- size_t bytes = frames * bytes_per_frame_;
-
- // Calculate our sleep duration, taking playback rate into consideration.
- delay = base::TimeDelta::FromMilliseconds(
- bytes / (bytes_per_millisecond_ * GetPlaybackRate()));
- } else {
- // If paused, sleep for 10 milliseconds before polling again.
- delay = base::TimeDelta::FromMilliseconds(10);
- }
-
- // Sleep for at least one millisecond so we don't spin the CPU.
- MessageLoop::current()->PostDelayedTask(
- FROM_HERE,
- base::Bind(&NullAudioRenderer::FillBufferTask, this),
- std::max(delay, base::TimeDelta::FromMilliseconds(1)));
-}
-
-void NullAudioRenderer::OnRenderEndOfStream() {
- SignalEndOfStream();
-}
-
-} // namespace media
diff --git a/media/filters/null_audio_renderer.h b/media/filters/null_audio_renderer.h
deleted file mode 100644
index 03500ca..0000000
--- a/media/filters/null_audio_renderer.h
+++ /dev/null
@@ -1,65 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef MEDIA_FILTERS_NULL_AUDIO_RENDERER_H_
-#define MEDIA_FILTERS_NULL_AUDIO_RENDERER_H_
-
-// NullAudioRenderer effectively uses an extra thread to "throw away" the
-// audio data at a rate resembling normal playback speed. It's just like
-// decoding to /dev/null!
-//
-// NullAudioRenderer can also be used in situations where the client has no
-// audio device or we haven't written an audio implementation for a particular
-// platform yet.
-
-#include <deque>
-
-#include "base/memory/scoped_ptr.h"
-#include "base/threading/thread.h"
-#include "media/base/buffers.h"
-#include "media/base/filters.h"
-#include "media/filters/audio_renderer_base.h"
-
-namespace media {
-
-class MEDIA_EXPORT NullAudioRenderer : public AudioRendererBase {
- public:
- NullAudioRenderer();
- virtual ~NullAudioRenderer();
-
- // AudioRenderer implementation.
- virtual void SetVolume(float volume) OVERRIDE;
-
- protected:
- // AudioRendererBase implementation.
- virtual bool OnInitialize(int bits_per_channel,
- ChannelLayout channel_layout,
- int sample_rate) OVERRIDE;
- virtual void OnStop() OVERRIDE;
- virtual void OnRenderEndOfStream() OVERRIDE;
-
- private:
- // Audio thread task that periodically calls FillBuffer() to consume
- // audio data.
- void FillBufferTask();
-
- // A number to convert bytes written in FillBuffer to milliseconds based on
- // the audio format.
- size_t bytes_per_millisecond_;
-
- // A buffer passed to FillBuffer to advance playback.
- scoped_array<uint8> buffer_;
- size_t buffer_size_;
-
- size_t bytes_per_frame_;
-
- // Separate thread used to throw away data.
- base::Thread thread_;
-
- DISALLOW_COPY_AND_ASSIGN(NullAudioRenderer);
-};
-
-} // namespace media
-
-#endif // MEDIA_FILTERS_NULL_AUDIO_RENDERER_H_
diff --git a/media/filters/pipeline_integration_test_base.cc b/media/filters/pipeline_integration_test_base.cc
index 6fcead0..fb348f1 100644
--- a/media/filters/pipeline_integration_test_base.cc
+++ b/media/filters/pipeline_integration_test_base.cc
@@ -6,12 +6,13 @@
#include "base/bind.h"
#include "media/base/media_log.h"
+#include "media/audio/null_audio_sink.h"
+#include "media/filters/audio_renderer_base.h"
#include "media/filters/chunk_demuxer.h"
#include "media/filters/ffmpeg_audio_decoder.h"
#include "media/filters/ffmpeg_demuxer.h"
#include "media/filters/ffmpeg_video_decoder.h"
#include "media/filters/file_data_source.h"
-#include "media/filters/null_audio_renderer.h"
using ::testing::AnyNumber;
@@ -177,7 +178,7 @@ PipelineIntegrationTestBase::CreateFilterCollection(
base::Unretained(this)),
false);
collection->AddVideoRenderer(renderer_);
- collection->AddAudioRenderer(new NullAudioRenderer());
+ collection->AddAudioRenderer(new AudioRendererBase(new NullAudioSink()));
return collection.Pass();
}
diff --git a/media/media.gyp b/media/media.gyp
index 47d1337..7b5ca06 100644
--- a/media/media.gyp
+++ b/media/media.gyp
@@ -93,6 +93,8 @@
'audio/mac/audio_manager_mac.h',
'audio/mac/audio_output_mac.cc',
'audio/mac/audio_output_mac.h',
+ 'audio/null_audio_sink.cc',
+ 'audio/null_audio_sink.h',
'audio/pulse/pulse_output.cc',
'audio/pulse/pulse_output.h',
'audio/simple_sources.cc',
@@ -209,8 +211,6 @@
'filters/gpu_video_decoder.h',
'filters/in_memory_url_protocol.cc',
'filters/in_memory_url_protocol.h',
- 'filters/null_audio_renderer.cc',
- 'filters/null_audio_renderer.h',
'filters/video_frame_generator.cc',
'filters/video_frame_generator.h',
'filters/video_renderer_base.cc',
diff --git a/media/tools/player_wtl/movie.cc b/media/tools/player_wtl/movie.cc
index 781f377..110acc0 100644
--- a/media/tools/player_wtl/movie.cc
+++ b/media/tools/player_wtl/movie.cc
@@ -9,15 +9,16 @@
#include "base/threading/platform_thread.h"
#include "base/utf_string_conversions.h"
#include "media/audio/audio_manager.h"
+#include "media/audio/null_audio_sink.h"
#include "media/base/filter_collection.h"
#include "media/base/media_log.h"
#include "media/base/message_loop_factory.h"
#include "media/base/pipeline.h"
+#include "media/filters/audio_renderer_base.h"
#include "media/filters/ffmpeg_audio_decoder.h"
#include "media/filters/ffmpeg_demuxer.h"
#include "media/filters/ffmpeg_video_decoder.h"
#include "media/filters/file_data_source.h"
-#include "media/filters/null_audio_renderer.h"
#include "media/filters/video_renderer_base.h"
namespace media {
@@ -83,7 +84,8 @@ bool Movie::Open(const wchar_t* url, VideoRendererBase* video_renderer) {
"VideoDecoderThread")));
// TODO(vrk): Re-enabled audio. (crbug.com/112159)
- collection->AddAudioRenderer(new media::NullAudioRenderer());
+ collection->AddAudioRenderer(
+ new media::AudioRendererBase(new media::NullAudioSink()));
collection->AddVideoRenderer(video_renderer);
// Create and start our pipeline.
diff --git a/media/tools/player_x11/player_x11.cc b/media/tools/player_x11/player_x11.cc
index ce2ff07..b22e696 100644
--- a/media/tools/player_x11/player_x11.cc
+++ b/media/tools/player_x11/player_x11.cc
@@ -16,6 +16,7 @@
#include "base/threading/platform_thread.h"
#include "base/threading/thread.h"
#include "media/audio/audio_manager.h"
+#include "media/audio/null_audio_sink.h"
#include "media/base/filter_collection.h"
#include "media/base/media.h"
#include "media/base/media_log.h"
@@ -23,11 +24,11 @@
#include "media/base/message_loop_factory.h"
#include "media/base/pipeline.h"
#include "media/base/video_frame.h"
+#include "media/filters/audio_renderer_base.h"
#include "media/filters/ffmpeg_audio_decoder.h"
#include "media/filters/ffmpeg_demuxer.h"
#include "media/filters/ffmpeg_video_decoder.h"
#include "media/filters/file_data_source.h"
-#include "media/filters/null_audio_renderer.h"
#include "media/filters/video_renderer_base.h"
#include "media/tools/player_x11/data_source_logger.h"
#include "media/tools/player_x11/gl_video_renderer.h"
@@ -127,7 +128,8 @@ bool InitPipeline(MessageLoop* message_loop,
true);
collection->AddVideoRenderer(g_video_renderer);
- collection->AddAudioRenderer(new media::NullAudioRenderer());
+ collection->AddAudioRenderer(
+ new media::AudioRendererBase(new media::NullAudioSink()));
// Create the pipeline and start it.
*pipeline = new media::Pipeline(message_loop, new media::MediaLog());