summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorqinmin <qinmin@chromium.org>2015-06-23 16:17:20 -0700
committerCommit bot <commit-bot@chromium.org>2015-06-23 23:17:46 +0000
commitbda540eb1cbfd36dda3eec0206913331ca1ab822 (patch)
treee97775e65e85e0ddefe72130a04d5a93b64b7717
parent223cb85203552633bc746564e63ba0b5105178ae (diff)
downloadchromium_src-bda540eb1cbfd36dda3eec0206913331ca1ab822.zip
chromium_src-bda540eb1cbfd36dda3eec0206913331ca1ab822.tar.gz
chromium_src-bda540eb1cbfd36dda3eec0206913331ca1ab822.tar.bz2
Add a silent audio sink to consume WebAudio data on silence detection.
The current Android implementation of WebAudio is not power friendly. It holds the AudioMix wakelock and causes a lot of battery consumption even if the tab is backgrounded. When an AudioContext is created, WebAudio will start enqueueing data to the output device. This happens even when no data is decoded or no audio buffer is appended. This CL adds a SilentAudioSink to consume data when consecutive empty audio buffers are received. In the idle mode, the player no longer enqueues data to the output device. It regularly checks the received data and exits the idle mode if non-empty data is encountered. The interval at which to check the data is calculated from the consumption time of the last received buffer. BUG=470153 Review URL: https://codereview.chromium.org/1195633003 Cr-Commit-Position: refs/heads/master@{#335799}
-rw-r--r--content/renderer/media/renderer_webaudiodevice_impl.cc94
-rw-r--r--content/renderer/media/renderer_webaudiodevice_impl.h30
-rw-r--r--media/base/audio_bus.cc10
-rw-r--r--media/base/audio_bus.h3
-rw-r--r--media/base/audio_bus_unittest.cc4
5 files changed, 123 insertions, 18 deletions
diff --git a/content/renderer/media/renderer_webaudiodevice_impl.cc b/content/renderer/media/renderer_webaudiodevice_impl.cc
index 81817f7..5b697db 100644
--- a/content/renderer/media/renderer_webaudiodevice_impl.cc
+++ b/content/renderer/media/renderer_webaudiodevice_impl.cc
@@ -6,9 +6,13 @@
#include "base/command_line.h"
#include "base/logging.h"
+#include "base/single_thread_task_runner.h"
+#include "base/thread_task_runner_handle.h"
+#include "base/time/time.h"
#include "content/renderer/media/audio_device_factory.h"
#include "content/renderer/render_frame_impl.h"
#include "media/audio/audio_output_device.h"
+#include "media/audio/null_audio_sink.h"
#include "media/base/media_switches.h"
#include "third_party/WebKit/public/web/WebLocalFrame.h"
#include "third_party/WebKit/public/web/WebView.h"
@@ -20,24 +24,34 @@ using blink::WebView;
namespace content {
+#if defined(OS_ANDROID)
+static const int kSilenceInSecondsToEnterIdleMode = 30;
+#endif
+
RendererWebAudioDeviceImpl::RendererWebAudioDeviceImpl(
const media::AudioParameters& params,
WebAudioDevice::RenderCallback* callback,
int session_id)
: params_(params),
client_callback_(callback),
- session_id_(session_id) {
+ session_id_(session_id),
+ task_runner_(base::ThreadTaskRunnerHandle::Get()),
+ null_audio_sink_(new media::NullAudioSink(task_runner_)),
+ is_using_null_audio_sink_(false),
+ first_buffer_after_silence_(media::AudioBus::Create(params_)),
+ is_first_buffer_after_silence_(false) {
DCHECK(client_callback_);
+ null_audio_sink_->Initialize(params_, this);
}
RendererWebAudioDeviceImpl::~RendererWebAudioDeviceImpl() {
- DCHECK(!output_device_.get());
+ DCHECK(!output_device_);
}
void RendererWebAudioDeviceImpl::start() {
DCHECK(thread_checker_.CalledOnValidThread());
- if (output_device_.get())
+ if (output_device_)
return; // Already started.
// Assumption: This method is being invoked within a V8 call stack. CHECKs
@@ -54,16 +68,22 @@ void RendererWebAudioDeviceImpl::start() {
render_frame ? render_frame->GetRoutingID(): MSG_ROUTING_NONE);
output_device_->InitializeWithSessionId(params_, this, session_id_);
output_device_->Start();
+ start_null_audio_sink_callback_.Reset(
+ base::Bind(&media::NullAudioSink::Play, null_audio_sink_));
// Note: Default behavior is to auto-play on start.
}
void RendererWebAudioDeviceImpl::stop() {
DCHECK(thread_checker_.CalledOnValidThread());
- if (output_device_.get()) {
+ if (output_device_) {
output_device_->Stop();
output_device_ = NULL;
}
+ null_audio_sink_->Stop();
+ is_using_null_audio_sink_ = false;
+ is_first_buffer_after_silence_ = false;
+ start_null_audio_sink_callback_.Cancel();
}
double RendererWebAudioDeviceImpl::sampleRate() {
@@ -72,21 +92,59 @@ double RendererWebAudioDeviceImpl::sampleRate() {
int RendererWebAudioDeviceImpl::Render(media::AudioBus* dest,
int audio_delay_milliseconds) {
- if (client_callback_) {
- // Wrap the output pointers using WebVector.
- WebVector<float*> web_audio_dest_data(
- static_cast<size_t>(dest->channels()));
- for (int i = 0; i < dest->channels(); ++i)
- web_audio_dest_data[i] = dest->channel(i);
-
- // TODO(xians): Remove the following |web_audio_source_data| after
- // changing the blink interface.
- WebVector<float*> web_audio_source_data(static_cast<size_t>(0));
- client_callback_->render(web_audio_source_data,
- web_audio_dest_data,
- dest->frames());
+#if defined(OS_ANDROID)
+ if (is_first_buffer_after_silence_) {
+ DCHECK(!is_using_null_audio_sink_);
+ first_buffer_after_silence_->CopyTo(dest);
+ is_first_buffer_after_silence_ = false;
+ return dest->frames();
}
-
+#endif
+ // Wrap the output pointers using WebVector.
+ WebVector<float*> web_audio_dest_data(
+ static_cast<size_t>(dest->channels()));
+ for (int i = 0; i < dest->channels(); ++i)
+ web_audio_dest_data[i] = dest->channel(i);
+
+ // TODO(xians): Remove the following |web_audio_source_data| after
+ // changing the blink interface.
+ WebVector<float*> web_audio_source_data(static_cast<size_t>(0));
+ client_callback_->render(web_audio_source_data,
+ web_audio_dest_data,
+ dest->frames());
+
+#if defined(OS_ANDROID)
+ const bool is_zero = dest->AreFramesZero();
+ if (!is_zero) {
+ first_silence_time_ = base::TimeTicks();
+ if (is_using_null_audio_sink_) {
+ // This is called on the main render thread when audio is detected.
+ output_device_->Play();
+ is_using_null_audio_sink_ = false;
+ is_first_buffer_after_silence_ = true;
+ dest->CopyTo(first_buffer_after_silence_.get());
+ task_runner_->PostTask(
+ FROM_HERE,
+ base::Bind(&media::NullAudioSink::Stop, null_audio_sink_));
+ }
+ } else if (!is_using_null_audio_sink_) {
+ // Called on the audio device thread.
+ const base::TimeTicks now = base::TimeTicks::Now();
+ if (first_silence_time_.is_null())
+ first_silence_time_ = now;
+ if (now - first_silence_time_
+ > base::TimeDelta::FromSeconds(kSilenceInSecondsToEnterIdleMode)) {
+ output_device_->Pause();
+ is_using_null_audio_sink_ = true;
+ // If Stop() is called right after the task is posted, need to cancel
+ // this task.
+ task_runner_->PostDelayedTask(
+ FROM_HERE,
+ start_null_audio_sink_callback_.callback(),
+ params_.GetBufferDuration());
+ }
+ }
+#endif
return dest->frames();
}
diff --git a/content/renderer/media/renderer_webaudiodevice_impl.h b/content/renderer/media/renderer_webaudiodevice_impl.h
index 13ce0f1..3f4e20e 100644
--- a/content/renderer/media/renderer_webaudiodevice_impl.h
+++ b/content/renderer/media/renderer_webaudiodevice_impl.h
@@ -5,6 +5,7 @@
#ifndef CONTENT_RENDERER_MEDIA_RENDERER_WEBAUDIODEVICE_IMPL_H_
#define CONTENT_RENDERER_MEDIA_RENDERER_WEBAUDIODEVICE_IMPL_H_
+#include "base/cancelable_callback.h"
#include "base/memory/ref_counted.h"
#include "base/threading/thread_checker.h"
#include "media/audio/audio_parameters.h"
@@ -12,8 +13,13 @@
#include "third_party/WebKit/public/platform/WebAudioDevice.h"
#include "third_party/WebKit/public/platform/WebVector.h"
+namespace base {
+class SingleThreadTaskRunner;
+}
+
namespace media {
class AudioOutputDevice;
+class NullAudioSink;
}
namespace content {
@@ -53,6 +59,30 @@ class RendererWebAudioDeviceImpl
// ID to allow browser to select the correct input device for unified IO.
int session_id_;
+ // Timeticks when the silence starts.
+ base::TimeTicks first_silence_time_ ;
+
+ // TaskRunner to post callbacks to the render thread.
+ scoped_refptr<base::SingleThreadTaskRunner> task_runner_;
+
+ // A fake audio sink object that consumes data when long period of silence
+ // audio is detected. This object lives on the render thread.
+ scoped_refptr<media::NullAudioSink> null_audio_sink_;
+
+ // Whether audio output is directed to |null_audio_sink_|.
+ bool is_using_null_audio_sink_;
+
+ // First audio buffer after silence finishes. We store this buffer so that
+ // it can be sent to the |output_device_| later after switching from
+ // |null_audio_sink_|.
+ scoped_ptr<media::AudioBus> first_buffer_after_silence_;
+
+ bool is_first_buffer_after_silence_;
+
+ // A cancelable task that is posted to start the |null_audio_sink_| after a
+ // period of silence. We do this on android to save battery consumption.
+ base::CancelableClosure start_null_audio_sink_callback_;
+
DISALLOW_COPY_AND_ASSIGN(RendererWebAudioDeviceImpl);
};
diff --git a/media/base/audio_bus.cc b/media/base/audio_bus.cc
index 719dbcd..2e34f1e 100644
--- a/media/base/audio_bus.cc
+++ b/media/base/audio_bus.cc
@@ -218,6 +218,16 @@ void AudioBus::Zero() {
ZeroFrames(frames_);
}
+bool AudioBus::AreFramesZero() const {
+ for (size_t i = 0; i < channel_data_.size(); ++i) {
+ for (int j = 0; j < frames_; ++j) {
+ if (channel_data_[i][j])
+ return false;
+ }
+ }
+ return true;
+}
+
int AudioBus::CalculateMemorySize(const AudioParameters& params) {
return CalculateMemorySizeInternal(
params.channels(), params.frames_per_buffer(), NULL);
diff --git a/media/base/audio_bus.h b/media/base/audio_bus.h
index 25e2839..cf598c2 100644
--- a/media/base/audio_bus.h
+++ b/media/base/audio_bus.h
@@ -101,6 +101,9 @@ class MEDIA_EXPORT AudioBus {
void ZeroFrames(int frames);
void ZeroFramesPartial(int start_frame, int frames);
+ // Checks if all frames are zero.
+ bool AreFramesZero() const;
+
// Scale internal channel values by |volume| >= 0. If an invalid value
// is provided, no adjustment is done.
void Scale(float volume);
diff --git a/media/base/audio_bus_unittest.cc b/media/base/audio_bus_unittest.cc
index 85d6aa3..ea06f0a 100644
--- a/media/base/audio_bus_unittest.cc
+++ b/media/base/audio_bus_unittest.cc
@@ -205,6 +205,7 @@ TEST_F(AudioBusTest, Zero) {
// Fill the bus with dummy data.
for (int i = 0; i < bus->channels(); ++i)
std::fill(bus->channel(i), bus->channel(i) + bus->frames(), i + 1);
+ EXPECT_FALSE(bus->AreFramesZero());
// Zero first half the frames of each channel.
bus->ZeroFrames(kFrameCount / 2);
@@ -214,6 +215,7 @@ TEST_F(AudioBusTest, Zero) {
VerifyValue(bus->channel(i) + kFrameCount / 2,
kFrameCount - kFrameCount / 2, i + 1);
}
+ EXPECT_FALSE(bus->AreFramesZero());
// Fill the bus with dummy data.
for (int i = 0; i < bus->channels(); ++i)
@@ -227,6 +229,7 @@ TEST_F(AudioBusTest, Zero) {
kFrameCount - kFrameCount / 2, 0);
VerifyValue(bus->channel(i), kFrameCount / 2, i + 1);
}
+ EXPECT_FALSE(bus->AreFramesZero());
// Fill the bus with dummy data.
for (int i = 0; i < bus->channels(); ++i)
@@ -238,6 +241,7 @@ TEST_F(AudioBusTest, Zero) {
SCOPED_TRACE("All Zero");
VerifyValue(bus->channel(i), bus->frames(), 0);
}
+ EXPECT_TRUE(bus->AreFramesZero());
}
// Each test vector represents two channels of data in the following arbitrary