summaryrefslogtreecommitdiffstats
path: root/content/renderer
diff options
context:
space:
mode:
authorajose <ajose@chromium.org>2015-11-17 10:03:27 -0800
committerCommit bot <commit-bot@chromium.org>2015-11-17 18:04:10 +0000
commit9b6d878b661f617cdbfc027a8c9b9b444aab948c (patch)
treec87d69178f008c8f0a9b29674272ff7320d37572 /content/renderer
parent4a441685857470500cf8cfa6df2ec63a642cdc56 (diff)
downloadchromium_src-9b6d878b661f617cdbfc027a8c9b9b444aab948c.zip
chromium_src-9b6d878b661f617cdbfc027a8c9b9b444aab948c.tar.gz
chromium_src-9b6d878b661f617cdbfc027a8c9b9b444aab948c.tar.bz2
Add AudioTrackRecorder for audio component of MediaStream recording.
This is the first of ~three CLs for adding the audio component of MediaStreamRecording. These CLs will be: 1) Add AudioTrackRecorder, a MediaStreamAudioSink which will be owned by MediaRecorderHandler 2) Update WebmMuxer to mux Opus output of AudioTrackRecorder 3) Update MediaRecorderHandler to use AudioTrackRecorder BUG=528519 Review URL: https://codereview.chromium.org/1406113002 Cr-Commit-Position: refs/heads/master@{#360108}
Diffstat (limited to 'content/renderer')
-rw-r--r--content/renderer/media/DEPS1
-rw-r--r--content/renderer/media/audio_track_recorder.cc312
-rw-r--r--content/renderer/media/audio_track_recorder.h85
-rw-r--r--content/renderer/media/audio_track_recorder_unittest.cc253
4 files changed, 651 insertions, 0 deletions
diff --git a/content/renderer/media/DEPS b/content/renderer/media/DEPS
index 19fad4b..c7f6f64 100644
--- a/content/renderer/media/DEPS
+++ b/content/renderer/media/DEPS
@@ -3,5 +3,6 @@ include_rules = [
# For video copying, cropping and scaling.
# TODO(wuchengli): remove this when RTCVideoEncoder supports zero copy.
"+third_party/libyuv",
+ '+third_party/opus',
'+third_party/webrtc_overrides',
]
diff --git a/content/renderer/media/audio_track_recorder.cc b/content/renderer/media/audio_track_recorder.cc
new file mode 100644
index 0000000..006bcf3
--- /dev/null
+++ b/content/renderer/media/audio_track_recorder.cc
@@ -0,0 +1,312 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "content/renderer/media/audio_track_recorder.h"
+
+#include "base/bind.h"
+#include "base/stl_util.h"
+#include "media/audio/audio_parameters.h"
+#include "media/base/audio_bus.h"
+#include "media/base/bind_to_current_loop.h"
+#include "third_party/opus/src/include/opus.h"
+
+// Note that this code follows the Chrome media convention of defining a "frame"
+// as "one multi-channel sample" as opposed to another common definition
+// meaning "a chunk of samples". Here this second definition of "frame" is
+// called a "buffer"; so what might be called "frame duration" is instead
+// "buffer duration", and so on.
+
+namespace content {
+
+namespace {
+
+// Limits used by AudioTrackRecorder::AudioEncoder. Plain enum so the values
+// are directly usable as ints in arithmetic and libopus calls.
+enum {
+  // This is the recommended value, according to documentation in
+  // third_party/opus/src/include/opus.h, so that the Opus encoder does not
+  // degrade the audio due to memory constraints.
+  OPUS_MAX_PAYLOAD_SIZE = 4000,
+
+  // Support for max sampling rate of 48KHz, 2 channels, 60 ms duration.
+  // (48 frames per ms * 2 channels * 60 ms = total interleaved samples.)
+  MAX_SAMPLES_PER_BUFFER = 48 * 2 * 60,
+};
+
+}  // anonymous namespace
+
+// Nested class encapsulating opus-related encoding details.
+// AudioEncoder is created and destroyed on ATR's main thread (usually the
+// main render thread) but otherwise should operate entirely on
+// |encoder_thread_|, which is owned by AudioTrackRecorder. Be sure to delete
+// |encoder_thread_| before deleting the AudioEncoder using it.
+class AudioTrackRecorder::AudioEncoder
+    : public base::RefCountedThreadSafe<AudioEncoder> {
+ public:
+  explicit AudioEncoder(const OnEncodedAudioCB& on_encoded_audio_cb)
+      : on_encoded_audio_cb_(on_encoded_audio_cb), opus_encoder_(nullptr) {
+    // AudioEncoder is constructed on the thread that ATR lives on, but should
+    // operate only on the encoder thread after that. Reset
+    // |encoder_thread_checker_| here, as the next call to CalledOnValidThread()
+    // will be from the encoder thread.
+    encoder_thread_checker_.DetachFromThread();
+  }
+
+  // (Re)configures the encoder for |params|; no-op when |params| equals the
+  // current configuration. Runs on the encoder thread.
+  void OnSetFormat(const media::AudioParameters& params);
+
+  // Buffers |audio_bus| and fires |on_encoded_audio_cb_| once per completed
+  // Opus buffer. Runs on the encoder thread.
+  void EncodeAudio(scoped_ptr<media::AudioBus> audio_bus,
+                   const base::TimeTicks& capture_time);
+
+ private:
+  friend class base::RefCountedThreadSafe<AudioEncoder>;
+
+  ~AudioEncoder();
+
+  // True once opus_encoder_create() has succeeded in OnSetFormat().
+  bool is_initialized() const { return !!opus_encoder_; }
+
+  void DestroyExistingOpusEncoder();
+
+  void TransferSamplesIntoBuffer(const media::AudioBus* audio_bus,
+                                 int source_offset,
+                                 int buffer_fill_offset,
+                                 int num_samples);
+  bool EncodeFromFilledBuffer(std::string* out);
+
+  // Delivers encoded packets; bound to the ATR owner's message loop.
+  const OnEncodedAudioCB on_encoded_audio_cb_;
+
+  base::ThreadChecker encoder_thread_checker_;
+
+  // In the case where a call to EncodeAudio() cannot completely fill the
+  // buffer, this points to the position at which to populate data in a later
+  // call.
+  int buffer_fill_end_;
+
+  // Frames per Opus buffer, derived from the sample rate and
+  // |buffer_duration_| in OnSetFormat().
+  int frames_per_buffer_;
+
+  // The duration of one set of frames of encoded audio samples.
+  base::TimeDelta buffer_duration_;
+
+  // Most recent parameters accepted by OnSetFormat().
+  media::AudioParameters audio_params_;
+
+  // Buffer for passing AudioBus data to OpusEncoder.
+  scoped_ptr<float[]> buffer_;
+
+  // Raw libopus handle; released via opus_encoder_destroy().
+  OpusEncoder* opus_encoder_;
+
+  DISALLOW_COPY_AND_ASSIGN(AudioEncoder);
+};
+
+AudioTrackRecorder::AudioEncoder::~AudioEncoder() {
+  // We don't DCHECK that we're on the encoder thread here, as it should have
+  // already been deleted at this point.
+  // (As a RefCountedThreadSafe object, destruction happens on whichever
+  // thread drops the last reference.)
+  DestroyExistingOpusEncoder();
+}
+
+// Rebuilds the Opus encoder and interleave buffer for |params|. On any
+// failure (invalid params, unsupported sample rate, libopus error) the
+// encoder is left uninitialized, so EncodeAudio() drops audio until a valid
+// format arrives; |audio_params_| is only updated on full success.
+void AudioTrackRecorder::AudioEncoder::OnSetFormat(
+    const media::AudioParameters& params) {
+  DCHECK(encoder_thread_checker_.CalledOnValidThread());
+  if (audio_params_.Equals(params))
+    return;
+
+  DestroyExistingOpusEncoder();
+
+  if (!params.IsValid()) {
+    DLOG(ERROR) << "Invalid audio params: " << params.AsHumanReadableString();
+    return;
+  }
+
+  buffer_duration_ = base::TimeDelta::FromMilliseconds(
+      AudioTrackRecorder::GetOpusBufferDuration(params.sample_rate()));
+  if (buffer_duration_ == base::TimeDelta()) {
+    DLOG(ERROR) << "Could not find a valid |buffer_duration| for the given "
+                << "sample rate: " << params.sample_rate();
+    return;
+  }
+
+  frames_per_buffer_ =
+      params.sample_rate() * buffer_duration_.InMilliseconds() / 1000;
+  if (frames_per_buffer_ * params.channels() > MAX_SAMPLES_PER_BUFFER) {
+    DLOG(ERROR) << "Invalid |frames_per_buffer_|: " << frames_per_buffer_;
+    return;
+  }
+
+  // Initialize AudioBus buffer for OpusEncoder.
+  buffer_fill_end_ = 0;
+  buffer_.reset(new float[params.channels() * frames_per_buffer_]);
+
+  // Initialize OpusEncoder.
+  int opus_result;
+  opus_encoder_ = opus_encoder_create(params.sample_rate(), params.channels(),
+                                      OPUS_APPLICATION_AUDIO, &opus_result);
+  if (opus_result < 0) {
+    DLOG(ERROR) << "Couldn't init opus encoder: " << opus_strerror(opus_result);
+    return;
+  }
+
+  // Note: As of 2013-10-31, the encoder in "auto bitrate" mode would use a
+  // variable bitrate up to 102kbps for 2-channel, 48 kHz audio and a 10 ms
+  // buffer duration. The opus library authors may, of course, adjust this in
+  // later versions.
+  if (opus_encoder_ctl(opus_encoder_, OPUS_SET_BITRATE(OPUS_AUTO)) != OPUS_OK) {
+    DLOG(ERROR) << "Failed to set opus bitrate.";
+    return;
+  }
+
+  audio_params_ = params;
+}
+
+// Appends |audio_bus| into the interleaved |buffer_|; every time the buffer
+// reaches |frames_per_buffer_| frames it is Opus-encoded and delivered via
+// |on_encoded_audio_cb_|, stamped with the capture time of the buffer's
+// first frame (extrapolated backwards from |capture_time|).
+void AudioTrackRecorder::AudioEncoder::EncodeAudio(
+    scoped_ptr<media::AudioBus> audio_bus,
+    const base::TimeTicks& capture_time) {
+  DCHECK(encoder_thread_checker_.CalledOnValidThread());
+  DCHECK_EQ(audio_bus->channels(), audio_params_.channels());
+
+  if (!is_initialized())
+    return;
+
+  // How much of the current (partially filled) buffer predates |audio_bus|.
+  base::TimeDelta buffer_fill_duration =
+      buffer_fill_end_ * buffer_duration_ / frames_per_buffer_;
+  base::TimeTicks buffer_capture_time = capture_time - buffer_fill_duration;
+
+  // Encode all audio in |audio_bus| into zero or more packets.
+  int src_pos = 0;
+  while (src_pos < audio_bus->frames()) {
+    const int num_samples_to_xfer = std::min(
+        frames_per_buffer_ - buffer_fill_end_, audio_bus->frames() - src_pos);
+    TransferSamplesIntoBuffer(audio_bus.get(), src_pos, buffer_fill_end_,
+                              num_samples_to_xfer);
+    src_pos += num_samples_to_xfer;
+    buffer_fill_end_ += num_samples_to_xfer;
+
+    // A partially filled buffer waits for the next EncodeAudio() call.
+    if (buffer_fill_end_ < frames_per_buffer_)
+      break;
+
+    scoped_ptr<std::string> encoded_data(new std::string());
+    if (EncodeFromFilledBuffer(encoded_data.get())) {
+      on_encoded_audio_cb_.Run(audio_params_, encoded_data.Pass(),
+                               buffer_capture_time);
+    }
+
+    // Reset the capture timestamp and internal buffer for next set of frames.
+    buffer_capture_time += buffer_duration_;
+    buffer_fill_end_ = 0;
+  }
+}
+
+void AudioTrackRecorder::AudioEncoder::DestroyExistingOpusEncoder() {
+  // We don't DCHECK that we're on the encoder thread here, as this could be
+  // called from the dtor (main thread) or from OnSetFormat() (encoder thread).
+  if (opus_encoder_) {
+    opus_encoder_destroy(opus_encoder_);
+    opus_encoder_ = nullptr;
+  }
+}
+
+// Copies |num_samples| frames per channel, starting at |source_offset| in the
+// planar |audio_bus|, into |buffer_| starting at frame |buffer_fill_offset|,
+// interleaving channels as libopus expects.
+void AudioTrackRecorder::AudioEncoder::TransferSamplesIntoBuffer(
+    const media::AudioBus* audio_bus,
+    int source_offset,
+    int buffer_fill_offset,
+    int num_samples) {
+  // TODO(ajose): Consider replacing with AudioBus::ToInterleaved().
+  // http://crbug.com/547918
+  DCHECK(encoder_thread_checker_.CalledOnValidThread());
+  DCHECK(is_initialized());
+  // Opus requires channel-interleaved samples in a single array.
+  for (int ch = 0; ch < audio_bus->channels(); ++ch) {
+    const float* src = audio_bus->channel(ch) + source_offset;
+    const float* const src_end = src + num_samples;
+    float* dest =
+        buffer_.get() + buffer_fill_offset * audio_params_.channels() + ch;
+    // Write this channel's samples every |channels()| slots in |buffer_|.
+    for (; src < src_end; ++src, dest += audio_params_.channels())
+      *dest = *src;
+  }
+}
+
+// Opus-encodes the full interleaved |buffer_| into |out|. Returns true and
+// resizes |out| to the packet length on success; returns false when the
+// packet need not be transmitted or on encoder error.
+bool AudioTrackRecorder::AudioEncoder::EncodeFromFilledBuffer(
+    std::string* out) {
+  DCHECK(encoder_thread_checker_.CalledOnValidThread());
+  DCHECK(is_initialized());
+
+  out->resize(OPUS_MAX_PAYLOAD_SIZE);
+  const opus_int32 result = opus_encode_float(
+      opus_encoder_, buffer_.get(), frames_per_buffer_,
+      reinterpret_cast<uint8*>(string_as_array(out)), OPUS_MAX_PAYLOAD_SIZE);
+  if (result > 1) {
+    // TODO(ajose): Investigate improving this. http://crbug.com/547918
+    out->resize(result);
+    return true;
+  }
+  // If |result| in {0,1}, do nothing; the documentation says that a return
+  // value of zero or one means the packet does not need to be transmitted.
+  // Otherwise, we have an error.
+  DLOG_IF(ERROR, result < 0) << __FUNCTION__
+                             << " failed: " << opus_strerror(result);
+  return false;
+}
+
+// Wraps |on_encoded_audio_cb| with BindToCurrentLoop so encoded audio is
+// delivered back on the message loop of the thread constructing this object
+// (ATR's main thread), then starts the encoder thread and attaches this
+// object as a sink of |track|.
+AudioTrackRecorder::AudioTrackRecorder(
+    const blink::WebMediaStreamTrack& track,
+    const OnEncodedAudioCB& on_encoded_audio_cb)
+    : track_(track),
+      encoder_(new AudioEncoder(media::BindToCurrentLoop(on_encoded_audio_cb))),
+      encoder_thread_("AudioEncoderThread") {
+  DCHECK(main_render_thread_checker_.CalledOnValidThread());
+  DCHECK(!track_.isNull());
+  DCHECK(track_.extraData());
+
+  // Start the |encoder_thread_|. From this point on, |encoder_| should work
+  // only on |encoder_thread_|, as enforced by DCHECKs.
+  DCHECK(!encoder_thread_.IsRunning());
+  encoder_thread_.Start();
+
+  // Connect the source provider to the track as a sink.
+  MediaStreamAudioSink::AddToAudioTrack(this, track_);
+}
+
+AudioTrackRecorder::~AudioTrackRecorder() {
+  DCHECK(main_render_thread_checker_.CalledOnValidThread());
+  // Detach from the track so no further OnData()/OnSetFormat() calls arrive
+  // while members (including |encoder_thread_|) are torn down.
+  MediaStreamAudioSink::RemoveFromAudioTrack(this, track_);
+}
+
+// MediaStreamAudioSink implementation: called on the capture thread; forwards
+// the new format to |encoder_| on |encoder_thread_|.
+void AudioTrackRecorder::OnSetFormat(const media::AudioParameters& params) {
+  DCHECK(encoder_thread_.IsRunning());
+  // If the source is restarted, might have changed to another capture thread.
+  capture_thread_checker_.DetachFromThread();
+  DCHECK(capture_thread_checker_.CalledOnValidThread());
+
+  encoder_thread_.task_runner()->PostTask(
+      FROM_HERE, base::Bind(&AudioEncoder::OnSetFormat, encoder_, params));
+}
+
+// MediaStreamAudioSink implementation: called on the capture thread. Copies
+// |audio_bus| (encoding happens asynchronously, after this call returns) and
+// posts the copy to |encoder_| on |encoder_thread_|.
+void AudioTrackRecorder::OnData(const media::AudioBus& audio_bus,
+                                base::TimeTicks capture_time) {
+  DCHECK(encoder_thread_.IsRunning());
+  DCHECK(capture_thread_checker_.CalledOnValidThread());
+  DCHECK(!capture_time.is_null());
+
+  scoped_ptr<media::AudioBus> audio_data =
+      media::AudioBus::Create(audio_bus.channels(), audio_bus.frames());
+  audio_bus.CopyTo(audio_data.get());
+
+  encoder_thread_.task_runner()->PostTask(
+      FROM_HERE, base::Bind(&AudioEncoder::EncodeAudio, encoder_,
+                            base::Passed(&audio_data), capture_time));
+}
+
+int AudioTrackRecorder::GetOpusBufferDuration(int sample_rate) {
+  // Valid buffer durations in milliseconds. Note there are other valid
+  // durations for Opus, see https://tools.ietf.org/html/rfc6716#section-2.1.4
+  // Descending order as longer durations can increase compression performance.
+  const std::vector<int> opus_valid_buffer_durations_ms = {60, 40, 20, 10};
+
+  // Search for a duration such that |sample_rate| * |possible_duration| is a
+  // whole multiple of 1000, i.e. a buffer of that duration holds an integral
+  // number of frames at |sample_rate|.
+  for (auto possible_duration : opus_valid_buffer_durations_ms) {
+    if (sample_rate * possible_duration % 1000 == 0) {
+      return possible_duration;
+    }
+  }
+
+  // Otherwise, couldn't find a good duration.
+  return 0;
+}
+
+} // namespace content
diff --git a/content/renderer/media/audio_track_recorder.h b/content/renderer/media/audio_track_recorder.h
new file mode 100644
index 0000000..353331c
--- /dev/null
+++ b/content/renderer/media/audio_track_recorder.h
@@ -0,0 +1,85 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CONTENT_RENDERER_MEDIA_AUDIO_TRACK_RECORDER_H_
+#define CONTENT_RENDERER_MEDIA_AUDIO_TRACK_RECORDER_H_
+
+#include "base/callback.h"
+#include "base/macros.h"
+#include "base/memory/ref_counted.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/threading/thread.h"
+#include "base/threading/thread_checker.h"
+#include "base/time/time.h"
+#include "content/public/renderer/media_stream_audio_sink.h"
+#include "third_party/WebKit/public/platform/WebMediaStreamTrack.h"
+
+namespace media {
+class AudioBus;
+} // namespace media
+
+namespace content {
+
+// AudioTrackRecorder is a MediaStreamAudioSink that encodes the audio buses
+// received from a Stream Audio Track. The class is constructed on a
+// single thread (the main Render thread) but can receive MediaStreamAudioSink-
+// related calls on a different "live audio" thread (referred to internally as
+// the "capture thread"). It owns an internal thread to use for encoding, on
+// which lives an AudioEncoder (a private nested class of ATR) with its own
+// threading subtleties, see the implementation file.
+class CONTENT_EXPORT AudioTrackRecorder
+    : NON_EXPORTED_BASE(public MediaStreamAudioSink) {
+ public:
+  // Invoked (on the thread that constructed the ATR) with each encoded Opus
+  // packet, the parameters it was encoded under, and the capture time of the
+  // packet's first frame.
+  using OnEncodedAudioCB =
+      base::Callback<void(const media::AudioParameters& params,
+                          scoped_ptr<std::string> encoded_data,
+                          base::TimeTicks capture_time)>;
+
+  AudioTrackRecorder(const blink::WebMediaStreamTrack& track,
+                     const OnEncodedAudioCB& on_encoded_audio_cb);
+  ~AudioTrackRecorder() override;
+
+  // Implement MediaStreamAudioSink.
+  void OnSetFormat(const media::AudioParameters& params) override;
+  void OnData(const media::AudioBus& audio_bus,
+              base::TimeTicks capture_time) override;
+
+ private:
+  friend class AudioTrackRecorderTest;
+  // NOTE(review): this nested forward declaration is not referenced anywhere
+  // in this header (all uses are the qualified media::AudioParameters) --
+  // confirm whether it is still needed.
+  class AudioParameters;
+
+  // Forward declaration of nested class for handling encoding.
+  // See the implementation file for details.
+  class AudioEncoder;
+
+  // Returns the Opus buffer duration in milliseconds, or zero if none will work
+  // for the given |sample_rate|.
+  static int GetOpusBufferDuration(int sample_rate);
+
+  // Used to check that we are destroyed on the same thread we were created on.
+  base::ThreadChecker main_render_thread_checker_;
+
+  // Used to check that MediaStreamAudioSink's methods are called on the
+  // capture audio thread.
+  base::ThreadChecker capture_thread_checker_;
+
+  // We need to hold on to the Blink track to remove ourselves on destruction.
+  const blink::WebMediaStreamTrack track_;
+
+  // Thin wrapper around OpusEncoder.
+  // |encoder_| should be initialized before |encoder_thread_| such that
+  // |encoder_thread_| is destructed first. This, combined with all
+  // AudioEncoder work (aside from construction and destruction) happening on
+  // |encoder_thread_|, should allow us to be sure that all AudioEncoder work is
+  // done by the time we destroy it on ATR's thread.
+  const scoped_refptr<AudioEncoder> encoder_;
+  // The thread on which |encoder_| works.
+  base::Thread encoder_thread_;
+
+  DISALLOW_COPY_AND_ASSIGN(AudioTrackRecorder);
+};
+
+} // namespace content
+
+#endif // CONTENT_RENDERER_MEDIA_AUDIO_TRACK_RECORDER_H_
diff --git a/content/renderer/media/audio_track_recorder_unittest.cc b/content/renderer/media/audio_track_recorder_unittest.cc
new file mode 100644
index 0000000..1966464
--- /dev/null
+++ b/content/renderer/media/audio_track_recorder_unittest.cc
@@ -0,0 +1,253 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "content/renderer/media/audio_track_recorder.h"
+
+#include "base/run_loop.h"
+#include "base/stl_util.h"
+#include "base/strings/utf_string_conversions.h"
+#include "content/renderer/media/media_stream_audio_source.h"
+#include "content/renderer/media/mock_media_constraint_factory.h"
+#include "content/renderer/media/webrtc/webrtc_local_audio_track_adapter.h"
+#include "content/renderer/media/webrtc_local_audio_track.h"
+#include "media/audio/simple_sources.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "third_party/WebKit/public/web/WebHeap.h"
+#include "third_party/opus/src/include/opus.h"
+
+using ::testing::_;
+using ::testing::DoAll;
+using ::testing::InSequence;
+using ::testing::Mock;
+using ::testing::Return;
+using ::testing::SaveArg;
+using ::testing::TestWithParam;
+using ::testing::ValuesIn;
+
+namespace {
+
+// Input audio format.
+const media::AudioParameters::Format kDefaultInputFormat =
+    media::AudioParameters::AUDIO_PCM_LOW_LATENCY;
+const int kDefaultBitsPerSample = 16;
+const int kDefaultSampleRate = 48000;
+// The |frames_per_buffer| field of AudioParameters is not used by ATR.
+const int kIgnoreFramesPerBuffer = 1;
+// Sizing bound for the decode buffer: the longest buffer duration the tests
+// allow for (in milliseconds).
+const int kOpusMaxBufferDurationMS = 120;
+
+}  // namespace
+
+namespace content {
+
+// gmock action: runs the bound |closure| when the expectation fires.
+ACTION_P(RunClosure, closure) {
+  closure.Run();
+}
+
+// Audio configuration for one parameterized test case.
+struct ATRTestParams {
+  const media::AudioParameters::Format input_format;
+  const media::ChannelLayout channel_layout;
+  const int sample_rate;
+  const int bits_per_sample;
+};
+
+// One entry per audio configuration exercised by the parameterized tests.
+const ATRTestParams kATRTestParams[] = {
+    // Equivalent to default settings:
+    {media::AudioParameters::AUDIO_PCM_LOW_LATENCY, /* input format */
+     media::CHANNEL_LAYOUT_STEREO, /* channel layout */
+     kDefaultSampleRate, /* sample rate */
+     kDefaultBitsPerSample}, /* bits per sample */
+    // Change to mono:
+    {media::AudioParameters::AUDIO_PCM_LOW_LATENCY, media::CHANNEL_LAYOUT_MONO,
+     kDefaultSampleRate, kDefaultBitsPerSample},
+    // Different sampling rate as well:
+    {media::AudioParameters::AUDIO_PCM_LOW_LATENCY, media::CHANNEL_LAYOUT_MONO,
+     24000, kDefaultBitsPerSample},
+    {media::AudioParameters::AUDIO_PCM_LOW_LATENCY,
+     media::CHANNEL_LAYOUT_STEREO, 8000, kDefaultBitsPerSample},
+};
+
+// Fixture that builds a real AudioTrackRecorder over a fake Blink audio
+// track, feeds it sine-wave AudioBuses, and Opus-decodes the encoder output
+// to verify it.
+class AudioTrackRecorderTest : public TestWithParam<ATRTestParams> {
+ public:
+  // Initialize |first_params_| based on test parameters, and |second_params_|
+  // to always be the same thing.
+  AudioTrackRecorderTest()
+      : first_params_(GetParam().input_format,
+                      GetParam().channel_layout,
+                      GetParam().sample_rate,
+                      GetParam().bits_per_sample,
+                      kIgnoreFramesPerBuffer),
+        second_params_(kDefaultInputFormat,
+                       media::CHANNEL_LAYOUT_STEREO,
+                       kDefaultSampleRate,
+                       kDefaultBitsPerSample,
+                       kIgnoreFramesPerBuffer),
+        first_source_(first_params_.channels(), /* # channels */
+                      440, /* frequency */
+                      first_params_.sample_rate()), /* sample rate */
+        second_source_(second_params_.channels(),
+                       440,
+                       second_params_.sample_rate()),
+        opus_decoder_(nullptr) {
+    ResetDecoder(first_params_);
+    PrepareBlinkTrack();
+    audio_track_recorder_.reset(new AudioTrackRecorder(
+        blink_track_, base::Bind(&AudioTrackRecorderTest::OnEncodedAudio,
+                                 base::Unretained(this))));
+  }
+
+  ~AudioTrackRecorderTest() {
+    opus_decoder_destroy(opus_decoder_);
+    opus_decoder_ = nullptr;
+    // Destroy the recorder before the Blink track it is attached to.
+    audio_track_recorder_.reset();
+    blink_track_.reset();
+    blink::WebHeap::collectAllGarbageForTesting();
+  }
+
+  // Rebuilds |opus_decoder_| and the decode buffer for |params|; used at
+  // construction and whenever the test switches audio parameters.
+  void ResetDecoder(const media::AudioParameters& params) {
+    if (opus_decoder_) {
+      opus_decoder_destroy(opus_decoder_);
+      opus_decoder_ = nullptr;
+    }
+
+    int error;
+    opus_decoder_ =
+        opus_decoder_create(params.sample_rate(), params.channels(), &error);
+    EXPECT_TRUE(error == OPUS_OK && opus_decoder_);
+
+    max_frames_per_buffer_ =
+        kOpusMaxBufferDurationMS * params.sample_rate() / 1000;
+    buffer_.reset(new float[max_frames_per_buffer_ * params.channels()]);
+  }
+
+  // Returns a sine-wave AudioBus holding exactly one Opus buffer's worth of
+  // frames for |first_params_|.
+  scoped_ptr<media::AudioBus> GetFirstSourceAudioBus() {
+    scoped_ptr<media::AudioBus> bus(media::AudioBus::Create(
+        first_params_.channels(),
+        first_params_.sample_rate() *
+            audio_track_recorder_->GetOpusBufferDuration(
+                first_params_.sample_rate()) /
+            1000));
+    first_source_.OnMoreData(bus.get(), 0);
+    return bus.Pass();
+  }
+  // As above, for |second_params_|.
+  scoped_ptr<media::AudioBus> GetSecondSourceAudioBus() {
+    scoped_ptr<media::AudioBus> bus(media::AudioBus::Create(
+        second_params_.channels(),
+        second_params_.sample_rate() *
+            audio_track_recorder_->GetOpusBufferDuration(
+                second_params_.sample_rate()) /
+            1000));
+    second_source_.OnMoreData(bus.get(), 0);
+    return bus.Pass();
+  }
+
+  // Mock checkpoint the tests set expectations on; invoked by OnEncodedAudio()
+  // after decode verification.
+  MOCK_METHOD3(DoOnEncodedAudio,
+               void(const media::AudioParameters& params,
+                    std::string encoded_data,
+                    base::TimeTicks timestamp));
+
+  // Receives ATR output: decodes |encoded_data| to check the frame count,
+  // then forwards to the mock.
+  void OnEncodedAudio(const media::AudioParameters& params,
+                      scoped_ptr<std::string> encoded_data,
+                      base::TimeTicks timestamp) {
+    EXPECT_TRUE(!encoded_data->empty());
+
+    // Decode |encoded_data| and check we get the expected number of frames
+    // per buffer.
+    EXPECT_EQ(
+        params.sample_rate() *
+            audio_track_recorder_->GetOpusBufferDuration(params.sample_rate()) /
+            1000,
+        opus_decode_float(
+            opus_decoder_,
+            reinterpret_cast<uint8*>(string_as_array(encoded_data.get())),
+            encoded_data->size(), buffer_.get(), max_frames_per_buffer_, 0));
+
+    DoOnEncodedAudio(params, *encoded_data, timestamp);
+  }
+
+  const base::MessageLoop message_loop_;
+
+  // ATR and WebMediaStreamTrack for fooling it.
+  scoped_ptr<AudioTrackRecorder> audio_track_recorder_;
+  blink::WebMediaStreamTrack blink_track_;
+
+  // Two different sets of AudioParameters for testing re-init of ATR.
+  const media::AudioParameters first_params_;
+  const media::AudioParameters second_params_;
+
+  // AudioSources for creating AudioBuses.
+  media::SineWaveAudioSource first_source_;
+  media::SineWaveAudioSource second_source_;
+
+  // Decoder for verifying data was properly encoded.
+  OpusDecoder* opus_decoder_;
+  int max_frames_per_buffer_;
+  scoped_ptr<float[]> buffer_;
+
+ private:
+  // Prepares a blink track of a given MediaStreamType and attaches the native
+  // track, which can be used to capture audio data and pass it to the producer.
+  // Adapted from media::WebRTCLocalAudioSourceProviderTest.
+  void PrepareBlinkTrack() {
+    MockMediaConstraintFactory constraint_factory;
+    scoped_refptr<WebRtcAudioCapturer> capturer(
+        WebRtcAudioCapturer::CreateCapturer(
+            -1, StreamDeviceInfo(),
+            constraint_factory.CreateWebMediaConstraints(), NULL, NULL));
+    scoped_refptr<WebRtcLocalAudioTrackAdapter> adapter(
+        WebRtcLocalAudioTrackAdapter::Create(std::string(), NULL));
+    scoped_ptr<WebRtcLocalAudioTrack> native_track(
+        new WebRtcLocalAudioTrack(adapter.get(), capturer, NULL));
+    blink::WebMediaStreamSource audio_source;
+    audio_source.initialize(base::UTF8ToUTF16("dummy_source_id"),
+                            blink::WebMediaStreamSource::TypeAudio,
+                            base::UTF8ToUTF16("dummy_source_name"),
+                            false /* remote */, true /* readonly */);
+    blink_track_.initialize(blink::WebString::fromUTF8("audio_track"),
+                            audio_source);
+    blink_track_.setExtraData(native_track.release());
+  }
+
+  DISALLOW_COPY_AND_ASSIGN(AudioTrackRecorderTest);
+};
+
+// Feeds the recorder two buffers under the parameterized format, then
+// switches to |second_params_| and feeds one more, expecting one encoded
+// packet per buffer.
+TEST_P(AudioTrackRecorderTest, OnData) {
+  InSequence s;
+  base::RunLoop run_loop;
+  base::Closure quit_closure = run_loop.QuitClosure();
+
+  // Give ATR initial audio parameters.
+  audio_track_recorder_->OnSetFormat(first_params_);
+  // TODO(ajose): consider adding WillOnce(SaveArg...) and inspecting, as done
+  // in VTR unittests. http://crbug.com/548856
+  const base::TimeTicks time1 = base::TimeTicks::Now();
+  EXPECT_CALL(*this, DoOnEncodedAudio(_, _, time1)).Times(1);
+  audio_track_recorder_->OnData(*GetFirstSourceAudioBus(), time1);
+
+  // Send more audio.
+  const base::TimeTicks time2 = base::TimeTicks::Now();
+  EXPECT_CALL(*this, DoOnEncodedAudio(_, _, _))
+      .Times(1)
+      // Only reset the decoder once we've heard back:
+      .WillOnce(RunClosure(base::Bind(&AudioTrackRecorderTest::ResetDecoder,
+                                      base::Unretained(this), second_params_)));
+  audio_track_recorder_->OnData(*GetFirstSourceAudioBus(), time2);
+
+  // Give ATR new audio parameters.
+  audio_track_recorder_->OnSetFormat(second_params_);
+
+  // Send audio with different params.
+  const base::TimeTicks time3 = base::TimeTicks::Now();
+  EXPECT_CALL(*this, DoOnEncodedAudio(_, _, _))
+      .Times(1)
+      .WillOnce(RunClosure(quit_closure));
+  audio_track_recorder_->OnData(*GetSecondSourceAudioBus(), time3);
+
+  // Pump the message loop until the last packet has been delivered.
+  run_loop.Run();
+  Mock::VerifyAndClearExpectations(this);
+}
+
+INSTANTIATE_TEST_CASE_P(, AudioTrackRecorderTest, ValuesIn(kATRTestParams));
+
+} // namespace content