summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--content/content_renderer.gypi9
-rw-r--r--content/public/renderer/media_stream_audio_sink.cc40
-rw-r--r--content/public/renderer/media_stream_audio_sink.h58
-rw-r--r--content/renderer/media/media_stream_audio_sink_owner.cc63
-rw-r--r--content/renderer/media/media_stream_audio_sink_owner.h52
-rw-r--r--content/renderer/media/media_stream_audio_track_sink.h66
-rw-r--r--content/renderer/media/media_stream_dependency_factory.cc12
-rw-r--r--content/renderer/media/media_stream_dependency_factory.h3
-rw-r--r--content/renderer/media/media_stream_impl.cc26
-rw-r--r--content/renderer/media/media_stream_impl.h2
-rw-r--r--content/renderer/media/media_stream_track_extra_data.cc5
-rw-r--r--content/renderer/media/media_stream_track_extra_data.h5
-rw-r--r--content/renderer/media/peer_connection_audio_sink_owner.cc66
-rw-r--r--content/renderer/media/peer_connection_audio_sink_owner.h52
-rw-r--r--content/renderer/media/remote_media_stream_impl.cc4
-rw-r--r--content/renderer/media/rtc_peer_connection_handler_unittest.cc4
-rw-r--r--content/renderer/media/webaudio_capturer_source.cc7
-rw-r--r--content/renderer/media/webrtc_audio_capturer.cc8
-rw-r--r--content/renderer/media/webrtc_audio_capturer_sink_owner.cc57
-rw-r--r--content/renderer/media/webrtc_audio_capturer_sink_owner.h66
-rw-r--r--content/renderer/media/webrtc_audio_capturer_unittest.cc40
-rw-r--r--content/renderer/media/webrtc_audio_device_impl.cc22
-rw-r--r--content/renderer/media/webrtc_audio_device_impl.h52
-rw-r--r--content/renderer/media/webrtc_audio_device_unittest.cc49
-rw-r--r--content/renderer/media/webrtc_local_audio_renderer.cc38
-rw-r--r--content/renderer/media/webrtc_local_audio_renderer.h37
-rw-r--r--content/renderer/media/webrtc_local_audio_source_provider.cc17
-rw-r--r--content/renderer/media/webrtc_local_audio_source_provider.h27
-rw-r--r--content/renderer/media/webrtc_local_audio_source_provider_unittest.cc23
-rw-r--r--content/renderer/media/webrtc_local_audio_track.cc100
-rw-r--r--content/renderer/media/webrtc_local_audio_track.h23
-rw-r--r--content/renderer/media/webrtc_local_audio_track_unittest.cc70
32 files changed, 702 insertions, 401 deletions
diff --git a/content/content_renderer.gypi b/content/content_renderer.gypi
index ddddb5d..24fda29 100644
--- a/content/content_renderer.gypi
+++ b/content/content_renderer.gypi
@@ -587,11 +587,16 @@
'<(DEPTH)/crypto/crypto.gyp:crypto',
],
'sources': [
+ 'public/renderer/media_stream_audio_sink.h',
+ 'public/renderer/media_stream_audio_sink.cc',
'public/renderer/webrtc_log_message_delegate.h',
'renderer/media/media_stream_audio_processor.cc',
'renderer/media/media_stream_audio_processor.h',
'renderer/media/media_stream_audio_processor_options.cc',
'renderer/media/media_stream_audio_processor_options.h',
+ 'renderer/media/media_stream_audio_sink_owner.cc',
+ 'renderer/media/media_stream_audio_sink_owner.h',
+ 'renderer/media/media_stream_audio_track_sink.h',
'renderer/media/media_stream_center.cc',
'renderer/media/media_stream_dependency_factory.cc',
'renderer/media/media_stream_dispatcher.cc',
@@ -601,6 +606,8 @@
'renderer/media/media_stream_source_observer.h',
'renderer/media/native_handle_impl.cc',
'renderer/media/native_handle_impl.h',
+ 'renderer/media/peer_connection_audio_sink_owner.cc',
+ 'renderer/media/peer_connection_audio_sink_owner.h',
'renderer/media/peer_connection_handler_base.cc',
'renderer/media/peer_connection_handler_base.h',
'renderer/media/peer_connection_identity_service.cc',
@@ -639,8 +646,6 @@
'renderer/media/webaudio_capturer_source.h',
'renderer/media/webrtc_audio_capturer.cc',
'renderer/media/webrtc_audio_capturer.h',
- 'renderer/media/webrtc_audio_capturer_sink_owner.cc',
- 'renderer/media/webrtc_audio_capturer_sink_owner.h',
'renderer/media/webrtc_audio_device_impl.cc',
'renderer/media/webrtc_audio_device_impl.h',
'renderer/media/webrtc_audio_device_not_impl.cc',
diff --git a/content/public/renderer/media_stream_audio_sink.cc b/content/public/renderer/media_stream_audio_sink.cc
new file mode 100644
index 0000000..f96c92b
--- /dev/null
+++ b/content/public/renderer/media_stream_audio_sink.cc
@@ -0,0 +1,40 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "content/public/renderer/media_stream_audio_sink.h"
+
+#include "base/logging.h"
+#include "content/renderer/media/media_stream_track_extra_data.h"
+#include "content/renderer/media/webrtc_local_audio_track.h"
+#include "third_party/WebKit/public/platform/WebMediaStreamSource.h"
+#include "third_party/WebKit/public/platform/WebMediaStreamTrack.h"
+
+namespace content {
+
+void MediaStreamAudioSink::AddToAudioTrack(
+ MediaStreamAudioSink* sink,
+ const blink::WebMediaStreamTrack& track) {
+ DCHECK(track.source().type() == blink::WebMediaStreamSource::TypeAudio);
+ MediaStreamTrackExtraData* extra_data =
+ static_cast<MediaStreamTrackExtraData*>(track.extraData());
+ // TODO(xians): Support remote audio track.
+ DCHECK(extra_data->is_local_track());
+ WebRtcLocalAudioTrack* audio_track =
+ static_cast<WebRtcLocalAudioTrack*>(extra_data->track().get());
+ audio_track->AddSink(sink);
+}
+
+void MediaStreamAudioSink::RemoveFromAudioTrack(
+ MediaStreamAudioSink* sink,
+ const blink::WebMediaStreamTrack& track) {
+ MediaStreamTrackExtraData* extra_data =
+ static_cast<MediaStreamTrackExtraData*>(track.extraData());
+ // TODO(xians): Support remote audio track.
+ DCHECK(extra_data->is_local_track());
+ WebRtcLocalAudioTrack* audio_track =
+ static_cast<WebRtcLocalAudioTrack*>(extra_data->track().get());
+ audio_track->RemoveSink(sink);
+}
+
+} // namespace content
diff --git a/content/public/renderer/media_stream_audio_sink.h b/content/public/renderer/media_stream_audio_sink.h
new file mode 100644
index 0000000..26ebd54
--- /dev/null
+++ b/content/public/renderer/media_stream_audio_sink.h
@@ -0,0 +1,58 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CONTENT_PUBLIC_RENDERER_MEDIA_STREAM_AUDIO_SINK_H_
+#define CONTENT_PUBLIC_RENDERER_MEDIA_STREAM_AUDIO_SINK_H_
+
+#include <vector>
+
+#include "base/basictypes.h"
+#include "content/common/content_export.h"
+
+namespace blink {
+class WebMediaStreamTrack;
+}
+
+namespace media {
+class AudioParameters;
+}
+
+namespace content {
+
+class MediaStreamAudioSink {
+ public:
+ // Adds a MediaStreamAudioSink to the audio track to receive audio data from
+ // the track.
+ // Called on the main render thread.
+ static void AddToAudioTrack(MediaStreamAudioSink* sink,
+ const blink::WebMediaStreamTrack& track);
+
+ // Removes a MediaStreamAudioSink from the audio track to stop receiving
+ // audio data from the track.
+ // Called on the main render thread.
+ static void RemoveFromAudioTrack(MediaStreamAudioSink* sink,
+ const blink::WebMediaStreamTrack& track);
+
+ // Callback on delivering the interleaved audio data.
+ // |audio_data| is the pointer to the audio data.
+ // |sample_rate| is the sample frequency of |audio_data|.
+ // |number_of_channels| is the number of audio channels of |audio_data|.
+ // |number_of_frames| is the number of audio frames in the |audio_data|.
+ // Called on real-time audio thread.
+ virtual void OnData(const int16* audio_data,
+ int sample_rate,
+ int number_of_channels,
+ int number_of_frames) = 0;
+
+ // Callback called when the format of the audio stream has changed.
+ // This is called on the same thread as OnData().
+ virtual void OnSetFormat(const media::AudioParameters& params) = 0;
+
+ protected:
+ virtual ~MediaStreamAudioSink() {}
+};
+
+} // namespace content
+
+#endif // CONTENT_PUBLIC_RENDERER_MEDIA_STREAM_AUDIO_SINK_H_
diff --git a/content/renderer/media/media_stream_audio_sink_owner.cc b/content/renderer/media/media_stream_audio_sink_owner.cc
new file mode 100644
index 0000000..53fe0e7
--- /dev/null
+++ b/content/renderer/media/media_stream_audio_sink_owner.cc
@@ -0,0 +1,63 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "content/renderer/media/media_stream_audio_sink_owner.h"
+
+#include "content/public/renderer/media_stream_audio_sink.h"
+#include "media/audio/audio_parameters.h"
+
+namespace content {
+
+MediaStreamAudioSinkOwner::MediaStreamAudioSinkOwner(MediaStreamAudioSink* sink)
+ : delegate_(sink) {
+}
+
+int MediaStreamAudioSinkOwner::OnData(const int16* audio_data,
+ int sample_rate,
+ int number_of_channels,
+ int number_of_frames,
+ const std::vector<int>& channels,
+ int audio_delay_milliseconds,
+ int current_volume,
+ bool need_audio_processing,
+ bool key_pressed) {
+ base::AutoLock lock(lock_);
+  // TODO(xians): Investigate the possibility of invoking the delegate
+  // without holding the lock.
+ if (delegate_) {
+ delegate_->OnData(audio_data,
+ sample_rate,
+ number_of_channels,
+ number_of_frames);
+ }
+
+ return 0;
+}
+
+void MediaStreamAudioSinkOwner::OnSetFormat(
+ const media::AudioParameters& params) {
+ base::AutoLock lock(lock_);
+ if (delegate_)
+ delegate_->OnSetFormat(params);
+}
+
+void MediaStreamAudioSinkOwner::Reset() {
+ base::AutoLock lock(lock_);
+ delegate_ = NULL;
+}
+
+bool MediaStreamAudioSinkOwner::IsEqual(
+ const MediaStreamAudioSink* other) const {
+ DCHECK(other);
+ base::AutoLock lock(lock_);
+ return (other == delegate_);
+}
+
+bool MediaStreamAudioSinkOwner::IsEqual(
+ const PeerConnectionAudioSink* other) const {
+ DCHECK(other);
+ return false;
+}
+
+} // namespace content
diff --git a/content/renderer/media/media_stream_audio_sink_owner.h b/content/renderer/media/media_stream_audio_sink_owner.h
new file mode 100644
index 0000000..d76cbc4
--- /dev/null
+++ b/content/renderer/media/media_stream_audio_sink_owner.h
@@ -0,0 +1,52 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CONTENT_RENDERER_MEDIA_MEDIA_STREAM_AUDIO_SINK_OWNER_H_
+#define CONTENT_RENDERER_MEDIA_MEDIA_STREAM_AUDIO_SINK_OWNER_H_
+
+#include <vector>
+
+#include "base/synchronization/lock.h"
+#include "content/renderer/media/media_stream_audio_track_sink.h"
+
+namespace content {
+
+class MediaStreamAudioSink;
+
+// Reference counted holder of MediaStreamAudioSink sinks.
+class MediaStreamAudioSinkOwner : public MediaStreamAudioTrackSink {
+ public:
+ explicit MediaStreamAudioSinkOwner(MediaStreamAudioSink* sink);
+
+ // MediaStreamAudioTrackSink implementation.
+ virtual int OnData(const int16* audio_data,
+ int sample_rate,
+ int number_of_channels,
+ int number_of_frames,
+ const std::vector<int>& channels,
+ int audio_delay_milliseconds,
+ int current_volume,
+ bool need_audio_processing,
+ bool key_pressed) OVERRIDE;
+ virtual void OnSetFormat(const media::AudioParameters& params) OVERRIDE;
+ virtual void Reset() OVERRIDE;
+ virtual bool IsEqual(const MediaStreamAudioSink* other) const OVERRIDE;
+ virtual bool IsEqual(const PeerConnectionAudioSink* other) const OVERRIDE;
+
+ protected:
+ virtual ~MediaStreamAudioSinkOwner() {}
+
+ private:
+ mutable base::Lock lock_;
+
+  // Raw pointer to the delegate; the client needs to call Reset() to set the
+  // pointer to NULL before the delegate goes away.
+ MediaStreamAudioSink* delegate_;
+
+ DISALLOW_COPY_AND_ASSIGN(MediaStreamAudioSinkOwner);
+};
+
+} // namespace content
+
+#endif // CONTENT_RENDERER_MEDIA_MEDIA_STREAM_AUDIO_SINK_OWNER_H_
diff --git a/content/renderer/media/media_stream_audio_track_sink.h b/content/renderer/media/media_stream_audio_track_sink.h
new file mode 100644
index 0000000..d3d8f64
--- /dev/null
+++ b/content/renderer/media/media_stream_audio_track_sink.h
@@ -0,0 +1,66 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CONTENT_RENDERER_MEDIA_MEDIA_STREAM_AUDIO_TRACK_SINK_H_
+#define CONTENT_RENDERER_MEDIA_MEDIA_STREAM_AUDIO_TRACK_SINK_H_
+
+#include <vector>
+
+#include "base/logging.h"
+#include "base/memory/ref_counted.h"
+#include "media/audio/audio_parameters.h"
+
+namespace content {
+
+class MediaStreamAudioSink;
+class PeerConnectionAudioSink;
+
+// Interface for a reference-counted holder of a media stream audio track sink.
+class MediaStreamAudioTrackSink
+ : public base::RefCountedThreadSafe<MediaStreamAudioTrackSink> {
+ public:
+ virtual int OnData(const int16* audio_data,
+ int sample_rate,
+ int number_of_channels,
+ int number_of_frames,
+ const std::vector<int>& channels,
+ int audio_delay_milliseconds,
+ int current_volume,
+ bool need_audio_processing,
+ bool key_pressed) = 0;
+
+ virtual void OnSetFormat(const media::AudioParameters& params) = 0;
+
+ virtual void Reset() = 0;
+
+ virtual bool IsEqual(const MediaStreamAudioSink* other) const = 0;
+ virtual bool IsEqual(const PeerConnectionAudioSink* other) const = 0;
+
+  // Wrapper that allows using std::find_if() when adding and removing
+  // sinks to/from the list.
+ struct WrapsMediaStreamSink {
+ WrapsMediaStreamSink(MediaStreamAudioSink* sink) : sink_(sink) {}
+ bool operator()(const scoped_refptr<MediaStreamAudioTrackSink>& owner) {
+ return owner->IsEqual(sink_);
+ }
+ MediaStreamAudioSink* sink_;
+ };
+ struct WrapsPeerConnectionSink {
+ WrapsPeerConnectionSink(PeerConnectionAudioSink* sink) : sink_(sink) {}
+ bool operator()(const scoped_refptr<MediaStreamAudioTrackSink>& owner) {
+ return owner->IsEqual(sink_);
+ }
+ PeerConnectionAudioSink* sink_;
+ };
+
+ protected:
+ virtual ~MediaStreamAudioTrackSink() {}
+
+ private:
+ friend class base::RefCountedThreadSafe<MediaStreamAudioTrackSink>;
+};
+
+} // namespace content
+
+#endif // CONTENT_RENDERER_MEDIA_MEDIA_STREAM_AUDIO_TRACK_SINK_H_
diff --git a/content/renderer/media/media_stream_dependency_factory.cc b/content/renderer/media/media_stream_dependency_factory.cc
index 4c2f046..a37ae5f 100644
--- a/content/renderer/media/media_stream_dependency_factory.cc
+++ b/content/renderer/media/media_stream_dependency_factory.cc
@@ -426,7 +426,7 @@ MediaStreamDependencyFactory::CreateNativeAudioMediaStreamTrack(
webaudio_source.get(),
source_data->local_audio_source(),
&track_constraints));
- AddNativeTrackToBlinkTrack(audio_track.get(), track);
+ AddNativeTrackToBlinkTrack(audio_track.get(), track, true);
audio_track->set_enabled(track.isEnabled());
@@ -456,7 +456,7 @@ MediaStreamDependencyFactory::CreateNativeVideoMediaStreamTrack(
std::string track_id = UTF16ToUTF8(track.id());
scoped_refptr<webrtc::VideoTrackInterface> video_track(
CreateLocalVideoTrack(track_id, source_data->video_source()));
- AddNativeTrackToBlinkTrack(video_track.get(), track);
+ AddNativeTrackToBlinkTrack(video_track.get(), track, true);
video_track->set_enabled(track.isEnabled());
@@ -541,7 +541,7 @@ bool MediaStreamDependencyFactory::AddNativeVideoMediaTrack(
webkit_source.initialize(webkit_track_id, type, webkit_track_id);
webkit_track.initialize(webkit_track_id, webkit_source);
- AddNativeTrackToBlinkTrack(native_track.get(), webkit_track);
+ AddNativeTrackToBlinkTrack(native_track.get(), webkit_track, true);
// Add the track to WebMediaStream.
stream->addTrack(webkit_track);
@@ -911,10 +911,12 @@ MediaStreamDependencyFactory::MaybeCreateAudioCapturer(
void MediaStreamDependencyFactory::AddNativeTrackToBlinkTrack(
webrtc::MediaStreamTrackInterface* native_track,
- const blink::WebMediaStreamTrack& webkit_track) {
+ const blink::WebMediaStreamTrack& webkit_track,
+ bool is_local_track) {
DCHECK(!webkit_track.isNull() && !webkit_track.extraData());
blink::WebMediaStreamTrack track = webkit_track;
- track.setExtraData(new MediaStreamTrackExtraData(native_track));
+ track.setExtraData(new MediaStreamTrackExtraData(native_track,
+ is_local_track));
}
webrtc::MediaStreamInterface*
diff --git a/content/renderer/media/media_stream_dependency_factory.h b/content/renderer/media/media_stream_dependency_factory.h
index 1e7b790..d075970 100644
--- a/content/renderer/media/media_stream_dependency_factory.h
+++ b/content/renderer/media/media_stream_dependency_factory.h
@@ -148,7 +148,8 @@ class CONTENT_EXPORT MediaStreamDependencyFactory
static void AddNativeTrackToBlinkTrack(
webrtc::MediaStreamTrackInterface* native_track,
- const blink::WebMediaStreamTrack& webkit_track);
+ const blink::WebMediaStreamTrack& webkit_track,
+ bool is_local_track);
static webrtc::MediaStreamInterface* GetNativeMediaStream(
const blink::WebMediaStream& stream);
diff --git a/content/renderer/media/media_stream_impl.cc b/content/renderer/media/media_stream_impl.cc
index 1deb444..457649d 100644
--- a/content/renderer/media/media_stream_impl.cc
+++ b/content/renderer/media/media_stream_impl.cc
@@ -271,7 +271,14 @@ MediaStreamImpl::GetAudioRenderer(const GURL& url) {
if (extra_data->is_local()) {
// Create the local audio renderer if the stream contains audio tracks.
- return CreateLocalAudioRenderer(extra_data->stream().get());
+ blink::WebVector<blink::WebMediaStreamTrack> audio_tracks;
+ web_stream.audioTracks(audio_tracks);
+ if (audio_tracks.isEmpty())
+ return NULL;
+
+ // TODO(xians): Add support for the case that the media stream contains
+ // multiple audio tracks.
+ return CreateLocalAudioRenderer(audio_tracks[0]);
}
webrtc::MediaStreamInterface* stream = extra_data->stream().get();
@@ -799,19 +806,8 @@ scoped_refptr<WebRtcAudioRenderer> MediaStreamImpl::CreateRemoteAudioRenderer(
scoped_refptr<WebRtcLocalAudioRenderer>
MediaStreamImpl::CreateLocalAudioRenderer(
- webrtc::MediaStreamInterface* stream) {
- if (stream->GetAudioTracks().empty())
- return NULL;
-
- DVLOG(1) << "MediaStreamImpl::CreateLocalAudioRenderer label:"
- << stream->label();
-
- webrtc::AudioTrackVector audio_tracks = stream->GetAudioTracks();
- DCHECK_EQ(audio_tracks.size(), 1u);
- webrtc::AudioTrackInterface* audio_track = audio_tracks[0];
- DVLOG(1) << "audio_track.kind : " << audio_track->kind()
- << "audio_track.id : " << audio_track->id()
- << "audio_track.enabled: " << audio_track->enabled();
+ const blink::WebMediaStreamTrack& audio_track) {
+ DVLOG(1) << "MediaStreamImpl::CreateLocalAudioRenderer";
int session_id = 0, sample_rate = 0, buffer_size = 0;
if (!GetAuthorizedDeviceInfoForAudioRenderer(&session_id,
@@ -823,7 +819,7 @@ MediaStreamImpl::CreateLocalAudioRenderer(
// Create a new WebRtcLocalAudioRenderer instance and connect it to the
// existing WebRtcAudioCapturer so that the renderer can use it as source.
return new WebRtcLocalAudioRenderer(
- static_cast<WebRtcLocalAudioTrack*>(audio_track),
+ audio_track,
RenderViewObserver::routing_id(),
session_id,
buffer_size);
diff --git a/content/renderer/media/media_stream_impl.h b/content/renderer/media/media_stream_impl.h
index 7988245..eb72ecd4 100644
--- a/content/renderer/media/media_stream_impl.h
+++ b/content/renderer/media/media_stream_impl.h
@@ -189,7 +189,7 @@ class CONTENT_EXPORT MediaStreamImpl
scoped_refptr<WebRtcAudioRenderer> CreateRemoteAudioRenderer(
webrtc::MediaStreamInterface* stream);
scoped_refptr<WebRtcLocalAudioRenderer> CreateLocalAudioRenderer(
- webrtc::MediaStreamInterface* stream);
+ const blink::WebMediaStreamTrack& audio_track);
// Returns a valid session id if a single capture device is currently open
// (and then the matching session_id), otherwise -1.
diff --git a/content/renderer/media/media_stream_track_extra_data.cc b/content/renderer/media/media_stream_track_extra_data.cc
index 07ac863..0fbb294 100644
--- a/content/renderer/media/media_stream_track_extra_data.cc
+++ b/content/renderer/media/media_stream_track_extra_data.cc
@@ -9,8 +9,9 @@
namespace content {
MediaStreamTrackExtraData::MediaStreamTrackExtraData(
- webrtc::MediaStreamTrackInterface* track)
- : track_(track) {
+ webrtc::MediaStreamTrackInterface* track, bool is_local_track)
+ : track_(track),
+ is_local_track_(is_local_track) {
}
MediaStreamTrackExtraData::~MediaStreamTrackExtraData() {
diff --git a/content/renderer/media/media_stream_track_extra_data.h b/content/renderer/media/media_stream_track_extra_data.h
index 80c7a98..dbc25b4 100644
--- a/content/renderer/media/media_stream_track_extra_data.h
+++ b/content/renderer/media/media_stream_track_extra_data.h
@@ -20,15 +20,18 @@ namespace content {
class CONTENT_EXPORT MediaStreamTrackExtraData
: NON_EXPORTED_BASE(public blink::WebMediaStreamTrack::ExtraData) {
public:
- MediaStreamTrackExtraData(webrtc::MediaStreamTrackInterface* track);
+ MediaStreamTrackExtraData(webrtc::MediaStreamTrackInterface* track,
+ bool is_local_track);
virtual ~MediaStreamTrackExtraData();
const scoped_refptr<webrtc::MediaStreamTrackInterface>& track() const {
return track_;
}
+ bool is_local_track () const { return is_local_track_; }
private:
scoped_refptr<webrtc::MediaStreamTrackInterface> track_;
+ const bool is_local_track_;
DISALLOW_COPY_AND_ASSIGN(MediaStreamTrackExtraData);
};
diff --git a/content/renderer/media/peer_connection_audio_sink_owner.cc b/content/renderer/media/peer_connection_audio_sink_owner.cc
new file mode 100644
index 0000000..84ff52b
--- /dev/null
+++ b/content/renderer/media/peer_connection_audio_sink_owner.cc
@@ -0,0 +1,66 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "content/renderer/media/peer_connection_audio_sink_owner.h"
+
+#include "content/renderer/media/webrtc_audio_device_impl.h"
+
+namespace content {
+
+PeerConnectionAudioSinkOwner::PeerConnectionAudioSinkOwner(
+ PeerConnectionAudioSink* sink)
+ : delegate_(sink) {
+}
+
+int PeerConnectionAudioSinkOwner::OnData(const int16* audio_data,
+ int sample_rate,
+ int number_of_channels,
+ int number_of_frames,
+ const std::vector<int>& channels,
+ int audio_delay_milliseconds,
+ int current_volume,
+ bool need_audio_processing,
+ bool key_pressed) {
+ base::AutoLock lock(lock_);
+ if (delegate_) {
+ return delegate_->OnData(audio_data,
+ sample_rate,
+ number_of_channels,
+ number_of_frames,
+ channels,
+ audio_delay_milliseconds,
+ current_volume,
+ need_audio_processing,
+ key_pressed);
+ }
+
+ return 0;
+}
+
+void PeerConnectionAudioSinkOwner::OnSetFormat(
+ const media::AudioParameters& params) {
+ base::AutoLock lock(lock_);
+ if (delegate_)
+ delegate_->OnSetFormat(params);
+}
+
+void PeerConnectionAudioSinkOwner::Reset() {
+ base::AutoLock lock(lock_);
+ delegate_ = NULL;
+}
+
+bool PeerConnectionAudioSinkOwner::IsEqual(
+ const MediaStreamAudioSink* other) const {
+ DCHECK(other);
+ return false;
+}
+
+bool PeerConnectionAudioSinkOwner::IsEqual(
+ const PeerConnectionAudioSink* other) const {
+ DCHECK(other);
+ base::AutoLock lock(lock_);
+ return (other == delegate_);
+}
+
+} // namespace content
diff --git a/content/renderer/media/peer_connection_audio_sink_owner.h b/content/renderer/media/peer_connection_audio_sink_owner.h
new file mode 100644
index 0000000..dda561e
--- /dev/null
+++ b/content/renderer/media/peer_connection_audio_sink_owner.h
@@ -0,0 +1,52 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CONTENT_RENDERER_MEDIA_PEER_CONNECTION_AUDIO_SINK_OWNER_H_
+#define CONTENT_RENDERER_MEDIA_PEER_CONNECTION_AUDIO_SINK_OWNER_H_
+
+#include <vector>
+
+#include "base/synchronization/lock.h"
+#include "content/renderer/media/media_stream_audio_track_sink.h"
+
+namespace content {
+
+class PeerConnectionAudioSink;
+
+// Reference counted holder of PeerConnectionAudioSink.
+class PeerConnectionAudioSinkOwner : public MediaStreamAudioTrackSink {
+ public:
+ explicit PeerConnectionAudioSinkOwner(PeerConnectionAudioSink* sink);
+
+ // MediaStreamAudioTrackSink implementation.
+ virtual int OnData(const int16* audio_data,
+ int sample_rate,
+ int number_of_channels,
+ int number_of_frames,
+ const std::vector<int>& channels,
+ int audio_delay_milliseconds,
+ int current_volume,
+ bool need_audio_processing,
+ bool key_pressed) OVERRIDE;
+ virtual void OnSetFormat(const media::AudioParameters& params) OVERRIDE;
+ virtual void Reset() OVERRIDE;
+ virtual bool IsEqual(const MediaStreamAudioSink* other) const OVERRIDE;
+ virtual bool IsEqual(const PeerConnectionAudioSink* other) const OVERRIDE;
+
+ protected:
+ virtual ~PeerConnectionAudioSinkOwner() {}
+
+ private:
+ mutable base::Lock lock_;
+
+  // Raw pointer to the delegate; the client needs to call Reset() to set the
+  // pointer to NULL before the delegate goes away.
+ PeerConnectionAudioSink* delegate_;
+
+ DISALLOW_COPY_AND_ASSIGN(PeerConnectionAudioSinkOwner);
+};
+
+} // namespace content
+
+#endif // CONTENT_RENDERER_MEDIA_PEER_CONNECTION_AUDIO_SINK_OWNER_H_
diff --git a/content/renderer/media/remote_media_stream_impl.cc b/content/renderer/media/remote_media_stream_impl.cc
index da68411..af430e2 100644
--- a/content/renderer/media/remote_media_stream_impl.cc
+++ b/content/renderer/media/remote_media_stream_impl.cc
@@ -53,8 +53,8 @@ void InitializeWebkitTrack(webrtc::MediaStreamTrackInterface* track,
webkit_source.initialize(webkit_track_id, type, webkit_track_id);
webkit_track->initialize(webkit_track_id, webkit_source);
- content::MediaStreamDependencyFactory::AddNativeTrackToBlinkTrack(track,
- *webkit_track);
+ content::MediaStreamDependencyFactory::AddNativeTrackToBlinkTrack(
+ track, *webkit_track, false);
}
content::RemoteMediaStreamTrackObserver* FindTrackObserver(
diff --git a/content/renderer/media/rtc_peer_connection_handler_unittest.cc b/content/renderer/media/rtc_peer_connection_handler_unittest.cc
index e0f6885..18901ea 100644
--- a/content/renderer/media/rtc_peer_connection_handler_unittest.cc
+++ b/content/renderer/media/rtc_peer_connection_handler_unittest.cc
@@ -249,7 +249,7 @@ class RTCPeerConnectionHandlerTest : public ::testing::Test {
audio_track_id, capturer, NULL, NULL,
&audio_constraints));
MediaStreamDependencyFactory::AddNativeTrackToBlinkTrack(
- audio_track.get(), audio_tracks[0]);
+ audio_track.get(), audio_tracks[0], true);
native_stream->AddTrack(audio_track.get());
local_stream.videoTracks(video_tracks);
@@ -259,7 +259,7 @@ class RTCPeerConnectionHandlerTest : public ::testing::Test {
mock_dependency_factory_->CreateLocalVideoTrack(
video_track_id, source));
MediaStreamDependencyFactory::AddNativeTrackToBlinkTrack(
- video_track.get(), video_tracks[0]);
+ video_track.get(), video_tracks[0], true);
native_stream->AddTrack(video_track.get());
local_stream.setExtraData(
diff --git a/content/renderer/media/webaudio_capturer_source.cc b/content/renderer/media/webaudio_capturer_source.cc
index b805d40..1f1192a 100644
--- a/content/renderer/media/webaudio_capturer_source.cc
+++ b/content/renderer/media/webaudio_capturer_source.cc
@@ -63,11 +63,6 @@ void WebAudioCapturerSource::Start(
WebRtcLocalAudioTrack* track, WebRtcAudioCapturer* capturer) {
DCHECK(thread_checker_.CalledOnValidThread());
DCHECK(track);
- // The downstream client should be configured the same as what WebKit
- // is feeding it.
- if (params_.IsValid())
- track->SetCaptureFormat(params_);
-
base::AutoLock auto_lock(lock_);
track_ = track;
capturer_ = capturer;
@@ -89,7 +84,7 @@ void WebAudioCapturerSource::consumeAudio(
// Update the downstream client if the audio format has been changed.
if (audio_format_changed_) {
- track_->SetCaptureFormat(params_);
+ track_->OnSetFormat(params_);
audio_format_changed_ = false;
}
diff --git a/content/renderer/media/webrtc_audio_capturer.cc b/content/renderer/media/webrtc_audio_capturer.cc
index 275290a..822c13a 100644
--- a/content/renderer/media/webrtc_audio_capturer.cc
+++ b/content/renderer/media/webrtc_audio_capturer.cc
@@ -57,10 +57,10 @@ class WebRtcAudioCapturer::TrackOwner
}
}
- void SetCaptureFormat(const media::AudioParameters& params) {
+ void OnSetFormat(const media::AudioParameters& params) {
base::AutoLock lock(lock_);
if (delegate_)
- delegate_->SetCaptureFormat(params);
+ delegate_->OnSetFormat(params);
}
void Reset() {
@@ -238,7 +238,7 @@ void WebRtcAudioCapturer::AddTrack(WebRtcLocalAudioTrack* track) {
tracks_.push_back(track_owner);
// Also push the track to |tracks_to_notify_format_| so that we will call
- // SetCaptureFormat() on the new track.
+ // OnSetFormat() on the new track.
tracks_to_notify_format_.push_back(track_owner);
}
@@ -450,7 +450,7 @@ void WebRtcAudioCapturer::Capture(media::AudioBus* audio_source,
// |tracks_to_notify_format| is empty.
for (TrackList::const_iterator it = tracks_to_notify_format.begin();
it != tracks_to_notify_format.end(); ++it) {
- (*it)->SetCaptureFormat(params);
+ (*it)->OnSetFormat(params);
}
// Feed the data to the tracks.
diff --git a/content/renderer/media/webrtc_audio_capturer_sink_owner.cc b/content/renderer/media/webrtc_audio_capturer_sink_owner.cc
deleted file mode 100644
index cbff31b..0000000
--- a/content/renderer/media/webrtc_audio_capturer_sink_owner.cc
+++ /dev/null
@@ -1,57 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "content/renderer/media/webrtc_audio_capturer_sink_owner.h"
-
-namespace content {
-
-WebRtcAudioCapturerSinkOwner::WebRtcAudioCapturerSinkOwner(
- WebRtcAudioCapturerSink* sink)
- : delegate_(sink) {
-}
-
-int WebRtcAudioCapturerSinkOwner::CaptureData(const std::vector<int>& channels,
- const int16* audio_data,
- int sample_rate,
- int number_of_channels,
- int number_of_frames,
- int audio_delay_milliseconds,
- int current_volume,
- bool need_audio_processing,
- bool key_pressed) {
- base::AutoLock lock(lock_);
- if (delegate_) {
- return delegate_->CaptureData(channels,
- audio_data,
- sample_rate,
- number_of_channels,
- number_of_frames,
- audio_delay_milliseconds,
- current_volume,
- need_audio_processing,
- key_pressed);
- }
-
- return 0;
-}
-
-void WebRtcAudioCapturerSinkOwner::SetCaptureFormat(
- const media::AudioParameters& params) {
- base::AutoLock lock(lock_);
- if (delegate_)
- delegate_->SetCaptureFormat(params);
-}
-
-bool WebRtcAudioCapturerSinkOwner::IsEqual(
- const WebRtcAudioCapturerSink* other) const {
- base::AutoLock lock(lock_);
- return (other == delegate_);
-}
-
-void WebRtcAudioCapturerSinkOwner::Reset() {
- base::AutoLock lock(lock_);
- delegate_ = NULL;
-}
-
-} // namespace content
diff --git a/content/renderer/media/webrtc_audio_capturer_sink_owner.h b/content/renderer/media/webrtc_audio_capturer_sink_owner.h
deleted file mode 100644
index f338209..0000000
--- a/content/renderer/media/webrtc_audio_capturer_sink_owner.h
+++ /dev/null
@@ -1,66 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef CONTENT_RENDERER_MEDIA_WEBRTC_AUDIO_CAPTURER_SINK_OWNER_H_
-#define CONTENT_RENDERER_MEDIA_WEBRTC_AUDIO_CAPTURER_SINK_OWNER_H_
-
-#include <vector>
-
-#include "base/memory/ref_counted.h"
-#include "base/synchronization/lock.h"
-#include "content/renderer/media/webrtc_audio_device_impl.h"
-#include "media/audio/audio_parameters.h"
-
-namespace content {
-
-class WebRtcAudioCapturerSink;
-
-// Reference counted container of WebRtcAudioCapturerSink delegates.
-class WebRtcAudioCapturerSinkOwner
- : public base::RefCountedThreadSafe<WebRtcAudioCapturerSinkOwner>,
- public WebRtcAudioCapturerSink {
- public:
- explicit WebRtcAudioCapturerSinkOwner(WebRtcAudioCapturerSink* sink);
-
- // WebRtcAudioCapturerSink implementation.
- virtual int CaptureData(const std::vector<int>& channels,
- const int16* audio_data,
- int sample_rate,
- int number_of_channels,
- int number_of_frames,
- int audio_delay_milliseconds,
- int current_volume,
- bool need_audio_processing,
- bool key_pressed) OVERRIDE;
-
- virtual void SetCaptureFormat(const media::AudioParameters& params) OVERRIDE;
-
- bool IsEqual(const WebRtcAudioCapturerSink* other) const;
- void Reset();
-
- // Wrapper which allows to use std::find_if() when adding and removing
- // sinks to/from the list.
- struct WrapsSink {
- WrapsSink(WebRtcAudioCapturerSink* sink) : sink_(sink) {}
- bool operator()(
- const scoped_refptr<WebRtcAudioCapturerSinkOwner>& owner) {
- return owner->IsEqual(sink_);
- }
- WebRtcAudioCapturerSink* sink_;
- };
-
- protected:
- virtual ~WebRtcAudioCapturerSinkOwner() {}
-
- private:
- friend class base::RefCountedThreadSafe<WebRtcAudioCapturerSinkOwner>;
- WebRtcAudioCapturerSink* delegate_;
- mutable base::Lock lock_;
-
- DISALLOW_COPY_AND_ASSIGN(WebRtcAudioCapturerSinkOwner);
-};
-
-} // namespace content
-
-#endif // CONTENT_RENDERER_MEDIA_WEBRTC_AUDIO_CAPTURER_SINK_OWNER_H_
diff --git a/content/renderer/media/webrtc_audio_capturer_unittest.cc b/content/renderer/media/webrtc_audio_capturer_unittest.cc
index 01c8b48..cc7d528 100644
--- a/content/renderer/media/webrtc_audio_capturer_unittest.cc
+++ b/content/renderer/media/webrtc_audio_capturer_unittest.cc
@@ -65,20 +65,20 @@ class MockCapturerSource : public media::AudioCapturerSource {
virtual ~MockCapturerSource() {}
};
-class MockWebRtcAudioCapturerSink : public WebRtcAudioCapturerSink {
+class MockPeerConnectionAudioSink : public PeerConnectionAudioSink {
public:
- MockWebRtcAudioCapturerSink() {}
- ~MockWebRtcAudioCapturerSink() {}
- MOCK_METHOD9(CaptureData, int(const std::vector<int>& channels,
- const int16* audio_data,
- int sample_rate,
- int number_of_channels,
- int number_of_frames,
- int audio_delay_milliseconds,
- int current_volume,
- bool need_audio_processing,
- bool key_pressed));
- MOCK_METHOD1(SetCaptureFormat, void(const media::AudioParameters& params));
+ MockPeerConnectionAudioSink() {}
+ ~MockPeerConnectionAudioSink() {}
+ MOCK_METHOD9(OnData, int(const int16* audio_data,
+ int sample_rate,
+ int number_of_channels,
+ int number_of_frames,
+ const std::vector<int>& channels,
+ int audio_delay_milliseconds,
+ int current_volume,
+ bool need_audio_processing,
+ bool key_pressed));
+ MOCK_METHOD1(OnSetFormat, void(const media::AudioParameters& params));
};
} // namespace
@@ -125,8 +125,8 @@ class WebRtcAudioCapturerTest : public testing::Test {
// those values should be correctly stored and passed to the track.
TEST_F(WebRtcAudioCapturerTest, VerifyAudioParams) {
// Connect a mock sink to the track.
- scoped_ptr<MockWebRtcAudioCapturerSink> sink(
- new MockWebRtcAudioCapturerSink());
+ scoped_ptr<MockPeerConnectionAudioSink> sink(
+ new MockPeerConnectionAudioSink());
track_->AddSink(sink.get());
int delay_ms = 65;
@@ -146,12 +146,12 @@ TEST_F(WebRtcAudioCapturerTest, VerifyAudioParams) {
media::AudioCapturerSource::CaptureCallback* callback =
static_cast<media::AudioCapturerSource::CaptureCallback*>(capturer_);
// Verify the sink is getting the correct values.
- EXPECT_CALL(*sink, SetCaptureFormat(_));
+ EXPECT_CALL(*sink, OnSetFormat(_));
EXPECT_CALL(*sink,
- CaptureData(_, _, params_.sample_rate(), params_.channels(),
- expected_buffer_size, delay_ms,
- expected_volume_value, expected_need_audio_processing,
- key_pressed)).Times(AtLeast(1));
+ OnData(_, params_.sample_rate(), params_.channels(),
+ expected_buffer_size, _, delay_ms,
+ expected_volume_value, expected_need_audio_processing,
+ key_pressed)).Times(AtLeast(1));
callback->Capture(audio_bus.get(), delay_ms, volume, key_pressed);
// Verify the cached values in the capturer fits what we expect.
diff --git a/content/renderer/media/webrtc_audio_device_impl.cc b/content/renderer/media/webrtc_audio_device_impl.cc
index 36b321d..1daf048 100644
--- a/content/renderer/media/webrtc_audio_device_impl.cc
+++ b/content/renderer/media/webrtc_audio_device_impl.cc
@@ -50,15 +50,15 @@ int32_t WebRtcAudioDeviceImpl::Release() {
}
return ret;
}
-int WebRtcAudioDeviceImpl::CaptureData(const std::vector<int>& channels,
- const int16* audio_data,
- int sample_rate,
- int number_of_channels,
- int number_of_frames,
- int audio_delay_milliseconds,
- int current_volume,
- bool need_audio_processing,
- bool key_pressed) {
+int WebRtcAudioDeviceImpl::OnData(const int16* audio_data,
+ int sample_rate,
+ int number_of_channels,
+ int number_of_frames,
+ const std::vector<int>& channels,
+ int audio_delay_milliseconds,
+ int current_volume,
+ bool need_audio_processing,
+ bool key_pressed) {
int total_delay_ms = 0;
{
base::AutoLock auto_lock(lock_);
@@ -106,9 +106,9 @@ int WebRtcAudioDeviceImpl::CaptureData(const std::vector<int>& channels,
return new_volume;
}
-void WebRtcAudioDeviceImpl::SetCaptureFormat(
+void WebRtcAudioDeviceImpl::OnSetFormat(
const media::AudioParameters& params) {
- DVLOG(1) << "WebRtcAudioDeviceImpl::SetCaptureFormat()";
+ DVLOG(1) << "WebRtcAudioDeviceImpl::OnSetFormat()";
}
void WebRtcAudioDeviceImpl::RenderData(uint8* audio_data,
diff --git a/content/renderer/media/webrtc_audio_device_impl.h b/content/renderer/media/webrtc_audio_device_impl.h
index 86f8716..f515c6e 100644
--- a/content/renderer/media/webrtc_audio_device_impl.h
+++ b/content/renderer/media/webrtc_audio_device_impl.h
@@ -202,7 +202,7 @@ class WebRtcAudioRendererSource {
virtual ~WebRtcAudioRendererSource() {}
};
-class WebRtcAudioCapturerSink {
+class PeerConnectionAudioSink {
public:
// Callback to deliver the captured interleaved data.
// |channels| contains a vector of WebRtc VoE channels.
@@ -216,31 +216,31 @@ class WebRtcAudioCapturerSink {
// audio processing.
// The return value is the new microphone volume, in the range of |0, 255].
// When the volume does not need to be updated, it returns 0.
- virtual int CaptureData(const std::vector<int>& channels,
- const int16* audio_data,
- int sample_rate,
- int number_of_channels,
- int number_of_frames,
- int audio_delay_milliseconds,
- int current_volume,
- bool need_audio_processing,
- bool key_pressed) = 0;
+ virtual int OnData(const int16* audio_data,
+ int sample_rate,
+ int number_of_channels,
+ int number_of_frames,
+ const std::vector<int>& channels,
+ int audio_delay_milliseconds,
+ int current_volume,
+ bool need_audio_processing,
+ bool key_pressed) = 0;
// Set the format for the capture audio parameters.
// This is called when the capture format has changed, and it must be called
// on the same thread as calling CaptureData().
- virtual void SetCaptureFormat(const media::AudioParameters& params) = 0;
+ virtual void OnSetFormat(const media::AudioParameters& params) = 0;
protected:
- virtual ~WebRtcAudioCapturerSink() {}
+ virtual ~PeerConnectionAudioSink() {}
};
// Note that this class inherits from webrtc::AudioDeviceModule but due to
// the high number of non-implemented methods, we move the cruft over to the
// WebRtcAudioDeviceNotImpl.
class CONTENT_EXPORT WebRtcAudioDeviceImpl
- : NON_EXPORTED_BASE(public WebRtcAudioDeviceNotImpl),
- NON_EXPORTED_BASE(public WebRtcAudioCapturerSink),
+ : NON_EXPORTED_BASE(public PeerConnectionAudioSink),
+ NON_EXPORTED_BASE(public WebRtcAudioDeviceNotImpl),
NON_EXPORTED_BASE(public WebRtcAudioRendererSource) {
public:
// The maximum volume value WebRtc uses.
@@ -327,21 +327,21 @@ class CONTENT_EXPORT WebRtcAudioDeviceImpl
// Make destructor private to ensure that we can only be deleted by Release().
virtual ~WebRtcAudioDeviceImpl();
- // WebRtcAudioCapturerSink implementation.
+ // PeerConnectionAudioSink implementation.
// Called on the AudioInputDevice worker thread.
- virtual int CaptureData(const std::vector<int>& channels,
- const int16* audio_data,
- int sample_rate,
- int number_of_channels,
- int number_of_frames,
- int audio_delay_milliseconds,
- int current_volume,
- bool need_audio_processing,
- bool key_pressed) OVERRIDE;
+ virtual int OnData(const int16* audio_data,
+ int sample_rate,
+ int number_of_channels,
+ int number_of_frames,
+ const std::vector<int>& channels,
+ int audio_delay_milliseconds,
+ int current_volume,
+ bool need_audio_processing,
+ bool key_pressed) OVERRIDE;
- // Called on the main render thread.
- virtual void SetCaptureFormat(const media::AudioParameters& params) OVERRIDE;
+ // Called on the AudioInputDevice worker thread.
+ virtual void OnSetFormat(const media::AudioParameters& params) OVERRIDE;
// WebRtcAudioRendererSource implementation.
diff --git a/content/renderer/media/webrtc_audio_device_unittest.cc b/content/renderer/media/webrtc_audio_device_unittest.cc
index 6dcdda7..a79176e 100644
--- a/content/renderer/media/webrtc_audio_device_unittest.cc
+++ b/content/renderer/media/webrtc_audio_device_unittest.cc
@@ -135,7 +135,7 @@ bool CreateAndInitializeCapturer(WebRtcAudioDeviceImpl* webrtc_audio_device) {
// Also, connect the sink to the audio track.
scoped_refptr<WebRtcLocalAudioTrack>
CreateAndStartLocalAudioTrack(WebRtcAudioCapturer* capturer,
- WebRtcAudioCapturerSink* sink) {
+ PeerConnectionAudioSink* sink) {
scoped_refptr<WebRtcLocalAudioTrack> local_audio_track(
WebRtcLocalAudioTrack::Create(std::string(), capturer, NULL, NULL, NULL));
local_audio_track->AddSink(sink);
@@ -205,37 +205,38 @@ class WebRTCMediaProcessImpl : public webrtc::VoEMediaProcess {
DISALLOW_COPY_AND_ASSIGN(WebRTCMediaProcessImpl);
};
-class MockWebRtcAudioCapturerSink : public WebRtcAudioCapturerSink {
+// TODO(xians): Use MediaStreamAudioSink.
+class MockMediaStreamAudioSink : public PeerConnectionAudioSink {
public:
- explicit MockWebRtcAudioCapturerSink(base::WaitableEvent* event)
+ explicit MockMediaStreamAudioSink(base::WaitableEvent* event)
: event_(event) {
DCHECK(event_);
}
- virtual ~MockWebRtcAudioCapturerSink() {}
-
- // WebRtcAudioCapturerSink implementation.
- virtual int CaptureData(const std::vector<int>& channels,
- const int16* audio_data,
- int sample_rate,
- int number_of_channels,
- int number_of_frames,
- int audio_delay_milliseconds,
- int current_volume,
- bool need_audio_processing,
- bool key_pressed) OVERRIDE {
+ virtual ~MockMediaStreamAudioSink() {}
+
+ // PeerConnectionAudioSink implementation.
+ virtual int OnData(const int16* audio_data,
+ int sample_rate,
+ int number_of_channels,
+ int number_of_frames,
+ const std::vector<int>& channels,
+ int audio_delay_milliseconds,
+ int current_volume,
+ bool need_audio_processing,
+ bool key_pressed) OVERRIDE {
// Signal that a callback has been received.
event_->Signal();
return 0;
}
// Set the format for the capture audio parameters.
- virtual void SetCaptureFormat(
+ virtual void OnSetFormat(
const media::AudioParameters& params) OVERRIDE {}
private:
base::WaitableEvent* event_;
- DISALLOW_COPY_AND_ASSIGN(MockWebRtcAudioCapturerSink);
+ DISALLOW_COPY_AND_ASSIGN(MockMediaStreamAudioSink);
};
class MockWebRtcAudioRendererSource : public WebRtcAudioRendererSource {
@@ -329,13 +330,13 @@ int RunWebRtcLoopbackTimeTest(media::AudioManager* manager,
int err = base->Init(webrtc_audio_device.get());
EXPECT_EQ(0, err);
- // We use SetCaptureFormat() and SetRenderFormat() to configure the audio
+ // We use OnSetFormat() and SetRenderFormat() to configure the audio
// parameters so that this test can run on machine without hardware device.
const media::AudioParameters params = media::AudioParameters(
media::AudioParameters::AUDIO_PCM_LOW_LATENCY, CHANNEL_LAYOUT_STEREO,
48000, 2, 480);
- WebRtcAudioCapturerSink* capturer_sink =
- static_cast<WebRtcAudioCapturerSink*>(webrtc_audio_device.get());
+ PeerConnectionAudioSink* capturer_sink =
+ static_cast<PeerConnectionAudioSink*>(webrtc_audio_device.get());
WebRtcAudioRendererSource* renderer_source =
static_cast<WebRtcAudioRendererSource*>(webrtc_audio_device.get());
renderer_source->SetRenderFormat(params);
@@ -379,12 +380,12 @@ int RunWebRtcLoopbackTimeTest(media::AudioManager* manager,
voe_channels.push_back(channel);
for (int j = 0; j < kNumberOfPacketsForLoopbackTest; ++j) {
// Sending fake capture data to WebRtc.
- capturer_sink->CaptureData(
- voe_channels,
+ capturer_sink->OnData(
reinterpret_cast<int16*>(capture_data.get() + input_packet_size * j),
params.sample_rate(),
params.channels(),
params.frames_per_buffer(),
+ voe_channels,
kHardwareLatencyInMs,
1.0,
enable_apm,
@@ -860,8 +861,8 @@ TEST_F(MAYBE_WebRTCAudioDeviceTest, DISABLED_WebRtcRecordingSetupTime) {
EXPECT_TRUE(CreateAndInitializeCapturer(webrtc_audio_device.get()));
base::WaitableEvent event(false, false);
- scoped_ptr<MockWebRtcAudioCapturerSink> sink(
- new MockWebRtcAudioCapturerSink(&event));
+ scoped_ptr<MockMediaStreamAudioSink> sink(
+ new MockMediaStreamAudioSink(&event));
// Create and start a local audio track. Starting the audio track will connect
// the audio track to the capturer and also start the source of the capturer.
diff --git a/content/renderer/media/webrtc_local_audio_renderer.cc b/content/renderer/media/webrtc_local_audio_renderer.cc
index 017f632..6a92d90 100644
--- a/content/renderer/media/webrtc_local_audio_renderer.cc
+++ b/content/renderer/media/webrtc_local_audio_renderer.cc
@@ -56,21 +56,16 @@ void WebRtcLocalAudioRenderer::OnRenderError() {
NOTIMPLEMENTED();
}
-// content::WebRtcAudioCapturerSink implementation
-int WebRtcLocalAudioRenderer::CaptureData(const std::vector<int>& channels,
- const int16* audio_data,
- int sample_rate,
- int number_of_channels,
- int number_of_frames,
- int audio_delay_milliseconds,
- int current_volume,
- bool need_audio_processing,
- bool key_pressed) {
+// content::MediaStreamAudioSink implementation
+void WebRtcLocalAudioRenderer::OnData(const int16* audio_data,
+ int sample_rate,
+ int number_of_channels,
+ int number_of_frames) {
DCHECK(capture_thread_checker_.CalledOnValidThread());
TRACE_EVENT0("audio", "WebRtcLocalAudioRenderer::CaptureData");
base::AutoLock auto_lock(thread_lock_);
if (!playing_ || !volume_ || !loopback_fifo_)
- return 0;
+ return;
// Push captured audio to FIFO so it can be read by a local sink.
if (loopback_fifo_->frames() + number_of_frames <=
@@ -88,13 +83,11 @@ int WebRtcLocalAudioRenderer::CaptureData(const std::vector<int>& channels,
} else {
DVLOG(1) << "FIFO is full";
}
-
- return 0;
}
-void WebRtcLocalAudioRenderer::SetCaptureFormat(
+void WebRtcLocalAudioRenderer::OnSetFormat(
const media::AudioParameters& params) {
- DVLOG(1) << "WebRtcLocalAudioRenderer::SetCaptureFormat()";
+ DVLOG(1) << "WebRtcLocalAudioRenderer::OnSetFormat()";
// If the source is restarted, we might have changed to another capture
// thread.
capture_thread_checker_.DetachFromThread();
@@ -148,7 +141,7 @@ void WebRtcLocalAudioRenderer::SetCaptureFormat(
// WebRtcLocalAudioRenderer::WebRtcLocalAudioRenderer implementation.
WebRtcLocalAudioRenderer::WebRtcLocalAudioRenderer(
- WebRtcLocalAudioTrack* audio_track,
+ const blink::WebMediaStreamTrack& audio_track,
int source_render_view_id,
int session_id,
int frames_per_buffer)
@@ -160,7 +153,6 @@ WebRtcLocalAudioRenderer::WebRtcLocalAudioRenderer(
frames_per_buffer_(frames_per_buffer),
volume_(0.0),
sink_started_(false) {
- DCHECK(audio_track);
DVLOG(1) << "WebRtcLocalAudioRenderer::WebRtcLocalAudioRenderer()";
}
@@ -174,11 +166,8 @@ void WebRtcLocalAudioRenderer::Start() {
DVLOG(1) << "WebRtcLocalAudioRenderer::Start()";
DCHECK(message_loop_->BelongsToCurrentThread());
- if (!audio_track_)
- return; // Stop() has been called, so never start again.
-
// We get audio data from |audio_track_|...
- audio_track_->AddSink(this);
+ MediaStreamAudioSink::AddToAudioTrack(this, audio_track_);
// ...and |sink_| will get audio data from us.
DCHECK(!sink_.get());
sink_ = AudioDeviceFactory::NewOutputDevice(source_render_view_id_);
@@ -213,12 +202,7 @@ void WebRtcLocalAudioRenderer::Stop() {
sink_started_ = false;
// Ensure that the capturer stops feeding us with captured audio.
- // Note that, we do not stop the capturer here since it may still be used by
- // the WebRTC ADM.
- if (audio_track_) {
- audio_track_->RemoveSink(this);
- audio_track_ = NULL;
- }
+ MediaStreamAudioSink::RemoveFromAudioTrack(this, audio_track_);
}
void WebRtcLocalAudioRenderer::Play() {
diff --git a/content/renderer/media/webrtc_local_audio_renderer.h b/content/renderer/media/webrtc_local_audio_renderer.h
index 8e8c96d..e9871ec 100644
--- a/content/renderer/media/webrtc_local_audio_renderer.h
+++ b/content/renderer/media/webrtc_local_audio_renderer.h
@@ -13,9 +13,11 @@
#include "base/synchronization/lock.h"
#include "base/threading/thread_checker.h"
#include "content/common/content_export.h"
+#include "content/public/renderer/media_stream_audio_sink.h"
#include "content/renderer/media/media_stream_audio_renderer.h"
#include "content/renderer/media/webrtc_audio_device_impl.h"
#include "content/renderer/media/webrtc_local_audio_track.h"
+#include "third_party/WebKit/public/platform/WebMediaStreamTrack.h"
namespace media {
class AudioBus;
@@ -34,7 +36,7 @@ class WebRtcAudioCapturer;
// It also implements media::AudioRendererSink::RenderCallback to render audio
// data provided from a WebRtcLocalAudioTrack source.
// When the audio layer in the browser process asks for data to render, this
-// class provides the data by implementing the WebRtcAudioCapturerSink
+// class provides the data by implementing the MediaStreamAudioSink
// interface, i.e., we are a sink seen from the WebRtcAudioCapturer perspective.
// TODO(henrika): improve by using similar principles as in RTCVideoRenderer
// which register itself to the video track when the provider is started and
@@ -42,13 +44,13 @@ class WebRtcAudioCapturer;
// Tracking this at http://crbug.com/164813.
class CONTENT_EXPORT WebRtcLocalAudioRenderer
: NON_EXPORTED_BASE(public MediaStreamAudioRenderer),
- NON_EXPORTED_BASE(public media::AudioRendererSink::RenderCallback),
- NON_EXPORTED_BASE(public WebRtcAudioCapturerSink) {
+ NON_EXPORTED_BASE(public MediaStreamAudioSink),
+ NON_EXPORTED_BASE(public media::AudioRendererSink::RenderCallback) {
public:
// Creates a local renderer and registers a capturing |source| object.
// The |source| is owned by the WebRtcAudioDeviceImpl.
// Called on the main thread.
- WebRtcLocalAudioRenderer(WebRtcLocalAudioTrack* audio_track,
+ WebRtcLocalAudioRenderer(const blink::WebMediaStreamTrack& audio_track,
int source_render_view_id,
int session_id,
int frames_per_buffer);
@@ -71,21 +73,16 @@ class CONTENT_EXPORT WebRtcLocalAudioRenderer
virtual ~WebRtcLocalAudioRenderer();
private:
- // WebRtcAudioCapturerSink implementation.
+ // MediaStreamAudioSink implementation.
// Called on the AudioInputDevice worker thread.
- virtual int CaptureData(const std::vector<int>& channels,
- const int16* audio_data,
- int sample_rate,
- int number_of_channels,
- int number_of_frames,
- int audio_delay_milliseconds,
- int current_volume,
- bool need_audio_processing,
- bool key_pressed) OVERRIDE;
-
- // Can be called on different user thread.
- virtual void SetCaptureFormat(const media::AudioParameters& params) OVERRIDE;
+ virtual void OnData(const int16* audio_data,
+ int sample_rate,
+ int number_of_channels,
+ int number_of_frames) OVERRIDE;
+
+ // Called on the AudioInputDevice worker thread.
+ virtual void OnSetFormat(const media::AudioParameters& params) OVERRIDE;
// media::AudioRendererSink::RenderCallback implementation.
// Render() is called on the AudioOutputDevice thread and OnRenderError()
@@ -108,8 +105,10 @@ class CONTENT_EXPORT WebRtcLocalAudioRenderer
// instance like a selected microphone and forwards the recorded data to its
// sinks. The recorded data is stored in a FIFO and consumed
// by this class when the sink asks for new data.
- // The WebRtcAudioCapturer is today created by WebRtcAudioDeviceImpl.
- scoped_refptr<WebRtcLocalAudioTrack> audio_track_;
+ // This class is calling MediaStreamAudioSink::AddToAudioTrack() and
+ // MediaStreamAudioSink::RemoveFromAudioTrack() to connect and disconnect
+ // with the audio track.
+ blink::WebMediaStreamTrack audio_track_;
// The render view in which the audio is rendered into |sink_|.
const int source_render_view_id_;
diff --git a/content/renderer/media/webrtc_local_audio_source_provider.cc b/content/renderer/media/webrtc_local_audio_source_provider.cc
index f18d8c5..bc5f80d 100644
--- a/content/renderer/media/webrtc_local_audio_source_provider.cc
+++ b/content/renderer/media/webrtc_local_audio_source_provider.cc
@@ -43,10 +43,10 @@ WebRtcLocalAudioSourceProvider::~WebRtcLocalAudioSourceProvider() {
audio_converter_->RemoveInput(this);
}
-void WebRtcLocalAudioSourceProvider::SetCaptureFormat(
+void WebRtcLocalAudioSourceProvider::OnSetFormat(
const media::AudioParameters& params) {
// We need detach the thread here because it will be a new capture thread
- // calling SetCaptureFormat() and CaptureData() if the source is restarted.
+ // calling OnSetFormat() and OnData() if the source is restarted.
capture_thread_checker_.DetachFromThread();
DCHECK(capture_thread_checker_.CalledOnValidThread());
DCHECK(params.IsValid());
@@ -68,20 +68,15 @@ void WebRtcLocalAudioSourceProvider::SetCaptureFormat(
params.frames_per_buffer());
}
-int WebRtcLocalAudioSourceProvider::CaptureData(
- const std::vector<int>& channels,
+void WebRtcLocalAudioSourceProvider::OnData(
const int16* audio_data,
int sample_rate,
int number_of_channels,
- int number_of_frames,
- int audio_delay_milliseconds,
- int current_volume,
- bool need_audio_processing,
- bool key_pressed) {
+ int number_of_frames) {
DCHECK(capture_thread_checker_.CalledOnValidThread());
base::AutoLock auto_lock(lock_);
if (!is_enabled_)
- return 0;
+ return;
DCHECK(fifo_.get());
@@ -98,8 +93,6 @@ int WebRtcLocalAudioSourceProvider::CaptureData(
// WebAudio stops consuming data.
DLOG(WARNING) << "Local source provicer FIFO is full" << fifo_->frames();
}
-
- return 0;
}
void WebRtcLocalAudioSourceProvider::setClient(
diff --git a/content/renderer/media/webrtc_local_audio_source_provider.h b/content/renderer/media/webrtc_local_audio_source_provider.h
index 4536a25..eb437fa 100644
--- a/content/renderer/media/webrtc_local_audio_source_provider.h
+++ b/content/renderer/media/webrtc_local_audio_source_provider.h
@@ -5,12 +5,14 @@
#ifndef CONTENT_RENDERER_MEDIA_WEBRTC_LOCAL_AUDIO_SOURCE_PROVIDER_H_
#define CONTENT_RENDERER_MEDIA_WEBRTC_LOCAL_AUDIO_SOURCE_PROVIDER_H_
+#include <vector>
+
#include "base/memory/scoped_ptr.h"
#include "base/synchronization/lock.h"
#include "base/threading/thread_checker.h"
#include "base/time/time.h"
#include "content/common/content_export.h"
-#include "content/renderer/media/webrtc_audio_device_impl.h"
+#include "content/public/renderer/media_stream_audio_sink.h"
#include "media/base/audio_converter.h"
#include "third_party/WebKit/public/platform/WebAudioSourceProvider.h"
#include "third_party/WebKit/public/platform/WebVector.h"
@@ -38,26 +40,21 @@ namespace content {
//
// All calls are protected by a lock.
class CONTENT_EXPORT WebRtcLocalAudioSourceProvider
- : NON_EXPORTED_BASE(public media::AudioConverter::InputCallback),
- NON_EXPORTED_BASE(public blink::WebAudioSourceProvider),
- NON_EXPORTED_BASE(public WebRtcAudioCapturerSink) {
+ : NON_EXPORTED_BASE(public blink::WebAudioSourceProvider),
+ NON_EXPORTED_BASE(public media::AudioConverter::InputCallback),
+ NON_EXPORTED_BASE(public MediaStreamAudioSink) {
public:
static const size_t kWebAudioRenderBufferSize;
WebRtcLocalAudioSourceProvider();
virtual ~WebRtcLocalAudioSourceProvider();
- // WebRtcAudioCapturerSink implementation.
- virtual int CaptureData(const std::vector<int>& channels,
- const int16* audio_data,
- int sample_rate,
- int number_of_channels,
- int number_of_frames,
- int audio_delay_milliseconds,
- int current_volume,
- bool need_audio_processing,
- bool key_pressed) OVERRIDE;
- virtual void SetCaptureFormat(const media::AudioParameters& params) OVERRIDE;
+ // MediaStreamAudioSink implementation.
+ virtual void OnData(const int16* audio_data,
+ int sample_rate,
+ int number_of_channels,
+ int number_of_frames) OVERRIDE;
+ virtual void OnSetFormat(const media::AudioParameters& params) OVERRIDE;
// blink::WebAudioSourceProvider implementation.
virtual void setClient(blink::WebAudioSourceProviderClient* client) OVERRIDE;
diff --git a/content/renderer/media/webrtc_local_audio_source_provider_unittest.cc b/content/renderer/media/webrtc_local_audio_source_provider_unittest.cc
index 03b4991..5b7e852 100644
--- a/content/renderer/media/webrtc_local_audio_source_provider_unittest.cc
+++ b/content/renderer/media/webrtc_local_audio_source_provider_unittest.cc
@@ -25,7 +25,7 @@ class WebRtcLocalAudioSourceProviderTest : public testing::Test {
sink_bus_ = media::AudioBus::Create(sink_params_);
source_provider_.reset(new WebRtcLocalAudioSourceProvider());
source_provider_->SetSinkParamsForTesting(sink_params_);
- source_provider_->SetCaptureFormat(source_params_);
+ source_provider_->OnSetFormat(source_params_);
}
media::AudioParameters source_params_;
@@ -54,13 +54,10 @@ TEST_F(WebRtcLocalAudioSourceProviderTest, VerifyDataFlow) {
std::fill(source_data_.get(), source_data_.get() + length, 1);
// Deliver data to |source_provider_|.
- std::vector<int> voe_channels;
- source_provider_->CaptureData(voe_channels,
- source_data_.get(),
- source_params_.sample_rate(),
- source_params_.channels(),
- source_params_.frames_per_buffer(),
- 0, 0, false, false);
+ source_provider_->OnData(source_data_.get(),
+ source_params_.sample_rate(),
+ source_params_.channels(),
+ source_params_.frames_per_buffer());
// Consume the first packet in the resampler, which contains only zero.
// And the consumption of the data will trigger pulling the real packet from
@@ -77,12 +74,10 @@ TEST_F(WebRtcLocalAudioSourceProviderTest, VerifyDataFlow) {
}
// Prepare the second packet for featching.
- source_provider_->CaptureData(voe_channels,
- source_data_.get(),
- source_params_.sample_rate(),
- source_params_.channels(),
- source_params_.frames_per_buffer(),
- 0, 0, false, false);
+ source_provider_->OnData(source_data_.get(),
+ source_params_.sample_rate(),
+ source_params_.channels(),
+ source_params_.frames_per_buffer());
// Verify the packets.
for (int i = 0; i < source_params_.frames_per_buffer();
diff --git a/content/renderer/media/webrtc_local_audio_track.cc b/content/renderer/media/webrtc_local_audio_track.cc
index ab92477..bedab26 100644
--- a/content/renderer/media/webrtc_local_audio_track.cc
+++ b/content/renderer/media/webrtc_local_audio_track.cc
@@ -4,9 +4,12 @@
#include "content/renderer/media/webrtc_local_audio_track.h"
+#include "content/public/renderer/media_stream_audio_sink.h"
+#include "content/renderer/media/media_stream_audio_sink_owner.h"
+#include "content/renderer/media/media_stream_audio_track_sink.h"
+#include "content/renderer/media/peer_connection_audio_sink_owner.h"
#include "content/renderer/media/webaudio_capturer_source.h"
#include "content/renderer/media/webrtc_audio_capturer.h"
-#include "content/renderer/media/webrtc_audio_capturer_sink_owner.h"
#include "content/renderer/media/webrtc_local_audio_source_provider.h"
#include "media/base/audio_fifo.h"
#include "third_party/libjingle/source/talk/media/base/audiorenderer.h"
@@ -179,7 +182,7 @@ void WebRtcLocalAudioTrack::Capture(media::AudioBus* audio_source,
// |sinks_to_notify_format| is empty.
for (SinkList::const_iterator it = sinks_to_notify_format.begin();
it != sinks_to_notify_format.end(); ++it) {
- (*it)->SetCaptureFormat(buffer_->params());
+ (*it)->OnSetFormat(buffer_->params());
}
// Push the data to the fifo.
@@ -202,15 +205,15 @@ void WebRtcLocalAudioTrack::Capture(media::AudioBus* audio_source,
// detection and should be changed when audio processing is moved from
// WebRTC to the track.
for (SinkList::const_iterator it = sinks.begin(); it != sinks.end(); ++it) {
- int new_volume = (*it)->CaptureData(voe_channels,
- buffer_->buffer(),
- buffer_->params().sample_rate(),
- buffer_->params().channels(),
- buffer_->params().frames_per_buffer(),
- audio_delay_milliseconds,
- current_volume,
- need_audio_processing,
- key_pressed);
+ int new_volume = (*it)->OnData(buffer_->buffer(),
+ buffer_->params().sample_rate(),
+ buffer_->params().channels(),
+ buffer_->params().frames_per_buffer(),
+ voe_channels,
+ audio_delay_milliseconds,
+ current_volume,
+ need_audio_processing,
+ key_pressed);
if (new_volume != 0 && capturer.get()) {
// Feed the new volume to WebRtc while changing the volume on the
// browser.
@@ -221,9 +224,9 @@ void WebRtcLocalAudioTrack::Capture(media::AudioBus* audio_source,
}
}
-void WebRtcLocalAudioTrack::SetCaptureFormat(
+void WebRtcLocalAudioTrack::OnSetFormat(
const media::AudioParameters& params) {
- DVLOG(1) << "WebRtcLocalAudioTrack::SetCaptureFormat()";
+ DVLOG(1) << "WebRtcLocalAudioTrack::OnSetFormat()";
// If the source is restarted, we might have changed to another capture
// thread.
capture_thread_checker_.DetachFromThread();
@@ -275,7 +278,7 @@ std::string WebRtcLocalAudioTrack::kind() const {
return kAudioTrackKind;
}
-void WebRtcLocalAudioTrack::AddSink(WebRtcAudioCapturerSink* sink) {
+void WebRtcLocalAudioTrack::AddSink(MediaStreamAudioSink* sink) {
DCHECK(main_render_thread_checker_.CalledOnValidThread());
DVLOG(1) << "WebRtcLocalAudioTrack::AddSink()";
base::AutoLock auto_lock(lock_);
@@ -283,22 +286,21 @@ void WebRtcLocalAudioTrack::AddSink(WebRtcAudioCapturerSink* sink) {
// Verify that |sink| is not already added to the list.
DCHECK(std::find_if(
sinks_.begin(), sinks_.end(),
- WebRtcAudioCapturerSinkOwner::WrapsSink(sink)) == sinks_.end());
+ MediaStreamAudioTrackSink::WrapsMediaStreamSink(sink)) == sinks_.end());
- // Create (and add to the list) a new WebRtcAudioCapturerSinkOwner which owns
- // the |sink| and delagates all calls to the WebRtcAudioCapturerSink
+ // Create (and add to the list) a new MediaStreamAudioTrackSink which owns
+ // the |sink| and delegates all calls to the MediaStreamAudioSink
// interface.
- scoped_refptr<WebRtcAudioCapturerSinkOwner> sink_owner(
- new WebRtcAudioCapturerSinkOwner(sink));
+ scoped_refptr<MediaStreamAudioTrackSink> sink_owner(
+ new MediaStreamAudioSinkOwner(sink));
sinks_.push_back(sink_owner);
// Also push the |sink_owner| to |sinks_to_notify_format_| so that we will
- // call SetCaptureFormat() on the new sink.
+ // call OnSetFormat() on the new sink.
sinks_to_notify_format_.push_back(sink_owner);
}
-void WebRtcLocalAudioTrack::RemoveSink(
- WebRtcAudioCapturerSink* sink) {
+void WebRtcLocalAudioTrack::RemoveSink(MediaStreamAudioSink* sink) {
DCHECK(main_render_thread_checker_.CalledOnValidThread());
DVLOG(1) << "WebRtcLocalAudioTrack::RemoveSink()";
@@ -308,13 +310,63 @@ void WebRtcLocalAudioTrack::RemoveSink(
// will clear the delegate.
SinkList::iterator it = std::find_if(
sinks_to_notify_format_.begin(), sinks_to_notify_format_.end(),
- WebRtcAudioCapturerSinkOwner::WrapsSink(sink));
+ MediaStreamAudioTrackSink::WrapsMediaStreamSink(sink));
if (it != sinks_to_notify_format_.end())
sinks_to_notify_format_.erase(it);
// Get iterator to the first element for which WrapsSink(sink) returns true.
it = std::find_if(sinks_.begin(), sinks_.end(),
- WebRtcAudioCapturerSinkOwner::WrapsSink(sink));
+ MediaStreamAudioTrackSink::WrapsMediaStreamSink(sink));
+ if (it != sinks_.end()) {
+ // Clear the delegate to ensure that no more capture callbacks will
+ // be sent to this sink. Also avoids a possible crash which can happen
+ // if this method is called while capturing is active.
+ (*it)->Reset();
+ sinks_.erase(it);
+ }
+}
+
+void WebRtcLocalAudioTrack::AddSink(PeerConnectionAudioSink* sink) {
+ DCHECK(main_render_thread_checker_.CalledOnValidThread());
+ DVLOG(1) << "WebRtcLocalAudioTrack::AddSink()";
+ base::AutoLock auto_lock(lock_);
+
+ // Verify that |sink| is not already added to the list.
+ DCHECK(std::find_if(
+ sinks_.begin(), sinks_.end(),
+ MediaStreamAudioTrackSink::WrapsPeerConnectionSink(sink)) ==
+ sinks_.end());
+
+ // Create (and add to the list) a new MediaStreamAudioTrackSink which owns
+ // the |sink| and delegates all calls to the MediaStreamAudioSink
+ // interface.
+ scoped_refptr<MediaStreamAudioTrackSink> sink_owner(
+ new PeerConnectionAudioSinkOwner(sink));
+ sinks_.push_back(sink_owner);
+
+ // Also push the |sink_owner| to |sinks_to_notify_format_| so that we will
+ // call OnSetFormat() on the new sink.
+ sinks_to_notify_format_.push_back(sink_owner);
+}
+
+void WebRtcLocalAudioTrack::RemoveSink(PeerConnectionAudioSink* sink) {
+ DCHECK(main_render_thread_checker_.CalledOnValidThread());
+ DVLOG(1) << "WebRtcLocalAudioTrack::RemoveSink()";
+
+ base::AutoLock auto_lock(lock_);
+ // Remove the item from |sinks_to_notify_format_|.
+ // This has to be done before removing the element in |sinks_| since there it
+ // will clear the delegate.
+ SinkList::iterator it = std::find_if(
+ sinks_to_notify_format_.begin(), sinks_to_notify_format_.end(),
+ MediaStreamAudioTrackSink::WrapsPeerConnectionSink(sink));
+ if (it != sinks_to_notify_format_.end())
+ sinks_to_notify_format_.erase(it);
+
+ // Get iterator to the first element for which WrapsPeerConnectionSink(sink)
+ // returns true.
+ it = std::find_if(sinks_.begin(), sinks_.end(),
+ MediaStreamAudioTrackSink::WrapsPeerConnectionSink(sink));
if (it != sinks_.end()) {
// Clear the delegate to ensure that no more capture callbacks will
// be sent to this sink. Also avoids a possible crash which can happen
diff --git a/content/renderer/media/webrtc_local_audio_track.h b/content/renderer/media/webrtc_local_audio_track.h
index 6952722..b53eab2 100644
--- a/content/renderer/media/webrtc_local_audio_track.h
+++ b/content/renderer/media/webrtc_local_audio_track.h
@@ -10,6 +10,7 @@
#include "base/synchronization/lock.h"
#include "base/threading/thread_checker.h"
+#include "content/renderer/media/media_stream_audio_track_sink.h"
#include "content/renderer/media/webrtc_audio_device_impl.h"
#include "content/renderer/media/webrtc_local_audio_source_provider.h"
#include "third_party/libjingle/source/talk/app/webrtc/mediaconstraintsinterface.h"
@@ -27,12 +28,14 @@ class AudioBus;
namespace content {
+class MediaStreamAudioSink;
+class MediaStreamAudioSinkOwner;
+class PeerConnectionAudioSink;
class WebAudioCapturerSource;
class WebRtcAudioCapturer;
-class WebRtcAudioCapturerSinkOwner;
// A WebRtcLocalAudioTrack instance contains the implementations of
-// MediaStreamTrack and WebRtcAudioCapturerSink.
+// MediaStreamTrack and MediaStreamAudioSink.
// When an instance is created, it will register itself as a track to the
// WebRtcAudioCapturer to get the captured data, and forward the data to
// its |sinks_|. The data flow can be stopped by disabling the audio track.
@@ -48,14 +51,20 @@ class CONTENT_EXPORT WebRtcLocalAudioTrack
webrtc::AudioSourceInterface* track_source,
const webrtc::MediaConstraintsInterface* constraints);
- // Add a sink to the track. This function will trigger a SetCaptureFormat()
+  // Add a sink to the track. This function will trigger an OnSetFormat()
// call on the |sink|.
// Called on the main render thread.
- void AddSink(WebRtcAudioCapturerSink* sink);
+ void AddSink(MediaStreamAudioSink* sink);
// Remove a sink from the track.
// Called on the main render thread.
- void RemoveSink(WebRtcAudioCapturerSink* sink);
+ void RemoveSink(MediaStreamAudioSink* sink);
+
+ // Add/remove PeerConnection sink to/from the track.
+ // TODO(xians): Remove these two methods after PeerConnection can use the
+ // same sink interface as MediaStreamAudioSink.
+ void AddSink(PeerConnectionAudioSink* sink);
+ void RemoveSink(PeerConnectionAudioSink* sink);
// Starts the local audio track. Called on the main render thread and
// should be called only once when audio track is created.
@@ -75,7 +84,7 @@ class CONTENT_EXPORT WebRtcLocalAudioTrack
   // Method called by the capturer to set the audio parameters used by the
   // source of the capture data.
// Call on the capture audio thread.
- void SetCaptureFormat(const media::AudioParameters& params);
+ void OnSetFormat(const media::AudioParameters& params);
blink::WebAudioSourceProvider* audio_source_provider() const {
return source_provider_.get();
@@ -92,7 +101,7 @@ class CONTENT_EXPORT WebRtcLocalAudioTrack
virtual ~WebRtcLocalAudioTrack();
private:
- typedef std::list<scoped_refptr<WebRtcAudioCapturerSinkOwner> > SinkList;
+ typedef std::list<scoped_refptr<MediaStreamAudioTrackSink> > SinkList;
// cricket::AudioCapturer implementation.
virtual void AddChannel(int channel_id) OVERRIDE;
diff --git a/content/renderer/media/webrtc_local_audio_track_unittest.cc b/content/renderer/media/webrtc_local_audio_track_unittest.cc
index aac06d0..f5b668a 100644
--- a/content/renderer/media/webrtc_local_audio_track_unittest.cc
+++ b/content/renderer/media/webrtc_local_audio_track_unittest.cc
@@ -6,6 +6,7 @@
#include "base/test/test_timeouts.h"
#include "content/renderer/media/rtc_media_constraints.h"
#include "content/renderer/media/webrtc_audio_capturer.h"
+#include "content/renderer/media/webrtc_audio_device_impl.h"
#include "content/renderer/media/webrtc_local_audio_source_provider.h"
#include "content/renderer/media/webrtc_local_audio_track.h"
#include "media/audio/audio_parameters.h"
@@ -118,19 +119,20 @@ class MockCapturerSource : public media::AudioCapturerSource {
media::AudioParameters params_;
};
-class MockWebRtcAudioCapturerSink : public WebRtcAudioCapturerSink {
+// TODO(xians): Use MediaStreamAudioSink.
+class MockMediaStreamAudioSink : public PeerConnectionAudioSink {
public:
- MockWebRtcAudioCapturerSink() {}
- ~MockWebRtcAudioCapturerSink() {}
- int CaptureData(const std::vector<int>& channels,
- const int16* audio_data,
- int sample_rate,
- int number_of_channels,
- int number_of_frames,
- int audio_delay_milliseconds,
- int current_volume,
- bool need_audio_processing,
- bool key_pressed) OVERRIDE {
+ MockMediaStreamAudioSink() {}
+ ~MockMediaStreamAudioSink() {}
+ int OnData(const int16* audio_data,
+ int sample_rate,
+ int number_of_channels,
+ int number_of_frames,
+ const std::vector<int>& channels,
+ int audio_delay_milliseconds,
+ int current_volume,
+ bool need_audio_processing,
+ bool key_pressed) OVERRIDE {
CaptureData(channels.size(),
sample_rate,
number_of_channels,
@@ -150,7 +152,7 @@ class MockWebRtcAudioCapturerSink : public WebRtcAudioCapturerSink {
int current_volume,
bool need_audio_processing,
bool key_pressed));
- MOCK_METHOD1(SetCaptureFormat, void(const media::AudioParameters& params));
+ MOCK_METHOD1(OnSetFormat, void(const media::AudioParameters& params));
};
} // namespace
@@ -196,11 +198,10 @@ TEST_F(WebRtcLocalAudioTrackTest, ConnectAndDisconnectOneSink) {
static_cast<webrtc::AudioTrackInterface*>(track.get())->
GetRenderer()->AddChannel(i);
}
- scoped_ptr<MockWebRtcAudioCapturerSink> sink(
- new MockWebRtcAudioCapturerSink());
+ scoped_ptr<MockMediaStreamAudioSink> sink(new MockMediaStreamAudioSink());
const media::AudioParameters params = capturer_->audio_parameters();
base::WaitableEvent event(false, false);
- EXPECT_CALL(*sink, SetCaptureFormat(_)).WillOnce(Return());
+ EXPECT_CALL(*sink, OnSetFormat(_)).WillOnce(Return());
EXPECT_CALL(*sink,
CaptureData(kNumberOfNetworkChannels,
params.sample_rate(),
@@ -240,11 +241,10 @@ TEST_F(WebRtcLocalAudioTrackTest, DISABLED_DisableEnableAudioTrack) {
GetRenderer()->AddChannel(0);
EXPECT_TRUE(track->enabled());
EXPECT_TRUE(track->set_enabled(false));
- scoped_ptr<MockWebRtcAudioCapturerSink> sink(
- new MockWebRtcAudioCapturerSink());
+ scoped_ptr<MockMediaStreamAudioSink> sink(new MockMediaStreamAudioSink());
const media::AudioParameters params = capturer_->audio_parameters();
base::WaitableEvent event(false, false);
- EXPECT_CALL(*sink, SetCaptureFormat(_)).Times(1);
+ EXPECT_CALL(*sink, OnSetFormat(_)).Times(1);
EXPECT_CALL(*sink,
CaptureData(1,
params.sample_rate(),
@@ -293,11 +293,10 @@ TEST_F(WebRtcLocalAudioTrackTest, DISABLED_MultipleAudioTracks) {
static_cast<webrtc::AudioTrackInterface*>(track_1.get())->
GetRenderer()->AddChannel(0);
EXPECT_TRUE(track_1->enabled());
- scoped_ptr<MockWebRtcAudioCapturerSink> sink_1(
- new MockWebRtcAudioCapturerSink());
+ scoped_ptr<MockMediaStreamAudioSink> sink_1(new MockMediaStreamAudioSink());
const media::AudioParameters params = capturer_->audio_parameters();
base::WaitableEvent event_1(false, false);
- EXPECT_CALL(*sink_1, SetCaptureFormat(_)).WillOnce(Return());
+ EXPECT_CALL(*sink_1, OnSetFormat(_)).WillOnce(Return());
EXPECT_CALL(*sink_1,
CaptureData(1,
params.sample_rate(),
@@ -325,9 +324,8 @@ TEST_F(WebRtcLocalAudioTrackTest, DISABLED_MultipleAudioTracks) {
event_1.Reset();
base::WaitableEvent event_2(false, false);
- scoped_ptr<MockWebRtcAudioCapturerSink> sink_2(
- new MockWebRtcAudioCapturerSink());
- EXPECT_CALL(*sink_2, SetCaptureFormat(_)).WillOnce(Return());
+ scoped_ptr<MockMediaStreamAudioSink> sink_2(new MockMediaStreamAudioSink());
+ EXPECT_CALL(*sink_2, OnSetFormat(_)).WillOnce(Return());
EXPECT_CALL(*sink_1,
CaptureData(1,
params.sample_rate(),
@@ -404,10 +402,9 @@ TEST_F(WebRtcLocalAudioTrackTest, StartAndStopAudioTracks) {
EXPECT_TRUE(event.TimedWait(TestTimeouts::tiny_timeout()));
// Verify the data flow by connecting the sink to |track_1|.
- scoped_ptr<MockWebRtcAudioCapturerSink> sink(
- new MockWebRtcAudioCapturerSink());
+ scoped_ptr<MockMediaStreamAudioSink> sink(new MockMediaStreamAudioSink());
event.Reset();
- EXPECT_CALL(*sink, SetCaptureFormat(_)).WillOnce(SignalEvent(&event));
+ EXPECT_CALL(*sink, OnSetFormat(_)).WillOnce(SignalEvent(&event));
EXPECT_CALL(*sink, CaptureData(_, _, _, _, 0, 0, false, false))
.Times(AnyNumber()).WillRepeatedly(Return());
track_1->AddSink(sink.get());
@@ -431,7 +428,7 @@ TEST_F(WebRtcLocalAudioTrackTest, StartAndStopAudioTracks) {
// Adding a new track to the capturer.
track_2->AddSink(sink.get());
- EXPECT_CALL(*sink, SetCaptureFormat(_)).Times(0);
+ EXPECT_CALL(*sink, OnSetFormat(_)).Times(0);
   // Stopping the capturer again will not trigger stopping the source of the
   // capturer again.
@@ -490,14 +487,13 @@ TEST_F(WebRtcLocalAudioTrackTest, ConnectTracksToDifferentCapturers) {
GetRenderer()->AddChannel(i);
}
// Verify the data flow by connecting the |sink_1| to |track_1|.
- scoped_ptr<MockWebRtcAudioCapturerSink> sink_1(
- new MockWebRtcAudioCapturerSink());
+ scoped_ptr<MockMediaStreamAudioSink> sink_1(new MockMediaStreamAudioSink());
EXPECT_CALL(
*sink_1.get(),
CaptureData(
kNumberOfNetworkChannelsForTrack1, 48000, 2, _, 0, 0, false, false))
.Times(AnyNumber()).WillRepeatedly(Return());
- EXPECT_CALL(*sink_1.get(), SetCaptureFormat(_)).Times(AnyNumber());
+ EXPECT_CALL(*sink_1.get(), OnSetFormat(_)).Times(AnyNumber());
track_1->AddSink(sink_1.get());
// Create a new capturer with new source with different audio format.
@@ -527,15 +523,14 @@ TEST_F(WebRtcLocalAudioTrackTest, ConnectTracksToDifferentCapturers) {
GetRenderer()->AddChannel(i);
}
// Verify the data flow by connecting the |sink_2| to |track_2|.
- scoped_ptr<MockWebRtcAudioCapturerSink> sink_2(
- new MockWebRtcAudioCapturerSink());
+ scoped_ptr<MockMediaStreamAudioSink> sink_2(new MockMediaStreamAudioSink());
base::WaitableEvent event(false, false);
EXPECT_CALL(
*sink_2,
CaptureData(
kNumberOfNetworkChannelsForTrack2, 44100, 1, _, 0, 0, false, false))
.Times(AnyNumber()).WillRepeatedly(Return());
- EXPECT_CALL(*sink_2, SetCaptureFormat(_)).WillOnce(SignalEvent(&event));
+ EXPECT_CALL(*sink_2, OnSetFormat(_)).WillOnce(SignalEvent(&event));
track_2->AddSink(sink_2.get());
EXPECT_TRUE(event.TimedWait(TestTimeouts::tiny_timeout()));
@@ -583,10 +578,9 @@ TEST_F(WebRtcLocalAudioTrackTest, TrackWorkWithSmallBufferSize) {
track->Start();
// Verify the data flow by connecting the |sink| to |track|.
- scoped_ptr<MockWebRtcAudioCapturerSink> sink(
- new MockWebRtcAudioCapturerSink());
+ scoped_ptr<MockMediaStreamAudioSink> sink(new MockMediaStreamAudioSink());
base::WaitableEvent event(false, false);
- EXPECT_CALL(*sink, SetCaptureFormat(_)).Times(1);
+ EXPECT_CALL(*sink, OnSetFormat(_)).Times(1);
// Verify the sinks are getting the packets with an expecting buffer size.
#if defined(OS_ANDROID)
const int expected_buffer_size = params.sample_rate() / 100;