author     grunell <grunell@chromium.org>        2014-09-30 01:53:19 -0700
committer  Commit bot <commit-bot@chromium.org>  2014-09-30 08:55:03 +0000
commit     e372e4f4bbcb94a6ff427b337845ad452133e527 (patch)
tree       5b161bb750134700efc29e509bbefa39edb14a0b /content
parent     4388931142e7a999ad6a0820afde47810e050e68 (diff)
Strip away the keyboard mic channel if no audio processing.
This will currently never be exercised, but is required when we support
multiple MediaStreamAudioProcessors with different constraints.

TEST=Since we don't support this yet, I have tested with a custom build
hard coded to not create |audio_processor_| in MSAP (to simulate custom
constraint settings that would cause no audio processing to happen).

BUG=345296

Review URL: https://codereview.chromium.org/594233002

Cr-Commit-Position: refs/heads/master@{#297386}
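The core trick in this change is dropping the keyboard mic channel without
copying any audio data, by wrapping the first channels of the source bus.
Below is a minimal sketch of that technique, assuming Chromium's
media::AudioBus API as of this commit; WrapFirstChannels is a hypothetical
helper written for illustration only, not code from this patch.

    #include "base/logging.h"
    #include "base/memory/scoped_ptr.h"
    #include "media/base/audio_bus.h"

    // Hypothetical helper (illustration only): returns a bus exposing only
    // the first |num_channels| channels of |source|. The wrapper shares the
    // source's channel memory, so no samples are copied; |source| must
    // outlive the returned wrapper.
    scoped_ptr<media::AudioBus> WrapFirstChannels(
        const media::AudioBus* source, int num_channels) {
      DCHECK_LE(num_channels, source->channels());
      scoped_ptr<media::AudioBus> wrapper =
          media::AudioBus::CreateWrapper(num_channels);
      for (int i = 0; i < num_channels; ++i) {
        // SetChannelData() takes a non-const pointer; the wrapper is only
        // ever read from, so the const_cast is safe here.
        wrapper->SetChannelData(i, const_cast<float*>(source->channel(i)));
      }
      wrapper->set_frames(source->frames());
      return wrapper.Pass();
    }

MediaStreamAudioFifo::Push() in the diff below applies exactly this pattern
via its intermediate bus.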
Diffstat (limited to 'content')
-rw-r--r--  content/renderer/media/media_stream_audio_processor.cc           62
-rw-r--r--  content/renderer/media/media_stream_audio_processor_unittest.cc  80
2 files changed, 131 insertions(+), 11 deletions(-)
diff --git a/content/renderer/media/media_stream_audio_processor.cc b/content/renderer/media/media_stream_audio_processor.cc
index 4efc507..65c668a 100644
--- a/content/renderer/media/media_stream_audio_processor.cc
+++ b/content/renderer/media/media_stream_audio_processor.cc
@@ -49,6 +49,7 @@ AudioProcessing::ChannelLayout MapLayout(media::ChannelLayout media_layout) {
}
}
+// This is only used for playout data, where at most two channels are
+// supported.
AudioProcessing::ChannelLayout ChannelsToLayout(int num_channels) {
switch (num_channels) {
case 1:
@@ -113,20 +114,33 @@ class MediaStreamAudioBus {
// Wraps AudioFifo to provide a cleaner interface to MediaStreamAudioProcessor.
// It avoids the FIFO when the source and destination frames match. All methods
-// are called on one of the capture or render audio threads exclusively.
+// are called on one of the capture or render audio threads exclusively. If
+// |source_channels| is larger than |destination_channels|, only the first
+// |destination_channels| are kept from the source.
class MediaStreamAudioFifo {
public:
- MediaStreamAudioFifo(int channels, int source_frames,
+ MediaStreamAudioFifo(int source_channels,
+ int destination_channels,
+ int source_frames,
int destination_frames)
- : source_frames_(source_frames),
- destination_(new MediaStreamAudioBus(channels, destination_frames)),
+ : source_channels_(source_channels),
+ source_frames_(source_frames),
+ destination_(
+ new MediaStreamAudioBus(destination_channels, destination_frames)),
data_available_(false) {
+ DCHECK_GE(source_channels, destination_channels);
+
+ if (source_channels > destination_channels) {
+ audio_source_intermediate_ =
+ media::AudioBus::CreateWrapper(destination_channels);
+ }
+
if (source_frames != destination_frames) {
// Since we require every Push to be followed by as many Consumes as
// possible, twice the larger of the two is a (probably) loose upper bound
// on the FIFO size.
const int fifo_frames = 2 * std::max(source_frames, destination_frames);
- fifo_.reset(new media::AudioFifo(channels, fifo_frames));
+ fifo_.reset(new media::AudioFifo(destination_channels, fifo_frames));
}
// May be created in the main render thread and used in the audio threads.
@@ -135,13 +149,25 @@ class MediaStreamAudioFifo {
void Push(const media::AudioBus* source) {
DCHECK(thread_checker_.CalledOnValidThread());
- DCHECK_EQ(source->channels(), destination_->bus()->channels());
+ DCHECK_EQ(source->channels(), source_channels_);
DCHECK_EQ(source->frames(), source_frames_);
+ const media::AudioBus* source_to_push = source;
+
+ if (audio_source_intermediate_) {
+ for (int i = 0; i < destination_->bus()->channels(); ++i) {
+ audio_source_intermediate_->SetChannelData(
+ i,
+ const_cast<float*>(source->channel(i)));
+ }
+ audio_source_intermediate_->set_frames(source->frames());
+ source_to_push = audio_source_intermediate_.get();
+ }
+
if (fifo_) {
- fifo_->Push(source);
+ fifo_->Push(source_to_push);
} else {
- source->CopyTo(destination_->bus());
+ source_to_push->CopyTo(destination_->bus());
data_available_ = true;
}
}
@@ -170,7 +196,9 @@ class MediaStreamAudioFifo {
private:
base::ThreadChecker thread_checker_;
+ const int source_channels_; // For a DCHECK.
const int source_frames_; // For a DCHECK.
+ scoped_ptr<media::AudioBus> audio_source_intermediate_;
scoped_ptr<MediaStreamAudioBus> destination_;
scoped_ptr<media::AudioFifo> fifo_;
// Only used when the FIFO is disabled;
@@ -465,10 +493,24 @@ void MediaStreamAudioProcessor::InitializeCaptureFifo(
// format it would prefer.
const int output_sample_rate = audio_processing_ ?
kAudioProcessingSampleRate : input_format.sample_rate();
- const media::ChannelLayout output_channel_layout = audio_processing_ ?
+ media::ChannelLayout output_channel_layout = audio_processing_ ?
media::GuessChannelLayout(kAudioProcessingNumberOfChannels) :
input_format.channel_layout();
+ // The number of output channels from the fifo is normally the same as the
+ // input.
+ int fifo_output_channels = input_format.channels();
+
+ // Special case: if the input has a keyboard mic channel and audio
+ // processing is not used, the fifo strips that channel away. So we use
+ // stereo as the output layout, and also change the fifo's output channel
+ // count to match.
+ if (input_format.channel_layout() ==
+ media::CHANNEL_LAYOUT_STEREO_AND_KEYBOARD_MIC &&
+ !audio_processing_) {
+ output_channel_layout = media::CHANNEL_LAYOUT_STEREO;
+ fifo_output_channels = ChannelLayoutToChannelCount(output_channel_layout);
+ }
+
// webrtc::AudioProcessing requires a 10 ms chunk size. We use this native
// size when processing is enabled. When disabled we use the same size as
// the source if less than 10 ms.
@@ -495,6 +537,7 @@ void MediaStreamAudioProcessor::InitializeCaptureFifo(
capture_fifo_.reset(
new MediaStreamAudioFifo(input_format.channels(),
+ fifo_output_channels,
input_format.frames_per_buffer(),
processing_frames));
@@ -527,6 +570,7 @@ void MediaStreamAudioProcessor::InitializeRenderFifoIfNeeded(
const int analysis_frames = sample_rate / 100; // 10 ms chunks.
render_fifo_.reset(
new MediaStreamAudioFifo(number_of_channels,
+ number_of_channels,
frames_per_buffer,
analysis_frames));
}
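To make the capture-side decision above easier to follow, here is a
hypothetical distillation of the channel logic InitializeCaptureFifo() now
implements; ChooseFifoOutputChannels is illustration only, not part of this
patch.

    #include "media/base/channel_layout.h"

    // Illustration only: restates the fifo channel-count decision made in
    // InitializeCaptureFifo().
    int ChooseFifoOutputChannels(media::ChannelLayout input_layout,
                                 int input_channels,
                                 bool has_audio_processing) {
      // Without processing, the keyboard mic channel has no consumer, so
      // the fifo strips it and downstream consumers see plain stereo.
      if (!has_audio_processing &&
          input_layout == media::CHANNEL_LAYOUT_STEREO_AND_KEYBOARD_MIC) {
        return media::ChannelLayoutToChannelCount(
            media::CHANNEL_LAYOUT_STEREO);
      }
      // Otherwise the fifo's output channel count matches its input; with
      // processing enabled, the processor itself consumes the keyboard mic
      // channel.
      return input_channels;
    }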
diff --git a/content/renderer/media/media_stream_audio_processor_unittest.cc b/content/renderer/media/media_stream_audio_processor_unittest.cc
index dd8aa6f..91d7b32 100644
--- a/content/renderer/media/media_stream_audio_processor_unittest.cc
+++ b/content/renderer/media/media_stream_audio_processor_unittest.cc
@@ -40,6 +40,8 @@ const int kAudioProcessingNumberOfChannel = 1;
// The number of packets used for testing.
const int kNumberOfPacketsForTest = 100;
+const int kMaxNumberOfPlayoutDataChannels = 2;
+
void ReadDataFromSpeechFile(char* data, int length) {
base::FilePath file;
CHECK(PathService::Get(base::DIR_SOURCE_ROOT, &file));
@@ -79,6 +81,19 @@ class MediaStreamAudioProcessorTest : public ::testing::Test {
const int16* data_ptr = reinterpret_cast<const int16*>(capture_data.get());
scoped_ptr<media::AudioBus> data_bus = media::AudioBus::Create(
params.channels(), params.frames_per_buffer());
+
+ // |data_bus_playout| is used if the number of capture channels is larger
+ // than the maximum allowed number of playout channels.
+ // |data_bus_playout_to_use| points to the AudioBus to use, either
+ // |data_bus| or |data_bus_playout|.
+ scoped_ptr<media::AudioBus> data_bus_playout;
+ media::AudioBus* data_bus_playout_to_use = data_bus.get();
+ if (params.channels() > kMaxNumberOfPlayoutDataChannels) {
+ data_bus_playout =
+ media::AudioBus::CreateWrapper(kMaxNumberOfPlayoutDataChannels);
+ data_bus_playout->set_frames(params.frames_per_buffer());
+ data_bus_playout_to_use = data_bus_playout.get();
+ }
+
for (int i = 0; i < kNumberOfPacketsForTest; ++i) {
data_bus->FromInterleaved(data_ptr, data_bus->frames(), 2);
audio_processor->PushCaptureData(data_bus.get());
@@ -94,8 +109,14 @@ class MediaStreamAudioProcessorTest : public ::testing::Test {
const bool is_aec_enabled = ap && ap->echo_cancellation()->is_enabled();
#endif
if (is_aec_enabled) {
- audio_processor->OnPlayoutData(data_bus.get(), params.sample_rate(),
- 10);
+ if (params.channels() > kMaxNumberOfPlayoutDataChannels) {
+ for (int i = 0; i < kMaxNumberOfPlayoutDataChannels; ++i) {
+ data_bus_playout->SetChannelData(
+ i, const_cast<float*>(data_bus->channel(i)));
+ }
+ }
+ audio_processor->OnPlayoutData(data_bus_playout_to_use,
+ params.sample_rate(), 10);
}
int16* output = NULL;
@@ -469,4 +490,59 @@ TEST_F(MediaStreamAudioProcessorTest, TestStereoAudio) {
audio_processor = NULL;
}
+TEST_F(MediaStreamAudioProcessorTest, TestWithKeyboardMicChannel) {
+ MockMediaConstraintFactory constraint_factory;
+ constraint_factory.AddMandatory(
+ MediaAudioConstraints::kGoogExperimentalNoiseSuppression, true);
+ scoped_refptr<WebRtcAudioDeviceImpl> webrtc_audio_device(
+ new WebRtcAudioDeviceImpl());
+ scoped_refptr<MediaStreamAudioProcessor> audio_processor(
+ new rtc::RefCountedObject<MediaStreamAudioProcessor>(
+ constraint_factory.CreateWebMediaConstraints(), 0,
+ webrtc_audio_device.get()));
+ EXPECT_TRUE(audio_processor->has_audio_processing());
+
+ media::AudioParameters params(media::AudioParameters::AUDIO_PCM_LOW_LATENCY,
+ media::CHANNEL_LAYOUT_STEREO_AND_KEYBOARD_MIC,
+ 48000, 16, 512);
+ audio_processor->OnCaptureFormatChanged(params);
+
+ ProcessDataAndVerifyFormat(audio_processor.get(),
+ kAudioProcessingSampleRate,
+ kAudioProcessingNumberOfChannel,
+ kAudioProcessingSampleRate / 100);
+ // Set |audio_processor| to NULL to make sure |webrtc_audio_device| outlives
+ // |audio_processor|.
+ audio_processor = NULL;
+}
+
+TEST_F(MediaStreamAudioProcessorTest,
+ TestWithKeyboardMicChannelWithoutProcessing) {
+ // Set up the audio processor with the disable-processing flag on.
+ CommandLine::ForCurrentProcess()->AppendSwitch(
+ switches::kDisableAudioTrackProcessing);
+ MockMediaConstraintFactory constraint_factory;
+ scoped_refptr<WebRtcAudioDeviceImpl> webrtc_audio_device(
+ new WebRtcAudioDeviceImpl());
+ scoped_refptr<MediaStreamAudioProcessor> audio_processor(
+ new rtc::RefCountedObject<MediaStreamAudioProcessor>(
+ constraint_factory.CreateWebMediaConstraints(), 0,
+ webrtc_audio_device.get()));
+ EXPECT_FALSE(audio_processor->has_audio_processing());
+
+ media::AudioParameters params(media::AudioParameters::AUDIO_PCM_LOW_LATENCY,
+ media::CHANNEL_LAYOUT_STEREO_AND_KEYBOARD_MIC,
+ 48000, 16, 512);
+ audio_processor->OnCaptureFormatChanged(params);
+
+ ProcessDataAndVerifyFormat(
+ audio_processor.get(),
+ params.sample_rate(),
+ media::ChannelLayoutToChannelCount(media::CHANNEL_LAYOUT_STEREO),
+ params.sample_rate() / 100);
+ // Set |audio_processor| to NULL to make sure |webrtc_audio_device| outlives
+ // |audio_processor|.
+ audio_processor = NULL;
+}
+
} // namespace content
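For local verification, these tests would presumably run through the
content_unittests target (the exact output directory depends on your build
configuration), e.g.:

    out/Debug/content_unittests \
        --gtest_filter=MediaStreamAudioProcessorTest.TestWithKeyboardMic*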