summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorxians@chromium.org <xians@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>2014-07-02 11:49:26 +0000
committerxians@chromium.org <xians@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>2014-07-02 11:49:26 +0000
commiteb4234f1b6431b6bdd7f219fedd70754c33c7bce (patch)
tree5b20c56f54da390c7bc7ddf14d5916f57ccd867f
parent3958f458b8e838f272d0b8215d70c399225d229f (diff)
downloadchromium_src-eb4234f1b6431b6bdd7f219fedd70754c33c7bce.zip
chromium_src-eb4234f1b6431b6bdd7f219fedd70754c33c7bce.tar.gz
chromium_src-eb4234f1b6431b6bdd7f219fedd70754c33c7bce.tar.bz2
Revert 280896 "Fixed the audio mirroring problem introduced by r..."
The CL broke the memory.fyi bot. http://build.chromium.org/p/chromium.memory.fyi/builders/Linux%20Tests%20%28valgrind%29%281%29/builds/34976/steps/memory%20test%3A%20content/logs/stdio > Fixed the audio mirroring problem introduced by r278345 ( https://codereview.chromium.org/344583002) > > The problem is that: > Before r278345, we copied the data to an audio bus, then we did the channel swapping on that audio bus. > With r278345, we simply wrapped the shared memory to the audio bus in AudioInputDevice, and if MediaStreamAudioProcessor swaps the channels of the audio bus, it will flip the channels in each callback, which breaks the channel swapping. > > BUG=390439 > TEST=try stereo mode using webrtc > > Review URL: https://codereview.chromium.org/352943003 TBR=xians@chromium.org Review URL: https://codereview.chromium.org/367903006 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@281002 0039d316-1c4b-4281-b951-d872f2087c98
-rw-r--r--content/renderer/media/media_stream_audio_processor.cc28
-rw-r--r--content/renderer/media/media_stream_audio_processor.h2
-rw-r--r--content/renderer/media/media_stream_audio_processor_unittest.cc66
-rw-r--r--content/renderer/media/webrtc_audio_capturer.cc2
-rw-r--r--content/renderer/media/webrtc_audio_capturer.h2
-rw-r--r--media/base/audio_capturer_source.h2
6 files changed, 17 insertions, 85 deletions
diff --git a/content/renderer/media/media_stream_audio_processor.cc b/content/renderer/media/media_stream_audio_processor.cc
index 2e7a40d..d2e0695 100644
--- a/content/renderer/media/media_stream_audio_processor.cc
+++ b/content/renderer/media/media_stream_audio_processor.cc
@@ -86,7 +86,7 @@ class MediaStreamAudioProcessor::MediaStreamAudioConverter
audio_converter_.RemoveInput(this);
}
- void Push(const media::AudioBus* audio_source) {
+ void Push(media::AudioBus* audio_source) {
// Called on the audio thread, which is the capture audio thread for
// |MediaStreamAudioProcessor::capture_converter_|, and render audio thread
// for |MediaStreamAudioProcessor::render_converter_|.
@@ -95,7 +95,7 @@ class MediaStreamAudioProcessor::MediaStreamAudioConverter
fifo_->Push(audio_source);
}
- bool Convert(webrtc::AudioFrame* out, bool audio_mirroring) {
+ bool Convert(webrtc::AudioFrame* out) {
// Called on the audio thread, which is the capture audio thread for
// |MediaStreamAudioProcessor::capture_converter_|, and render audio thread
// for |MediaStreamAudioProcessor::render_converter_|.
@@ -110,18 +110,10 @@ class MediaStreamAudioProcessor::MediaStreamAudioConverter
// Convert data to the output format, this will trigger ProvideInput().
audio_converter_.Convert(audio_wrapper_.get());
- DCHECK_EQ(audio_wrapper_->frames(), sink_params_.frames_per_buffer());
-
- // Swap channels before interleaving the data if |audio_mirroring| is
- // set to true.
- if (audio_mirroring &&
- sink_params_.channel_layout() == media::CHANNEL_LAYOUT_STEREO) {
- // Swap the first and second channels.
- audio_wrapper_->SwapChannels(0, 1);
- }
// TODO(xians): Figure out a better way to handle the interleaved and
// deinterleaved format switching.
+ DCHECK_EQ(audio_wrapper_->frames(), sink_params_.frames_per_buffer());
audio_wrapper_->ToInterleaved(audio_wrapper_->frames(),
sink_params_.bits_per_sample() / 8,
out->data_);
@@ -214,14 +206,20 @@ void MediaStreamAudioProcessor::OnCaptureFormatChanged(
capture_thread_checker_.DetachFromThread();
}
-void MediaStreamAudioProcessor::PushCaptureData(
- const media::AudioBus* audio_source) {
+void MediaStreamAudioProcessor::PushCaptureData(media::AudioBus* audio_source) {
DCHECK(capture_thread_checker_.CalledOnValidThread());
DCHECK_EQ(audio_source->channels(),
capture_converter_->source_parameters().channels());
DCHECK_EQ(audio_source->frames(),
capture_converter_->source_parameters().frames_per_buffer());
+ if (audio_mirroring_ &&
+ capture_converter_->source_parameters().channel_layout() ==
+ media::CHANNEL_LAYOUT_STEREO) {
+ // Swap the first and second channels.
+ audio_source->SwapChannels(0, 1);
+ }
+
capture_converter_->Push(audio_source);
}
@@ -231,7 +229,7 @@ bool MediaStreamAudioProcessor::ProcessAndConsumeData(
DCHECK(capture_thread_checker_.CalledOnValidThread());
TRACE_EVENT0("audio", "MediaStreamAudioProcessor::ProcessAndConsumeData");
- if (!capture_converter_->Convert(&capture_frame_, audio_mirroring_))
+ if (!capture_converter_->Convert(&capture_frame_))
return false;
*new_volume = ProcessData(&capture_frame_, capture_delay, volume,
@@ -312,7 +310,7 @@ void MediaStreamAudioProcessor::OnPlayoutData(media::AudioBus* audio_bus,
audio_bus->frames());
render_converter_->Push(audio_bus);
- while (render_converter_->Convert(&render_frame_, false))
+ while (render_converter_->Convert(&render_frame_))
audio_processing_->AnalyzeReverseStream(&render_frame_);
}
diff --git a/content/renderer/media/media_stream_audio_processor.h b/content/renderer/media/media_stream_audio_processor.h
index 8211fcc..08f8ed0 100644
--- a/content/renderer/media/media_stream_audio_processor.h
+++ b/content/renderer/media/media_stream_audio_processor.h
@@ -68,7 +68,7 @@ class CONTENT_EXPORT MediaStreamAudioProcessor :
// Pushes capture data in |audio_source| to the internal FIFO.
// Called on the capture audio thread.
- void PushCaptureData(const media::AudioBus* audio_source);
+ void PushCaptureData(media::AudioBus* audio_source);
// Processes a block of 10 ms data from the internal FIFO and outputs it via
// |out|. |out| is the address of the pointer that will be pointed to
diff --git a/content/renderer/media/media_stream_audio_processor_unittest.cc b/content/renderer/media/media_stream_audio_processor_unittest.cc
index 1b9023b..4d1868b 100644
--- a/content/renderer/media/media_stream_audio_processor_unittest.cc
+++ b/content/renderer/media/media_stream_audio_processor_unittest.cc
@@ -6,7 +6,6 @@
#include "base/file_util.h"
#include "base/files/file_path.h"
#include "base/logging.h"
-#include "base/memory/aligned_memory.h"
#include "base/path_service.h"
#include "base/time/time.h"
#include "content/public/common/content_switches.h"
@@ -407,69 +406,4 @@ TEST_F(MediaStreamAudioProcessorTest, GetAecDumpMessageFilter) {
audio_processor = NULL;
}
-TEST_F(MediaStreamAudioProcessorTest, TestStereoAudio) {
- // Set up the correct constraints to turn off the audio processing and turn
- // on the stereo channels mirroring.
- MockMediaConstraintFactory constraint_factory;
- constraint_factory.AddMandatory(MediaAudioConstraints::kEchoCancellation,
- false);
- constraint_factory.AddMandatory(MediaAudioConstraints::kGoogAudioMirroring,
- true);
- scoped_refptr<WebRtcAudioDeviceImpl> webrtc_audio_device(
- new WebRtcAudioDeviceImpl());
- scoped_refptr<MediaStreamAudioProcessor> audio_processor(
- new talk_base::RefCountedObject<MediaStreamAudioProcessor>(
- constraint_factory.CreateWebMediaConstraints(), 0,
- webrtc_audio_device.get()));
- EXPECT_FALSE(audio_processor->has_audio_processing());
- const media::AudioParameters source_params(
- media::AudioParameters::AUDIO_PCM_LOW_LATENCY,
- media::CHANNEL_LAYOUT_STEREO, 48000, 16, 480);
- audio_processor->OnCaptureFormatChanged(source_params);
- EXPECT_EQ(audio_processor->OutputFormat().channels(), 2);
-
- // Construct left and right channels, and assign different values to the
- // first data of the left channel and right channel.
- scoped_ptr<float, base::AlignedFreeDeleter> left_channel(
- static_cast<float*>(
- base::AlignedAlloc(source_params.frames_per_buffer() * 2, 32)));
- scoped_ptr<float, base::AlignedFreeDeleter> right_channel(
- static_cast<float*>(
- base::AlignedAlloc(source_params.frames_per_buffer() * 2, 32)));
- scoped_ptr<media::AudioBus> wrapper = media::AudioBus::CreateWrapper(
- source_params.channels());
- float* left_channel_ptr = left_channel.get();
- float* right_channel_ptr = right_channel.get();
- memset(left_channel_ptr, 0, source_params.frames_per_buffer() * 2);
- memset(right_channel_ptr, 0, source_params.frames_per_buffer() * 2);
- left_channel_ptr[0] = 1.0f;
- wrapper->set_frames(source_params.frames_per_buffer());
- wrapper->SetChannelData(0, left_channel.get());
- wrapper->SetChannelData(1, right_channel.get());
-
- // A audio bus used for verifying the output data values.
- scoped_ptr<media::AudioBus> output_bus = media::AudioBus::Create(
- audio_processor->OutputFormat());
-
- // Run the test consecutively to make sure the stereo channels are not
- // flipped back and forth.
- static const int kNumberOfPacketsForTest = 100;
- for (int i = 0; i < kNumberOfPacketsForTest; ++i) {
- audio_processor->PushCaptureData(wrapper.get());
-
- int16* output = NULL;
- int new_volume = 0;
- EXPECT_TRUE(audio_processor->ProcessAndConsumeData(
- base::TimeDelta::FromMilliseconds(0), 0, false, &new_volume, &output));
- output_bus->FromInterleaved(output, output_bus->frames(), 2);
- EXPECT_TRUE(output != NULL);
- EXPECT_EQ(output_bus->channel(0)[0], 0);
- EXPECT_NE(output_bus->channel(1)[0], 0);
- }
-
- // Set |audio_processor| to NULL to make sure |webrtc_audio_device| outlives
- // |audio_processor|.
- audio_processor = NULL;
-}
-
} // namespace content
diff --git a/content/renderer/media/webrtc_audio_capturer.cc b/content/renderer/media/webrtc_audio_capturer.cc
index e47beea..056cb2b 100644
--- a/content/renderer/media/webrtc_audio_capturer.cc
+++ b/content/renderer/media/webrtc_audio_capturer.cc
@@ -448,7 +448,7 @@ int WebRtcAudioCapturer::MaxVolume() const {
return WebRtcAudioDeviceImpl::kMaxVolumeLevel;
}
-void WebRtcAudioCapturer::Capture(const media::AudioBus* audio_source,
+void WebRtcAudioCapturer::Capture(media::AudioBus* audio_source,
int audio_delay_milliseconds,
double volume,
bool key_pressed) {
diff --git a/content/renderer/media/webrtc_audio_capturer.h b/content/renderer/media/webrtc_audio_capturer.h
index d77a107..89ac7a9 100644
--- a/content/renderer/media/webrtc_audio_capturer.h
+++ b/content/renderer/media/webrtc_audio_capturer.h
@@ -133,7 +133,7 @@ class CONTENT_EXPORT WebRtcAudioCapturer
// AudioCapturerSource::CaptureCallback implementation.
// Called on the AudioInputDevice audio thread.
- virtual void Capture(const media::AudioBus* audio_source,
+ virtual void Capture(media::AudioBus* audio_source,
int audio_delay_milliseconds,
double volume,
bool key_pressed) OVERRIDE;
diff --git a/media/base/audio_capturer_source.h b/media/base/audio_capturer_source.h
index 621c392..b584f8a 100644
--- a/media/base/audio_capturer_source.h
+++ b/media/base/audio_capturer_source.h
@@ -24,7 +24,7 @@ class AudioCapturerSource
class CaptureCallback {
public:
// Callback to deliver the captured data from the OS.
- virtual void Capture(const AudioBus* audio_source,
+ virtual void Capture(AudioBus* audio_source,
int audio_delay_milliseconds,
double volume,
bool key_pressed) = 0;