summaryrefslogtreecommitdiffstats
path: root/content/renderer/media/webrtc_audio_device_unittest.cc
diff options
context:
space:
mode:
authorxians@chromium.org <xians@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>2013-05-17 16:33:09 +0000
committerxians@chromium.org <xians@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>2013-05-17 16:33:09 +0000
commit61cd2b9630f8eb2fde57f5c209bbbd4637f794f6 (patch)
tree229c0d02ba942ca56394bbdc26d5df8eb5ca71c9 /content/renderer/media/webrtc_audio_device_unittest.cc
parent797a5cfc1f6fc22d5f6a397f25432d5c4751d263 (diff)
downloadchromium_src-61cd2b9630f8eb2fde57f5c209bbbd4637f794f6.zip
chromium_src-61cd2b9630f8eb2fde57f5c209bbbd4637f794f6.tar.gz
chromium_src-61cd2b9630f8eb2fde57f5c209bbbd4637f794f6.tar.bz2
Add performance tests to the content_unittests::WebRTCAudioDeviceTest.
This patch adds setup time tests for recording and playout. And measure the data processing time in webrtc loopback. And we use perf_test.h to print out the result, and will plot the result in some of our audio bots. Some result of running the unittests on Linux: *RESULT WebRtcRecordingSetupTime: t= [88.00,] ms *RESULT WebRtcPlayoutSetupTime: t= [150.00,] ms *RESULT WebRtcLoopbackTimeWithoutSignalProcessing (100 packets): t= [80.00,] ms *RESULT WebRtcLoopbackTimeWithSignalProcessing (100 packets): t= [115.00,] ms TEST=content_unittests --gtest_filter="*WebRtc*" TBR=henrika@chromium.org BUG= Review URL: https://chromiumcodereview.appspot.com/15295006 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@200824 0039d316-1c4b-4281-b951-d872f2087c98
Diffstat (limited to 'content/renderer/media/webrtc_audio_device_unittest.cc')
-rw-r--r--content/renderer/media/webrtc_audio_device_unittest.cc303
1 files changed, 301 insertions, 2 deletions
diff --git a/content/renderer/media/webrtc_audio_device_unittest.cc b/content/renderer/media/webrtc_audio_device_unittest.cc
index 04963d5..222271d 100644
--- a/content/renderer/media/webrtc_audio_device_unittest.cc
+++ b/content/renderer/media/webrtc_audio_device_unittest.cc
@@ -3,6 +3,10 @@
// found in the LICENSE file.
#include "base/environment.h"
+#include "base/file_util.h"
+#include "base/files/file_path.h"
+#include "base/path_service.h"
+#include "base/stringprintf.h"
#include "base/test/test_timeouts.h"
#include "content/renderer/media/webrtc_audio_capturer.h"
#include "content/renderer/media/webrtc_audio_device_impl.h"
@@ -14,6 +18,7 @@
#include "testing/gmock/include/gmock/gmock.h"
#include "third_party/webrtc/voice_engine/include/voe_audio_processing.h"
#include "third_party/webrtc/voice_engine/include/voe_base.h"
+#include "third_party/webrtc/voice_engine/include/voe_codec.h"
#include "third_party/webrtc/voice_engine/include/voe_external_media.h"
#include "third_party/webrtc/voice_engine/include/voe_file.h"
#include "third_party/webrtc/voice_engine/include/voe_network.h"
@@ -23,6 +28,7 @@
#endif
using media::AudioParameters;
+using media::CHANNEL_LAYOUT_STEREO;
using testing::_;
using testing::AnyNumber;
using testing::InvokeWithoutArgs;
@@ -35,6 +41,12 @@ namespace {
const int kRenderViewId = 1;
+// The number of packets that RunWebRtcLoopbackTimeTest() uses for measurement.
+const int kNumberOfPacketsForLoopbackTest = 100;
+
+// The hardware latency we feed to WebRtc.
+const int kHardwareLatencyInMs = 50;
+
scoped_ptr<media::AudioHardwareConfig> CreateRealHardwareConfig(
media::AudioManager* manager) {
const AudioParameters output_parameters =
@@ -42,6 +54,7 @@ scoped_ptr<media::AudioHardwareConfig> CreateRealHardwareConfig(
const AudioParameters input_parameters =
manager->GetInputStreamParameters(
media::AudioManagerBase::kDefaultDeviceId);
+
return make_scoped_ptr(new media::AudioHardwareConfig(
input_parameters, output_parameters));
}
@@ -110,7 +123,6 @@ bool InitializeCapturer(WebRtcAudioDeviceImpl* webrtc_audio_device) {
return true;
}
-
class WebRTCMediaProcessImpl : public webrtc::VoEMediaProcess {
public:
explicit WebRTCMediaProcessImpl(base::WaitableEvent* event)
@@ -173,7 +185,195 @@ class WebRTCMediaProcessImpl : public webrtc::VoEMediaProcess {
DISALLOW_COPY_AND_ASSIGN(WebRTCMediaProcessImpl);
};
-} // end namespace
+// Mock capture sink which signals |event| on every CaptureData() callback.
+// The recording setup-time test waits on |event| to detect the first
+// captured packet. |event| is not owned and must outlive this object.
+class MockWebRtcAudioCapturerSink : public WebRtcAudioCapturerSink {
+ public:
+ explicit MockWebRtcAudioCapturerSink(base::WaitableEvent* event)
+ : event_(event) {
+ DCHECK(event_);
+ }
+ virtual ~MockWebRtcAudioCapturerSink() {}
+
+ // WebRtcAudioCapturerSink implementation.
+ virtual void CaptureData(const int16* audio_data,
+ int number_of_channels,
+ int number_of_frames,
+ int audio_delay_milliseconds,
+ double volume) OVERRIDE {
+ // Signal that a callback has been received.
+ event_->Signal();
+ }
+
+ // Set the format for the capture audio parameters.
+ // Intentionally a no-op: this mock only cares about callback arrival.
+ virtual void SetCaptureFormat(
+ const media::AudioParameters& params) OVERRIDE {}
+
+ private:
+ // Non-owning; signaled from the capture callback thread.
+ base::WaitableEvent* event_;
+
+ DISALLOW_COPY_AND_ASSIGN(MockWebRtcAudioCapturerSink);
+};
+
+// Mock render source which signals |event| on every RenderData() callback.
+// The playout setup-time test waits on |event| to detect the first rendered
+// packet. |event| is not owned and must outlive this object.
+class MockWebRtcAudioRendererSource : public WebRtcAudioRendererSource {
+ public:
+  explicit MockWebRtcAudioRendererSource(base::WaitableEvent* event)
+      : event_(event) {
+    DCHECK(event_);
+  }
+  virtual ~MockWebRtcAudioRendererSource() {}
+
+  // WebRtcAudioRendererSource implementation.
+  virtual void RenderData(uint8* audio_data,
+                          int number_of_channels,
+                          int number_of_frames,
+                          int audio_delay_milliseconds) OVERRIDE {
+    // Signal that a callback has been received.
+    event_->Signal();
+  }
+
+  // Intentionally a no-op: this mock only cares about callback arrival.
+  virtual void SetRenderFormat(const media::AudioParameters& params) OVERRIDE {
+  }
+
+  virtual void RemoveAudioRenderer(WebRtcAudioRenderer* renderer) OVERRIDE {}
+
+ private:
+  // Non-owning; signaled from the render callback thread.
+  base::WaitableEvent* event_;
+
+  DISALLOW_COPY_AND_ASSIGN(MockWebRtcAudioRendererSource);
+};
+
+// Prints numerical information to stdout in a controlled format so we can plot
+// the result.
+// Output shape: "*RESULT <graph>: <trace>= [<time_ms>,] ms".
+// NOTE(review): the "*RESULT" string is assembled from separate fragments
+// ("*", "[", "]", ...) — presumably so log scanners do not match this source
+// line itself; confirm before collapsing into a single format literal.
+void PrintPerfResultMs(const char* graph, const char* trace, float time_ms) {
+ std::string times;
+ base::StringAppendF(&times, "%.2f,", time_ms);
+ std::string result = base::StringPrintf(
+ "%sRESULT %s%s: %s= %s%s%s %s\n", "*", graph, "",
+ trace, "[", times.c_str(), "]", "ms");
+
+ fflush(stdout);
+ printf("%s", result.c_str());
+ fflush(stdout);
+}
+
+// Reads |length| bytes of raw audio from the checked-in test file
+// media/test/data/speech_16b_stereo_48kHz.raw into |data|. The file must
+// exist and be strictly larger than |length| bytes.
+void ReadDataFromSpeechFile(char* data, int length) {
+  base::FilePath data_file;
+  CHECK(PathService::Get(base::DIR_SOURCE_ROOT, &data_file));
+  data_file =
+      data_file.Append(FILE_PATH_LITERAL("media"))
+          .Append(FILE_PATH_LITERAL("test"))
+          .Append(FILE_PATH_LITERAL("data"))
+          .Append(FILE_PATH_LITERAL("speech_16b_stereo_48kHz.raw"));
+  DCHECK(file_util::PathExists(data_file));
+  // Keep the side-effecting GetFileSize() call outside DCHECK(): when DCHECKs
+  // are compiled out, the call would never run and |data_file_size64| would
+  // stay 0, making the size check below meaningless.
+  int64 data_file_size64 = 0;
+  bool got_file_size = file_util::GetFileSize(data_file, &data_file_size64);
+  DCHECK(got_file_size);
+  EXPECT_EQ(length, file_util::ReadFile(data_file, data, length));
+  DCHECK(data_file_size64 > length);
+}
+
+// Configures |channel| on |engine| to send and receive with a fixed iSAC
+// wideband codec (payload type 104, 32 kHz, mono, 960-sample packets,
+// adaptive rate). No-op on Android/iOS.
+void SetChannelCodec(webrtc::VoiceEngine* engine, int channel) {
+ // TODO(xians): move the codec as an input param to this function, and add
+ // tests for different codecs, also add support to Android and IOS.
+#if !defined(OS_ANDROID) && !defined(OS_IOS)
+ webrtc::CodecInst isac;
+ // strcpy is safe here only because "ISAC" is a short fixed literal.
+ strcpy(isac.plname, "ISAC");
+ isac.pltype = 104;
+ isac.pacsize = 960;
+ isac.plfreq = 32000;
+ isac.channels = 1;
+ // -1 selects the codec's adaptive/default rate.
+ isac.rate = -1;
+ ScopedWebRTCPtr<webrtc::VoECodec> codec(engine);
+ EXPECT_EQ(0, codec->SetRecPayloadType(channel, isac));
+ EXPECT_EQ(0, codec->SetSendCodec(channel, isac));
+#endif
+}
+
+// Returns the time in millisecond for sending packets to WebRtc for encoding,
+// signal processing, decoding and receiving them back.
+// |enable_apm| toggles AGC, NS and AEC together.
+// NOTE(review): |manager| is never referenced in this body — consider
+// removing the parameter or wiring it in.
+int RunWebRtcLoopbackTimeTest(media::AudioManager* manager,
+ bool enable_apm) {
+ scoped_refptr<WebRtcAudioDeviceImpl> webrtc_audio_device(
+ new WebRtcAudioDeviceImpl());
+ WebRTCAutoDelete<webrtc::VoiceEngine> engine(webrtc::VoiceEngine::Create());
+ EXPECT_TRUE(engine.valid());
+ ScopedWebRTCPtr<webrtc::VoEBase> base(engine.get());
+ EXPECT_TRUE(base.valid());
+ int err = base->Init(webrtc_audio_device);
+ EXPECT_EQ(0, err);
+
+ // We use SetCaptureFormat() and SetRenderFormat() to configure the audio
+ // parameters so that this test can run on machine without hardware device.
+ const media::AudioParameters params = media::AudioParameters(
+ media::AudioParameters::AUDIO_PCM_LOW_LATENCY, CHANNEL_LAYOUT_STEREO,
+ 48000, 2, 480);
+ WebRtcAudioCapturerSink* capturer_sink =
+ static_cast<WebRtcAudioCapturerSink*>(webrtc_audio_device.get());
+ capturer_sink->SetCaptureFormat(params);
+ WebRtcAudioRendererSource* renderer_source =
+ static_cast<WebRtcAudioRendererSource*>(webrtc_audio_device.get());
+ renderer_source->SetRenderFormat(params);
+
+ // Turn on/off all the signal processing components like AGC, AEC and NS.
+ ScopedWebRTCPtr<webrtc::VoEAudioProcessing> audio_processing(engine.get());
+ EXPECT_TRUE(audio_processing.valid());
+ audio_processing->SetAgcStatus(enable_apm);
+ audio_processing->SetNsStatus(enable_apm);
+ audio_processing->SetEcStatus(enable_apm);
+
+ // Create a voice channel for the WebRtc.
+ int channel = base->CreateChannel();
+ EXPECT_NE(-1, channel);
+ SetChannelCodec(engine.get(), channel);
+
+ // Use our fake network transmission and start playout and recording.
+ ScopedWebRTCPtr<webrtc::VoENetwork> network(engine.get());
+ EXPECT_TRUE(network.valid());
+ scoped_ptr<WebRTCTransportImpl> transport(
+ new WebRTCTransportImpl(network.get()));
+ EXPECT_EQ(0, network->RegisterExternalTransport(channel, *transport.get()));
+ EXPECT_EQ(0, base->StartPlayout(channel));
+ EXPECT_EQ(0, base->StartSend(channel));
+
+ // Read speech data from a speech test file.
+ // Packet sizes are in bytes: buffer size (frames) * 2 (16-bit samples)
+ // * channel count.
+ const int num_input_channels = webrtc_audio_device->input_channels();
+ const int input_packet_size = webrtc_audio_device->input_buffer_size() * 2 *
+ num_input_channels;
+ const int num_output_channels = webrtc_audio_device->output_channels();
+ const int output_packet_size = webrtc_audio_device->output_buffer_size() * 2 *
+ num_output_channels;
+ const size_t length = input_packet_size * kNumberOfPacketsForLoopbackTest;
+ scoped_ptr<char[]> capture_data(new char[length]);
+ ReadDataFromSpeechFile(capture_data.get(), length);
+
+ // Start the timer.
+ scoped_ptr<uint8[]> buffer(new uint8[output_packet_size]);
+ base::Time start_time = base::Time::Now();
+ // |delay| accumulates elapsed loop time and is fed back to RenderData()
+ // as extra render delay on each subsequent iteration.
+ int delay = 0;
+ for (int j = 0; j < kNumberOfPacketsForLoopbackTest; ++j) {
+ // Sending fake capture data to WebRtc.
+ capturer_sink->CaptureData(
+ reinterpret_cast<int16*>(capture_data.get() + input_packet_size * j),
+ num_input_channels, webrtc_audio_device->input_buffer_size(),
+ kHardwareLatencyInMs, 1.0);
+
+ // Receiving data from WebRtc.
+ renderer_source->RenderData(
+ reinterpret_cast<uint8*>(buffer.get()),
+ num_output_channels, webrtc_audio_device->output_buffer_size(),
+ kHardwareLatencyInMs + delay);
+ delay = (base::Time::Now() - start_time).InMilliseconds();
+ }
+
+ // Total wall-clock time for the whole loopback of all packets.
+ int latency = (base::Time::Now() - start_time).InMilliseconds();
+
+ EXPECT_EQ(0, base->StopSend(channel));
+ EXPECT_EQ(0, base->StopPlayout(channel));
+ EXPECT_EQ(0, base->DeleteChannel(channel));
+ EXPECT_EQ(0, base->Terminate());
+
+ return latency;
+}
+
+} // namespace
// Trivial test which verifies that one part of the test harness
// (HardwareSampleRatesAreValid()) works as intended for all supported
@@ -590,4 +790,103 @@ TEST_F(WebRTCAudioDeviceTest, MAYBE_FullDuplexAudioWithAGC) {
EXPECT_EQ(0, base->Terminate());
}
+// Measures the wall-clock time from StartSend() until the first capture
+// callback arrives (signaled by MockWebRtcAudioCapturerSink) and reports it
+// as the recording setup time. Skips silently when no capture device exists.
+TEST_F(WebRTCAudioDeviceTest, WebRtcRecordingSetupTime) {
+ if (!has_input_devices_) {
+ LOG(WARNING) << "Missing audio capture devices.";
+ return;
+ }
+
+ scoped_ptr<media::AudioHardwareConfig> config =
+ CreateRealHardwareConfig(audio_manager_.get());
+ SetAudioHardwareConfig(config.get());
+
+ if (!HardwareSampleRatesAreValid())
+ return;
+
+ scoped_refptr<WebRtcAudioDeviceImpl> webrtc_audio_device(
+ new WebRtcAudioDeviceImpl());
+
+ WebRTCAutoDelete<webrtc::VoiceEngine> engine(webrtc::VoiceEngine::Create());
+ ASSERT_TRUE(engine.valid());
+
+ ScopedWebRTCPtr<webrtc::VoEBase> base(engine.get());
+ ASSERT_TRUE(base.valid());
+ int err = base->Init(webrtc_audio_device);
+ ASSERT_EQ(0, err);
+
+ EXPECT_TRUE(InitializeCapturer(webrtc_audio_device.get()));
+ webrtc_audio_device->capturer()->Start();
+
+ base::WaitableEvent event(false, false);
+ scoped_ptr<MockWebRtcAudioCapturerSink> capturer_sink(
+ new MockWebRtcAudioCapturerSink(&event));
+ WebRtcAudioCapturer* capturer = webrtc_audio_device->capturer();
+ capturer->AddSink(capturer_sink.get());
+
+ int ch = base->CreateChannel();
+ EXPECT_NE(-1, ch);
+
+ // Time the interval from StartSend() to the first CaptureData() signal.
+ base::Time start_time = base::Time::Now();
+ EXPECT_EQ(0, base->StartSend(ch));
+
+ EXPECT_TRUE(event.TimedWait(TestTimeouts::action_timeout()));
+ int delay = (base::Time::Now() - start_time).InMilliseconds();
+ PrintPerfResultMs("webrtc_recording_setup_c", "t", delay);
+
+ // Tear down in reverse order of setup.
+ capturer->RemoveSink(capturer_sink.get());
+ webrtc_audio_device->capturer()->Stop();
+ EXPECT_EQ(0, base->StopSend(ch));
+ EXPECT_EQ(0, base->DeleteChannel(ch));
+ EXPECT_EQ(0, base->Terminate());
+}
+
+// Measures the wall-clock time from Play() until the first render callback
+// arrives (signaled by MockWebRtcAudioRendererSource) and reports it as the
+// playout setup time. Skips silently when no output device exists.
+TEST_F(WebRTCAudioDeviceTest, WebRtcPlayoutSetupTime) {
+ if (!has_output_devices_) {
+ LOG(WARNING) << "No output device detected.";
+ return;
+ }
+
+ scoped_ptr<media::AudioHardwareConfig> config =
+ CreateRealHardwareConfig(audio_manager_.get());
+ SetAudioHardwareConfig(config.get());
+
+ if (!HardwareSampleRatesAreValid())
+ return;
+
+ // Expectations for the media observer notifications triggered by starting
+ // and stopping the audio stream with id 1.
+ EXPECT_CALL(media_observer(),
+ OnSetAudioStreamStatus(_, 1, _)).Times(AnyNumber());
+ EXPECT_CALL(media_observer(),
+ OnSetAudioStreamPlaying(_, 1, true));
+ EXPECT_CALL(media_observer(),
+ OnDeleteAudioStream(_, 1)).Times(AnyNumber());
+
+ base::WaitableEvent event(false, false);
+ scoped_ptr<MockWebRtcAudioRendererSource> renderer_source(
+ new MockWebRtcAudioRendererSource(&event));
+ scoped_refptr<WebRtcAudioRenderer> renderer =
+ new WebRtcAudioRenderer(kRenderViewId);
+ renderer->Initialize(renderer_source.get());
+
+ // Start the timer and playout.
+ base::Time start_time = base::Time::Now();
+ renderer->Play();
+ EXPECT_TRUE(event.TimedWait(TestTimeouts::action_timeout()));
+ int delay = (base::Time::Now() - start_time).InMilliseconds();
+ PrintPerfResultMs("webrtc_playout_setup_c", "t", delay);
+
+ renderer->Stop();
+}
+
+// Reports the loopback time for kNumberOfPacketsForLoopbackTest packets with
+// AGC/NS/AEC disabled.
+TEST_F(WebRTCAudioDeviceTest, WebRtcLoopbackTimeWithoutSignalProcessing) {
+  int latency = RunWebRtcLoopbackTimeTest(audio_manager_.get(), false);
+  // Graph name spelled "signal" (was "sigal") to match the test name and the
+  // "with_signal_processing" counterpart.
+  PrintPerfResultMs("webrtc_loopback_without_signal_processing (100 packets)",
+                    "t", latency);
+}
+
+// Reports the loopback time for kNumberOfPacketsForLoopbackTest packets with
+// AGC/NS/AEC enabled.
+TEST_F(WebRTCAudioDeviceTest, WebRtcLoopbackTimeWithSignalProcessing) {
+ int latency = RunWebRtcLoopbackTimeTest(audio_manager_.get(), true);
+ PrintPerfResultMs("webrtc_loopback_with_signal_processing (100 packets)",
+ "t", latency);
+}
+
} // namespace content