1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
|
// Copyright 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/command_line.h"
#include "base/file_util.h"
#include "base/files/file_path.h"
#include "base/logging.h"
#include "base/path_service.h"
#include "base/time/time.h"
#include "content/public/common/content_switches.h"
#include "content/renderer/media/media_stream_audio_processor.h"
#include "content/renderer/media/rtc_media_constraints.h"
#include "media/audio/audio_parameters.h"
#include "media/base/audio_bus.h"
#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"
#include "third_party/libjingle/source/talk/app/webrtc/mediastreaminterface.h"
using ::testing::_;
using ::testing::AnyNumber;
using ::testing::AtLeast;
using ::testing::Return;
namespace content {
namespace {
#if defined(ANDROID)
const int kAudioProcessingSampleRate = 16000;
#else
const int kAudioProcessingSampleRate = 32000;
#endif
const int kAudioProcessingNumberOfChannel = 1;
// The number of packets used for testing.
const int kNumberOfPacketsForTest = 100;
void ReadDataFromSpeechFile(char* data, int length) {
base::FilePath file;
CHECK(PathService::Get(base::DIR_SOURCE_ROOT, &file));
file = file.Append(FILE_PATH_LITERAL("media"))
.Append(FILE_PATH_LITERAL("test"))
.Append(FILE_PATH_LITERAL("data"))
.Append(FILE_PATH_LITERAL("speech_16b_stereo_48kHz.raw"));
DCHECK(base::PathExists(file));
int64 data_file_size64 = 0;
DCHECK(base::GetFileSize(file, &data_file_size64));
EXPECT_EQ(length, file_util::ReadFile(file, data, length));
DCHECK(data_file_size64 > length);
}
// Adds the fixed set of default audio constraints to |constraints| as
// mandatory constraints, each with the value "true".  These are the
// constraint keys enabled by default for mediastreams with audio.
void ApplyFixedAudioConstraints(RTCMediaConstraints* constraints) {
  // Table of constraint key/value pairs applied below.
  struct ConstraintEntry {
    const char* key;
    const char* value;
  };
  static const ConstraintEntry kDefaultAudioConstraints[] = {
    { webrtc::MediaConstraintsInterface::kEchoCancellation,
      webrtc::MediaConstraintsInterface::kValueTrue },
#if defined(OS_CHROMEOS) || defined(OS_MACOSX)
    // Enable the extended filter mode AEC on platforms with known echo issues.
    { webrtc::MediaConstraintsInterface::kExperimentalEchoCancellation,
      webrtc::MediaConstraintsInterface::kValueTrue },
#endif
    { webrtc::MediaConstraintsInterface::kAutoGainControl,
      webrtc::MediaConstraintsInterface::kValueTrue },
    { webrtc::MediaConstraintsInterface::kExperimentalAutoGainControl,
      webrtc::MediaConstraintsInterface::kValueTrue },
    { webrtc::MediaConstraintsInterface::kNoiseSuppression,
      webrtc::MediaConstraintsInterface::kValueTrue },
    { webrtc::MediaConstraintsInterface::kHighpassFilter,
      webrtc::MediaConstraintsInterface::kValueTrue },
  };

  for (size_t index = 0;
       index < ARRAYSIZE_UNSAFE(kDefaultAudioConstraints);
       ++index) {
    const ConstraintEntry& entry = kDefaultAudioConstraints[index];
    constraints->AddMandatory(entry.key, entry.value, false);
  }
}
} // namespace
// Test fixture for MediaStreamAudioProcessor.  The constructor turns on
// the audio track processing command-line switch, and |params_| describes
// the capture format used by every test: 16-bit stereo PCM at 48 kHz with
// 512 frames per buffer.
class MediaStreamAudioProcessorTest : public ::testing::Test {
 public:
  MediaStreamAudioProcessorTest()
      : params_(media::AudioParameters::AUDIO_PCM_LOW_LATENCY,
                media::CHANNEL_LAYOUT_STEREO, 48000, 16, 512) {
    CommandLine::ForCurrentProcess()->AppendSwitch(
        switches::kEnableAudioTrackProcessing);
  }

 protected:
  // Helper method to save duplicated code.
  // Feeds kNumberOfPacketsForTest buffers of recorded speech into
  // |audio_processor| — both the capture side and the render side — and
  // verifies that every consumed output buffer reports the expected
  // sample rate, channel count and frames-per-buffer.
  void ProcessDataAndVerifyFormat(MediaStreamAudioProcessor* audio_processor,
                                  int expected_output_sample_rate,
                                  int expected_output_channels,
                                  int expected_output_buffer_size) {
    // Read the audio data from a file.
    // Packet size in bytes: frames * 2 bytes per 16-bit sample * channels.
    const int packet_size =
        params_.frames_per_buffer() * 2 * params_.channels();
    const size_t length = packet_size * kNumberOfPacketsForTest;
    scoped_ptr<char[]> capture_data(new char[length]);
    ReadDataFromSpeechFile(capture_data.get(), length);
    // View the raw bytes as interleaved 16-bit samples.
    const int16* data_ptr = reinterpret_cast<const int16*>(capture_data.get());
    scoped_ptr<media::AudioBus> data_bus = media::AudioBus::Create(
        params_.channels(), params_.frames_per_buffer());
    for (int i = 0; i < kNumberOfPacketsForTest; ++i) {
      // Convert the next interleaved packet into |data_bus| and push it to
      // the capture side of the processor.
      data_bus->FromInterleaved(data_ptr, data_bus->frames(), 2);
      audio_processor->PushCaptureData(data_bus.get());
      // |audio_processor| does nothing when the audio processing is off in
      // the processor.
      audio_processor->PushRenderData(
          data_ptr,
          params_.sample_rate(), params_.channels(),
          params_.frames_per_buffer(), base::TimeDelta::FromMilliseconds(10));
      // Drain all output the processor has ready and check its format.
      // NOTE(review): the 255 / false arguments are presumably volume and
      // key-pressed state — confirm against ProcessAndConsumeData()'s
      // declaration.
      int16* output = NULL;
      while(audio_processor->ProcessAndConsumeData(
          base::TimeDelta::FromMilliseconds(10), 255, false, &output)) {
        EXPECT_TRUE(output != NULL);
        EXPECT_EQ(audio_processor->OutputFormat().sample_rate(),
                  expected_output_sample_rate);
        EXPECT_EQ(audio_processor->OutputFormat().channels(),
                  expected_output_channels);
        EXPECT_EQ(audio_processor->OutputFormat().frames_per_buffer(),
                  expected_output_buffer_size);
      }
      // Advance past the packet just consumed (interleaved samples).
      data_ptr += params_.frames_per_buffer() * params_.channels();
    }
  }

  // Capture-side audio parameters shared by all tests.
  media::AudioParameters params_;
};
// With an empty constraint set the processor must leave processing off and
// pass data through in the capture format (10 ms buffers at the capture
// sample rate and channel count).
TEST_F(MediaStreamAudioProcessorTest, WithoutAudioProcessing) {
  RTCMediaConstraints empty_constraints;
  MediaStreamAudioProcessor audio_processor(&empty_constraints);
  audio_processor.SetCaptureFormat(params_);
  EXPECT_FALSE(audio_processor.has_audio_processing());

  // Expect 10 ms worth of frames per output buffer.
  const int expected_buffer_size = params_.sample_rate() / 100;
  ProcessDataAndVerifyFormat(&audio_processor,
                             params_.sample_rate(),
                             params_.channels(),
                             expected_buffer_size);
}
// With the default (fixed) constraints applied, processing must be enabled
// and the output must use the audio-processing format: mono at
// kAudioProcessingSampleRate with 10 ms buffers.
TEST_F(MediaStreamAudioProcessorTest, WithAudioProcessing) {
  RTCMediaConstraints default_constraints;
  ApplyFixedAudioConstraints(&default_constraints);
  MediaStreamAudioProcessor audio_processor(&default_constraints);
  audio_processor.SetCaptureFormat(params_);
  EXPECT_TRUE(audio_processor.has_audio_processing());

  // Expect 10 ms worth of frames per output buffer.
  const int expected_buffer_size = kAudioProcessingSampleRate / 100;
  ProcessDataAndVerifyFormat(&audio_processor,
                             kAudioProcessingSampleRate,
                             kAudioProcessingNumberOfChannel,
                             expected_buffer_size);
}
} // namespace content
|