// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "media/audio/win/audio_low_latency_input_win.h"

#include "base/logging.h"
#include "base/memory/scoped_ptr.h"
#include "base/utf_string_conversions.h"
#include "media/audio/audio_util.h"
#include "media/audio/win/audio_manager_win.h"
#include "media/audio/win/avrt_wrapper_win.h"
using base::win::ScopedComPtr;
using base::win::ScopedCOMInitializer;
WASAPIAudioInputStream::WASAPIAudioInputStream(
AudioManagerWin* manager, const AudioParameters& params,
const std::string& device_id)
: com_init_(ScopedCOMInitializer::kMTA),
manager_(manager),
capture_thread_(NULL),
opened_(false),
started_(false),
endpoint_buffer_size_frames_(0),
device_id_(device_id),
sink_(NULL) {
DCHECK(manager_);
// Load the Avrt DLL if not already loaded. Required to support MMCSS.
bool avrt_init = avrt::Initialize();
DCHECK(avrt_init) << "Failed to load the Avrt.dll";
// Set up the desired capture format specified by the client.
format_.nSamplesPerSec = params.sample_rate();
format_.wFormatTag = WAVE_FORMAT_PCM;
format_.wBitsPerSample = params.bits_per_sample();
format_.nChannels = params.channels();
format_.nBlockAlign = (format_.wBitsPerSample / 8) * format_.nChannels;
format_.nAvgBytesPerSec = format_.nSamplesPerSec * format_.nBlockAlign;
format_.cbSize = 0;
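// Example: 16-bit stereo at 48 kHz gives nBlockAlign = (16 / 8) * 2 = 4
// bytes and nAvgBytesPerSec = 48000 * 4 = 192000 bytes per second.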
// Size in bytes of each audio frame.
frame_size_ = format_.nBlockAlign;
// Store the size of the audio packets which we expect to receive from the
// audio endpoint device in each capture event.
packet_size_frames_ = params.GetBytesPerBuffer() / format_.nBlockAlign;
packet_size_bytes_ = params.GetBytesPerBuffer();
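// Example: a 10 ms buffer at 48 kHz corresponds to 480 audio frames per
// packet, i.e., 1920 bytes per packet for 16-bit stereo.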
DVLOG(1) << "Number of bytes per audio frame : " << frame_size_;
DVLOG(1) << "Number of audio frames per packet: " << packet_size_frames_;
// All events are auto-reset events and non-signaled initially.
// Create the event which the audio engine will signal each time
// a buffer becomes ready to be processed by the client.
audio_samples_ready_event_.Set(CreateEvent(NULL, FALSE, FALSE, NULL));
DCHECK(audio_samples_ready_event_.IsValid());
// Create the event which will be set in Stop() when capturing shall stop.
stop_capture_event_.Set(CreateEvent(NULL, FALSE, FALSE, NULL));
DCHECK(stop_capture_event_.IsValid());
ms_to_frame_count_ = static_cast<double>(params.sample_rate()) / 1000.0;
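// Determine the conversion factor from performance-counter ticks to 100-ns
// units: one second equals 1e7 such units, so the factor is 1e7 divided by
// the counter frequency. IAudioCaptureClient::GetBuffer() reports its
// capture timestamps in the same 100-ns units.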
LARGE_INTEGER performance_frequency;
if (QueryPerformanceFrequency(&performance_frequency)) {
perf_count_to_100ns_units_ =
(10000000.0 / static_cast<double>(performance_frequency.QuadPart));
} else {
LOG(ERROR) << "High-resolution performance counters are not supported.";
perf_count_to_100ns_units_ = 0.0;
}
}
WASAPIAudioInputStream::~WASAPIAudioInputStream() {}
bool WASAPIAudioInputStream::Open() {
DCHECK(CalledOnValidThread());
// Verify that we are not already opened.
if (opened_)
return false;
// Obtain a reference to the IMMDevice interface of the capturing
// device with the specified unique identifier or role which was
// set at construction.
HRESULT hr = SetCaptureDevice();
if (FAILED(hr))
return false;
// Obtain an IAudioClient interface which enables us to create and initialize
// an audio stream between an audio application and the audio engine.
hr = ActivateCaptureDevice();
if (FAILED(hr))
return false;
// Retrieve the stream format which the audio engine uses for its internal
// processing/mixing of shared-mode streams. This call is for diagnostic
// purposes only and is performed only in debug builds.
#ifndef NDEBUG
hr = GetAudioEngineStreamFormat();
#endif
// Verify that the selected audio endpoint supports the specified format
// set during construction.
if (!DesiredFormatIsSupported()) {
return false;
}
// Initialize the audio stream between the client and the device using
// shared mode and the lowest possible glitch-free latency.
hr = InitializeAudioEngine();
opened_ = SUCCEEDED(hr);
return opened_;
}
void WASAPIAudioInputStream::Start(AudioInputCallback* callback) {
DCHECK(CalledOnValidThread());
DCHECK(callback);
DLOG_IF(ERROR, !opened_) << "Open() has not been called successfully";
if (!opened_)
return;
if (started_)
return;
sink_ = callback;
// Create and start the thread that will drive the capturing by waiting for
// capture events.
capture_thread_ =
new base::DelegateSimpleThread(this, "wasapi_capture_thread");
capture_thread_->Start();
// Start streaming data between the endpoint buffer and the audio engine.
HRESULT hr = audio_client_->Start();
DLOG_IF(ERROR, FAILED(hr)) << "Failed to start input streaming.";
started_ = SUCCEEDED(hr);
}
void WASAPIAudioInputStream::Stop() {
DCHECK(CalledOnValidThread());
if (!started_)
return;
// Shut down the capture thread.
if (stop_capture_event_.IsValid()) {
SetEvent(stop_capture_event_.Get());
}
// Stop the input audio streaming.
HRESULT hr = audio_client_->Stop();
if (FAILED(hr)) {
LOG(ERROR) << "Failed to stop input streaming.";
}
// Wait until the thread completes and perform cleanup.
if (capture_thread_) {
SetEvent(stop_capture_event_.Get());
capture_thread_->Join();
capture_thread_ = NULL;
}
started_ = false;
}
void WASAPIAudioInputStream::Close() {
// It is valid to call Close() before calling Open() or Start().
// It is also valid to call Close() after Start() has been called.
Stop();
if (sink_) {
sink_->OnClose(this);
sink_ = NULL;
}
// Inform the audio manager that we have been closed. This will cause our
// destruction.
manager_->ReleaseInputStream(this);
}
double WASAPIAudioInputStream::GetMaxVolume() {
// Verify that Open() has been called successfully, to ensure that an audio
// session exists and that an ISimpleAudioVolume interface has been created.
DLOG_IF(ERROR, !opened_) << "Open() has not been called successfully";
if (!opened_)
return 0.0;
// The effective volume value is always in the range 0.0 to 1.0, hence
// we can return a fixed value (=1.0) here.
return 1.0;
}
void WASAPIAudioInputStream::SetVolume(double volume) {
DVLOG(1) << "SetVolume(volume=" << volume << ")";
DCHECK(CalledOnValidThread());
DCHECK_GE(volume, 0.0);
DCHECK_LE(volume, 1.0);
DLOG_IF(ERROR, !opened_) << "Open() has not been called successfully";
if (!opened_)
return;
// Set a new master volume level. Valid volume levels are in the range
// 0.0 to 1.0. Ignore volume-change events.
HRESULT hr = simple_audio_volume_->SetMasterVolume(static_cast<float>(volume),
NULL);
DLOG_IF(WARNING, FAILED(hr)) << "Failed to set new input master volume.";
// Update the AGC volume level based on the last setting above. Note that,
// the volume-level resolution is not infinite and it is therefore not
// possible to assume that the volume provided as input parameter can be
// used directly. Instead, a new query to the audio hardware is required.
// This method does nothing if AGC is disabled.
UpdateAgcVolume();
}
double WASAPIAudioInputStream::GetVolume() {
DLOG_IF(ERROR, !opened_) << "Open() has not been called successfully";
if (!opened_)
return 0.0;
// Retrieve the current volume level. The value is in the range 0.0 to 1.0.
float level = 0.0f;
HRESULT hr = simple_audio_volume_->GetMasterVolume(&level);
DLOG_IF(WARNING, FAILED(hr)) << "Failed to get input master volume.";
return static_cast<double>(level);
}
// static
int WASAPIAudioInputStream::HardwareSampleRate(
const std::string& device_id) {
base::win::ScopedCoMem<WAVEFORMATEX> audio_engine_mix_format;
HRESULT hr = GetMixFormat(device_id, &audio_engine_mix_format);
if (FAILED(hr))
return 0;
return static_cast<int>(audio_engine_mix_format->nSamplesPerSec);
}
// static
uint32 WASAPIAudioInputStream::HardwareChannelCount(
const std::string& device_id) {
base::win::ScopedCoMem<WAVEFORMATEX> audio_engine_mix_format;
HRESULT hr = GetMixFormat(device_id, &audio_engine_mix_format);
if (FAILED(hr))
return 0;
return static_cast<uint32>(audio_engine_mix_format->nChannels);
}
// static
HRESULT WASAPIAudioInputStream::GetMixFormat(const std::string& device_id,
WAVEFORMATEX** device_format) {
// It is assumed that this static method is called from a COM thread, i.e.,
// CoInitializeEx() is not called here to avoid STA/MTA conflicts.
ScopedComPtr<IMMDeviceEnumerator> enumerator;
HRESULT hr = CoCreateInstance(__uuidof(MMDeviceEnumerator),
NULL,
CLSCTX_INPROC_SERVER,
__uuidof(IMMDeviceEnumerator),
enumerator.ReceiveVoid());
if (FAILED(hr))
return hr;
ScopedComPtr<IMMDevice> endpoint_device;
if (device_id == AudioManagerBase::kDefaultDeviceId) {
// Retrieve the default capture audio endpoint.
hr = enumerator->GetDefaultAudioEndpoint(eCapture, eConsole,
endpoint_device.Receive());
} else {
// Retrieve a capture endpoint device that is specified by an endpoint
// device-identification string.
hr = enumerator->GetDevice(UTF8ToUTF16(device_id).c_str(),
endpoint_device.Receive());
}
if (FAILED(hr)) {
// This will happen if there's no audio capture device found or available
// (e.g. some audio cards that have inputs will still report them as
// "not found" when no mic is plugged into the input jack).
LOG(WARNING) << "No audio end point: " << std::hex << hr;
return hr;
}
ScopedComPtr<IAudioClient> audio_client;
hr = endpoint_device->Activate(__uuidof(IAudioClient),
CLSCTX_INPROC_SERVER,
NULL,
audio_client.ReceiveVoid());
DCHECK(SUCCEEDED(hr)) << "Failed to activate device: " << std::hex << hr;
if (SUCCEEDED(hr)) {
hr = audio_client->GetMixFormat(device_format);
DCHECK(SUCCEEDED(hr)) << "GetMixFormat: " << std::hex << hr;
}
return hr;
}
void WASAPIAudioInputStream::Run() {
ScopedCOMInitializer com_init(ScopedCOMInitializer::kMTA);
// Increase the thread priority.
capture_thread_->SetThreadPriority(base::kThreadPriority_RealtimeAudio);
// Enable MMCSS to ensure that this thread receives prioritized access to
// CPU resources.
DWORD task_index = 0;
HANDLE mm_task = avrt::AvSetMmThreadCharacteristics(L"Pro Audio",
&task_index);
bool mmcss_is_ok =
(mm_task && avrt::AvSetMmThreadPriority(mm_task, AVRT_PRIORITY_CRITICAL));
if (!mmcss_is_ok) {
// Failed to enable MMCSS on this thread. It is not fatal but can lead
// to reduced QoS at high load.
DWORD err = GetLastError();
LOG(WARNING) << "Failed to enable MMCSS (error code=" << err << ").";
}
// Allocate a buffer with a size that enables us to take care of cases like:
// 1) The recorded buffer size is smaller than, or does not exactly match,
// the selected packet size used in each callback.
// 2) The selected buffer size is larger than the recorded buffer size in
// each event.
size_t buffer_frame_index = 0;
size_t capture_buffer_size = std::max(
2 * endpoint_buffer_size_frames_ * frame_size_,
2 * packet_size_frames_ * frame_size_);
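// Example: a 960-frame endpoint buffer, 480-frame packets and 4-byte frames
// give max(2 * 960 * 4, 2 * 480 * 4) = 7680 bytes.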
scoped_array<uint8> capture_buffer(new uint8[capture_buffer_size]);
LARGE_INTEGER now_count;
bool recording = true;
bool error = false;
double volume = GetVolume();
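// |stop_capture_event_| is placed first in the array since
// WaitForMultipleObjects() returns the lowest signaled index; a pending
// shutdown therefore takes precedence if both events are signaled at once.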
HANDLE wait_array[2] = {stop_capture_event_, audio_samples_ready_event_};
while (recording && !error) {
HRESULT hr = S_FALSE;
// Wait for a close-down event or a new capture event.
DWORD wait_result = WaitForMultipleObjects(2, wait_array, FALSE, INFINITE);
switch (wait_result) {
case WAIT_FAILED:
error = true;
break;
case WAIT_OBJECT_0 + 0:
// |stop_capture_event_| has been set.
recording = false;
break;
case WAIT_OBJECT_0 + 1:
{
// |audio_samples_ready_event_| has been set.
BYTE* data_ptr = NULL;
UINT32 num_frames_to_read = 0;
DWORD flags = 0;
UINT64 device_position = 0;
UINT64 first_audio_frame_timestamp = 0;
// Retrieve the amount of data in the capture endpoint buffer,
// replace it with silence if required, create callbacks for each
// packet and store non-delivered data for the next event.
hr = audio_capture_client_->GetBuffer(&data_ptr,
&num_frames_to_read,
&flags,
&device_position,
&first_audio_frame_timestamp);
if (FAILED(hr)) {
DLOG(ERROR) << "Failed to get data from the capture buffer";
continue;
}
if (num_frames_to_read != 0) {
size_t pos = buffer_frame_index * frame_size_;
size_t num_bytes = num_frames_to_read * frame_size_;
DCHECK_GE(capture_buffer_size, pos + num_bytes);
if (flags & AUDCLNT_BUFFERFLAGS_SILENT) {
// Clear out the local buffer since silence is reported.
memset(&capture_buffer[pos], 0, num_bytes);
} else {
// Copy captured data from audio engine buffer to local buffer.
memcpy(&capture_buffer[pos], data_ptr, num_bytes);
}
buffer_frame_index += num_frames_to_read;
}
hr = audio_capture_client_->ReleaseBuffer(num_frames_to_read);
DLOG_IF(ERROR, FAILED(hr)) << "Failed to release capture buffer";
// Derive a delay estimate for the captured audio packet.
// The value contains two parts (A+B), where A is the delay of the
// first audio frame in the packet and B is the extra delay
// contained in any stored data. Unit is in audio frames.
QueryPerformanceCounter(&now_count);
double audio_delay_frames =
((perf_count_to_100ns_units_ * now_count.QuadPart -
first_audio_frame_timestamp) / 10000.0) * ms_to_frame_count_ +
buffer_frame_index - num_frames_to_read;
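// In other words: convert "now" to 100-ns units, subtract the timestamp of
// the first captured frame, and convert that difference to milliseconds and
// then to frames (part A); then add the number of frames that were already
// stored locally before this read (part B).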
// Update the AGC volume level once every second. Note that,
// |volume| is also updated each time SetVolume() is called
// through IPC by the render-side AGC.
QueryAgcVolume(&volume);
// Deliver captured data to the registered consumer using a packet
// size which was specified at construction.
uint32 delay_frames = static_cast<uint32>(audio_delay_frames + 0.5);
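// The delay estimate is rounded to the nearest whole frame above and is
// converted to bytes (frames * bytes-per-frame) when passed to OnData().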
while (buffer_frame_index >= packet_size_frames_) {
uint8* audio_data =
reinterpret_cast<uint8*>(capture_buffer.get());
// Deliver data packet, delay estimation and volume level to
// the user.
sink_->OnData(this,
audio_data,
packet_size_bytes_,
delay_frames * frame_size_,
volume);
// Store parts of the recorded data which can't be delivered
// using the current packet size. The stored section will be used
// either in the next while-loop iteration or in the next
// capture event.
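// memmove() is used instead of memcpy() since the source and
// destination regions may overlap.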
memmove(&capture_buffer[0],
&capture_buffer[packet_size_bytes_],
(buffer_frame_index - packet_size_frames_) * frame_size_);
buffer_frame_index -= packet_size_frames_;
delay_frames -= packet_size_frames_;
}
}
break;
default:
error = true;
break;
}
}
if (recording && error) {
// TODO(henrika): perhaps it is worth improving the cleanup here by e.g.
// stopping the audio client, joining the thread etc.?
NOTREACHED() << "WASAPI capturing failed with error code "
<< GetLastError();
}
// Disable MMCSS.
if (mm_task && !avrt::AvRevertMmThreadCharacteristics(mm_task)) {
PLOG(WARNING) << "Failed to disable MMCSS";
}
}
void WASAPIAudioInputStream::HandleError(HRESULT err) {
NOTREACHED() << "Error code: " << err;
if (sink_)
sink_->OnError(this, static_cast<int>(err));
}
HRESULT WASAPIAudioInputStream::SetCaptureDevice() {
ScopedComPtr<IMMDeviceEnumerator> enumerator;
HRESULT hr = CoCreateInstance(__uuidof(MMDeviceEnumerator),
NULL,
CLSCTX_INPROC_SERVER,
__uuidof(IMMDeviceEnumerator),
enumerator.ReceiveVoid());
if (SUCCEEDED(hr)) {
// Retrieve the IMMDevice by using the specified role or the specified
// unique endpoint device-identification string.
// TODO(henrika): possibly add support for the eCommunications as well.
if (device_id_ == AudioManagerBase::kDefaultDeviceId) {
// Retrieve the default capture audio endpoint for the specified role.
// Note that, in Windows Vista, the MMDevice API supports device roles
// but the system-supplied user interface programs do not.
hr = enumerator->GetDefaultAudioEndpoint(eCapture,
eConsole,
endpoint_device_.Receive());
} else {
// Retrieve a capture endpoint device that is specified by an endpoint
// device-identification string.
hr = enumerator->GetDevice(UTF8ToUTF16(device_id_).c_str(),
endpoint_device_.Receive());
}
if (FAILED(hr))
return hr;
// Verify that the audio endpoint device is active, i.e., the audio
// adapter that connects to the endpoint device is present and enabled.
DWORD state = DEVICE_STATE_DISABLED;
hr = endpoint_device_->GetState(&state);
if (SUCCEEDED(hr)) {
if (!(state & DEVICE_STATE_ACTIVE)) {
DLOG(ERROR) << "Selected capture device is not active.";
hr = E_ACCESSDENIED;
}
}
}
return hr;
}
HRESULT WASAPIAudioInputStream::ActivateCaptureDevice() {
// Creates and activates an IAudioClient COM object given the selected
// capture endpoint device.
HRESULT hr = endpoint_device_->Activate(__uuidof(IAudioClient),
CLSCTX_INPROC_SERVER,
NULL,
audio_client_.ReceiveVoid());
return hr;
}
HRESULT WASAPIAudioInputStream::GetAudioEngineStreamFormat() {
HRESULT hr = S_OK;
#ifndef NDEBUG
// The GetMixFormat() method retrieves the stream format that the
// audio engine uses for its internal processing of shared-mode streams.
// The method always uses a WAVEFORMATEXTENSIBLE structure, instead
// of a stand-alone WAVEFORMATEX structure, to specify the format.
// A WAVEFORMATEXTENSIBLE structure can specify both the mapping of
// channels to speakers and the number of bits of precision in each sample.
base::win::ScopedCoMem<WAVEFORMATEXTENSIBLE> format_ex;
hr = audio_client_->GetMixFormat(
reinterpret_cast<WAVEFORMATEX**>(&format_ex));
// See http://msdn.microsoft.com/en-us/windows/hardware/gg463006#EFH
// for details on the WAVE file format.
WAVEFORMATEX format = format_ex->Format;
DVLOG(2) << "WAVEFORMATEX:";
DVLOG(2) << " wFormatTags : 0x" << std::hex << format.wFormatTag;
DVLOG(2) << " nChannels : " << format.nChannels;
DVLOG(2) << " nSamplesPerSec : " << format.nSamplesPerSec;
DVLOG(2) << " nAvgBytesPerSec: " << format.nAvgBytesPerSec;
DVLOG(2) << " nBlockAlign : " << format.nBlockAlign;
DVLOG(2) << " wBitsPerSample : " << format.wBitsPerSample;
DVLOG(2) << " cbSize : " << format.cbSize;
DVLOG(2) << "WAVEFORMATEXTENSIBLE:";
DVLOG(2) << " wValidBitsPerSample: " <<
format_ex->Samples.wValidBitsPerSample;
DVLOG(2) << " dwChannelMask : 0x" << std::hex <<
format_ex->dwChannelMask;
if (format_ex->SubFormat == KSDATAFORMAT_SUBTYPE_PCM)
DVLOG(2) << " SubFormat : KSDATAFORMAT_SUBTYPE_PCM";
else if (format_ex->SubFormat == KSDATAFORMAT_SUBTYPE_IEEE_FLOAT)
DVLOG(2) << " SubFormat : KSDATAFORMAT_SUBTYPE_IEEE_FLOAT";
else if (format_ex->SubFormat == KSDATAFORMAT_SUBTYPE_WAVEFORMATEX)
DVLOG(2) << " SubFormat : KSDATAFORMAT_SUBTYPE_WAVEFORMATEX";
#endif
return hr;
}
bool WASAPIAudioInputStream::DesiredFormatIsSupported() {
// An application that uses WASAPI to manage shared-mode streams can rely
// on the audio engine to perform only limited format conversions. The audio
// engine can convert between a standard PCM sample size used by the
// application and the floating-point samples that the engine uses for its
// internal processing. However, the format for an application stream
// typically must have the same number of channels and the same sample
// rate as the stream format used by the device.
// Many audio devices support both PCM and non-PCM stream formats. However,
// the audio engine can mix only PCM streams.
base::win::ScopedCoMem<WAVEFORMATEX> closest_match;
HRESULT hr = audio_client_->IsFormatSupported(AUDCLNT_SHAREMODE_SHARED,
&format_,
&closest_match);
DLOG_IF(ERROR, hr == S_FALSE) << "Format is not supported "
<< "but a closest match exists.";
return (hr == S_OK);
}
HRESULT WASAPIAudioInputStream::InitializeAudioEngine() {
// Initialize the audio stream between the client and the device.
// We connect indirectly through the audio engine by using shared mode,
// and WASAPI is initialized in event-driven mode.
// Note that, |hnsBufferDuration| is set to 0, which ensures that the
// buffer is never smaller than the minimum buffer size needed to ensure
// that glitches do not occur between the periodic processing passes.
// This setting should lead to the lowest possible latency.
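// AUDCLNT_STREAMFLAGS_EVENTCALLBACK enables event-driven buffering; the
// engine will signal the event registered via SetEventHandle() below each
// time a buffer becomes ready. AUDCLNT_STREAMFLAGS_NOPERSIST prevents the
// session's volume and mute settings from being persisted.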
HRESULT hr = audio_client_->Initialize(AUDCLNT_SHAREMODE_SHARED,
AUDCLNT_STREAMFLAGS_EVENTCALLBACK |
AUDCLNT_STREAMFLAGS_NOPERSIST,
0, // hnsBufferDuration
0,
&format_,
NULL);
if (FAILED(hr))
return hr;
// Retrieve the length of the endpoint buffer shared between the client
// and the audio engine. The buffer length determines the maximum amount
// of capture data that the audio engine can read from the endpoint buffer
// during a single processing pass.
// A typical value is 960 audio frames <=> 20ms @ 48kHz sample rate.
hr = audio_client_->GetBufferSize(&endpoint_buffer_size_frames_);
if (FAILED(hr))
return hr;
DVLOG(1) << "endpoint buffer size: " << endpoint_buffer_size_frames_
<< " [frames]";
#ifndef NDEBUG
// The period between processing passes by the audio engine is fixed for a
// particular audio endpoint device and represents the smallest processing
// quantum for the audio engine. This period plus the stream latency between
// the buffer and endpoint device represents the minimum possible latency
// that an audio application can achieve.
// TODO(henrika): possibly remove this section when all parts are ready.
REFERENCE_TIME device_period_shared_mode = 0;
REFERENCE_TIME device_period_exclusive_mode = 0;
HRESULT hr_dbg = audio_client_->GetDevicePeriod(
&device_period_shared_mode, &device_period_exclusive_mode);
if (SUCCEEDED(hr_dbg)) {
DVLOG(1) << "device period: "
<< static_cast<double>(device_period_shared_mode / 10000.0)
<< " [ms]";
}
REFERENCE_TIME latency = 0;
hr_dbg = audio_client_->GetStreamLatency(&latency);
if (SUCCEEDED(hr_dbg)) {
DVLOG(1) << "stream latency: " << static_cast<double>(latency / 10000.0)
<< " [ms]";
}
#endif
// Set the event handle that the audio engine will signal each time
// a buffer becomes ready to be processed by the client.
hr = audio_client_->SetEventHandle(audio_samples_ready_event_.Get());
if (FAILED(hr))
return hr;
// Get access to the IAudioCaptureClient interface. This interface
// enables us to read input data from the capture endpoint buffer.
hr = audio_client_->GetService(__uuidof(IAudioCaptureClient),
audio_capture_client_.ReceiveVoid());
if (FAILED(hr))
return hr;
// Obtain a reference to the ISimpleAudioVolume interface which enables
// us to control the master volume level of an audio session.
hr = audio_client_->GetService(__uuidof(ISimpleAudioVolume),
simple_audio_volume_.ReceiveVoid());
return hr;
}