// Copyright (c) 2011 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "chrome/browser/speech/chrome_speech_input_manager.h"

#include <string>

#include "base/synchronization/lock.h"
#include "base/threading/thread_restrictions.h"
#include "base/utf_string_conversions.h"
#include "chrome/browser/browser_process.h"
#include "chrome/browser/prefs/pref_service.h"
#include "chrome/browser/tab_contents/tab_util.h"
#include "chrome/common/chrome_switches.h"
#include "chrome/common/pref_names.h"
#include "content/browser/browser_thread.h"
#include "grit/generated_resources.h"
#include "media/audio/audio_manager.h"
#include "ui/base/l10n/l10n_util.h"

#if defined(OS_WIN)
#include "chrome/installer/util/wmi.h"
#endif
namespace speech_input {

// Asynchronously fetches the PC and audio hardware/driver info if
// the user has opted into UMA. This information is sent with speech input
// requests to the server for identifying and improving quality issues with
// specific device configurations.
class ChromeSpeechInputManager::OptionalRequestInfo
    : public base::RefCountedThreadSafe<OptionalRequestInfo> {
 public:
  OptionalRequestInfo() : can_report_metrics_(false) {}

  void Refresh() {
    DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO));
    // UMA opt-in can be checked only from the UI thread, so switch to that.
    BrowserThread::PostTask(BrowserThread::UI, FROM_HERE,
        NewRunnableMethod(this,
            &OptionalRequestInfo::CheckUMAAndGetHardwareInfo));
  }

  void CheckUMAAndGetHardwareInfo() {
    DCHECK(BrowserThread::CurrentlyOn(BrowserThread::UI));
    if (g_browser_process->local_state()->GetBoolean(
        prefs::kMetricsReportingEnabled)) {
      // Access potentially slow OS calls from the FILE thread.
      BrowserThread::PostTask(BrowserThread::FILE, FROM_HERE,
          NewRunnableMethod(this, &OptionalRequestInfo::GetHardwareInfo));
    }
  }

  void GetHardwareInfo() {
    DCHECK(BrowserThread::CurrentlyOn(BrowserThread::FILE));
    base::AutoLock lock(lock_);
    can_report_metrics_ = true;
#if defined(OS_WIN)
    value_ = UTF16ToUTF8(
        installer::WMIComputerSystem::GetModel() + L"|" +
        AudioManager::GetAudioManager()->GetAudioInputDeviceModel());
#else  // defined(OS_WIN)
    value_ = UTF16ToUTF8(
        AudioManager::GetAudioManager()->GetAudioInputDeviceModel());
#endif  // defined(OS_WIN)
  }

  std::string value() {
    base::AutoLock lock(lock_);
    return value_;
  }

  bool can_report_metrics() {
    base::AutoLock lock(lock_);
    return can_report_metrics_;
  }

 private:
  base::Lock lock_;
  std::string value_;
  bool can_report_metrics_;

  DISALLOW_COPY_AND_ASSIGN(OptionalRequestInfo);
};
ChromeSpeechInputManager::SpeechInputRequest::SpeechInputRequest() {
}

ChromeSpeechInputManager::SpeechInputRequest::~SpeechInputRequest() {
}
ChromeSpeechInputManager* ChromeSpeechInputManager::GetInstance() {
  return Singleton<ChromeSpeechInputManager>::get();
}
ChromeSpeechInputManager::ChromeSpeechInputManager()
    : recording_caller_id_(0),
      bubble_controller_(new SpeechInputBubbleController(
          ALLOW_THIS_IN_INITIALIZER_LIST(this))) {
}
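// Cancels any requests that are still pending so their recognizers and UI
// bubbles are torn down before the manager goes away.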
ChromeSpeechInputManager::~ChromeSpeechInputManager() {
  while (requests_.begin() != requests_.end())
    CancelRecognition(requests_.begin()->first);
}
bool ChromeSpeechInputManager::HasPendingRequest(int caller_id) const {
  return requests_.find(caller_id) != requests_.end();
}
SpeechInputManagerDelegate* ChromeSpeechInputManager::GetDelegate(
    int caller_id) const {
  return requests_.find(caller_id)->second.delegate;
}
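// Creates the UI bubble and a SpeechRecognizer for this request, starts the
// optional hardware info fetch if needed, and then begins recording.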
void ChromeSpeechInputManager::StartRecognition(
    SpeechInputManagerDelegate* delegate,
    int caller_id,
    int render_process_id,
    int render_view_id,
    const gfx::Rect& element_rect,
    const std::string& language,
    const std::string& grammar,
    const std::string& origin_url) {
  DCHECK(!HasPendingRequest(caller_id));

  bubble_controller_->CreateBubble(caller_id, render_process_id,
                                   render_view_id, element_rect);

  if (!optional_request_info_.get()) {
    optional_request_info_ = new OptionalRequestInfo();
    // Since hardware info is optional with speech input requests, we start an
    // asynchronous fetch here and move on with recording audio. The first
    // speech input request will send an empty string for the hardware info;
    // subsequent requests may include it if the fetch has completed by then.
    // This way we don't stall the user with a long wait and disk seeks when
    // they click on a UI element and start speaking.
    optional_request_info_->Refresh();
  }

  SpeechInputRequest* request = &requests_[caller_id];
  request->delegate = delegate;
  request->recognizer = new SpeechRecognizer(
      this, caller_id, language, grammar, censor_results(),
      optional_request_info_->value(),
      optional_request_info_->can_report_metrics() ? origin_url : "");
  request->is_active = false;
  StartRecognitionForRequest(caller_id);
}
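// Aborts any recording in progress for another caller, then starts recording
// for this request, or shows a "no microphone" message if no audio input
// device is available.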
void ChromeSpeechInputManager::StartRecognitionForRequest(int caller_id) {
  DCHECK(HasPendingRequest(caller_id));

  // If we are currently recording audio for another caller, abort that
  // cleanly.
  if (recording_caller_id_)
    CancelRecognitionAndInformDelegate(recording_caller_id_);

  if (!AudioManager::GetAudioManager()->HasAudioInputDevices()) {
    bubble_controller_->SetBubbleMessage(
        caller_id, l10n_util::GetStringUTF16(IDS_SPEECH_INPUT_NO_MIC));
  } else {
    recording_caller_id_ = caller_id;
    requests_[caller_id].is_active = true;
    requests_[caller_id].recognizer->StartRecording();
    bubble_controller_->SetBubbleWarmUpMode(caller_id);
  }
}
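// Cancels the recognizer for this caller if it is active, drops the request
// from the map and closes its bubble.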
void ChromeSpeechInputManager::CancelRecognition(int caller_id) {
  DCHECK(HasPendingRequest(caller_id));
  if (requests_[caller_id].is_active)
    requests_[caller_id].recognizer->CancelRecognition();
  requests_.erase(caller_id);
  if (recording_caller_id_ == caller_id)
    recording_caller_id_ = 0;
  bubble_controller_->CloseBubble(caller_id);
}
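// Cancels every pending request that was made through the given delegate.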
void ChromeSpeechInputManager::CancelAllRequestsWithDelegate(
    SpeechInputManagerDelegate* delegate) {
  SpeechRecognizerMap::iterator it = requests_.begin();
  while (it != requests_.end()) {
    if (it->second.delegate == delegate) {
      CancelRecognition(it->first);
      // This map will have very few elements so it is simpler to restart.
      it = requests_.begin();
    } else {
      ++it;
    }
  }
}
void ChromeSpeechInputManager::StopRecording(int caller_id) {
  DCHECK(HasPendingRequest(caller_id));
  requests_[caller_id].recognizer->StopRecording();
}
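// Forwards the recognition result received from the recognizer to the
// requesting delegate.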
void ChromeSpeechInputManager::SetRecognitionResult(
    int caller_id, bool error, const SpeechInputResultArray& result) {
  DCHECK(HasPendingRequest(caller_id));
  GetDelegate(caller_id)->SetRecognitionResult(caller_id, result);
}
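// Recording for this caller has finished; notify the delegate and switch the
// bubble to the recognition-in-progress state.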
void ChromeSpeechInputManager::DidCompleteRecording(int caller_id) {
  DCHECK(recording_caller_id_ == caller_id);
  DCHECK(HasPendingRequest(caller_id));
  recording_caller_id_ = 0;
  GetDelegate(caller_id)->DidCompleteRecording(caller_id);
  bubble_controller_->SetBubbleRecognizingMode(caller_id);
}
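// Recognition has completed; notify the delegate, then remove the request and
// close its bubble.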
void ChromeSpeechInputManager::DidCompleteRecognition(int caller_id) {
  GetDelegate(caller_id)->DidCompleteRecognition(caller_id);
  requests_.erase(caller_id);
  bubble_controller_->CloseBubble(caller_id);
}
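// Maps the recognizer error code to the matching localized message and shows
// it in the bubble; the request stays in the map so the user can retry or
// cancel from the bubble.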
void ChromeSpeechInputManager::OnRecognizerError(
    int caller_id, SpeechRecognizer::ErrorCode error) {
  if (caller_id == recording_caller_id_)
    recording_caller_id_ = 0;
  requests_[caller_id].is_active = false;

  struct ErrorMessageMapEntry {
    SpeechRecognizer::ErrorCode error;
    int message_id;
  };
  ErrorMessageMapEntry error_message_map[] = {
    {
      SpeechRecognizer::RECOGNIZER_ERROR_CAPTURE, IDS_SPEECH_INPUT_MIC_ERROR
    }, {
      SpeechRecognizer::RECOGNIZER_ERROR_NO_SPEECH, IDS_SPEECH_INPUT_NO_SPEECH
    }, {
      SpeechRecognizer::RECOGNIZER_ERROR_NO_RESULTS, IDS_SPEECH_INPUT_NO_RESULTS
    }, {
      SpeechRecognizer::RECOGNIZER_ERROR_NETWORK, IDS_SPEECH_INPUT_NET_ERROR
    }
  };
  for (size_t i = 0; i < ARRAYSIZE_UNSAFE(error_message_map); ++i) {
    if (error_message_map[i].error == error) {
      bubble_controller_->SetBubbleMessage(
          caller_id,
          l10n_util::GetStringUTF16(error_message_map[i].message_id));
      return;
    }
  }
  NOTREACHED() << "unknown error " << error;
}
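// Audio capture has actually started; move the bubble from warm-up to
// recording mode.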
void ChromeSpeechInputManager::DidStartReceivingAudio(int caller_id) {
  DCHECK(HasPendingRequest(caller_id));
  DCHECK(recording_caller_id_ == caller_id);
  bubble_controller_->SetBubbleRecordingMode(caller_id);
}
void ChromeSpeechInputManager::DidCompleteEnvironmentEstimation(
    int caller_id) {
  DCHECK(HasPendingRequest(caller_id));
  DCHECK(recording_caller_id_ == caller_id);
}
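// Forwards the current input and noise volume levels to the bubble UI.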
void ChromeSpeechInputManager::SetInputVolume(int caller_id, float volume,
                                              float noise_volume) {
  DCHECK(HasPendingRequest(caller_id));
  DCHECK_EQ(recording_caller_id_, caller_id);
  bubble_controller_->SetBubbleInputVolume(caller_id, volume, noise_volume);
}
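// Cancels the request and notifies its delegate that recording and
// recognition have completed.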
void ChromeSpeechInputManager::CancelRecognitionAndInformDelegate(
    int caller_id) {
  SpeechInputManagerDelegate* cur_delegate = GetDelegate(caller_id);
  CancelRecognition(caller_id);
  cur_delegate->DidCompleteRecording(caller_id);
  cur_delegate->DidCompleteRecognition(caller_id);
}
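// Handles clicks on the Cancel and Try Again buttons of the bubble.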
void ChromeSpeechInputManager::InfoBubbleButtonClicked(
    int caller_id, SpeechInputBubble::Button button) {
  DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO));
  // Ignore if the caller id is no longer in our active recognizers list: the
  // user might have clicked more than once, or recognition could have been
  // cancelled for other reasons before the click was processed.
  if (!HasPendingRequest(caller_id))
    return;

  if (button == SpeechInputBubble::BUTTON_CANCEL) {
    CancelRecognitionAndInformDelegate(caller_id);
  } else if (button == SpeechInputBubble::BUTTON_TRY_AGAIN) {
    StartRecognitionForRequest(caller_id);
  }
}
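// Invoked when focus moves away from the speech input bubble.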
void ChromeSpeechInputManager::InfoBubbleFocusChanged(int caller_id) {
  DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO));
  // Ignore if the caller id is no longer in our active recognizers list: the
  // user might have clicked more than once, or recognition could have ended
  // for other reasons before the focus change was processed.
  if (HasPendingRequest(caller_id)) {
    // If recording is still in progress or we are showing an error message to
    // the user, abort since the user has switched focus. Otherwise recognition
    // is already underway; keep it going so the user can start speaking into
    // another element while this request gets its results in parallel.
    if (recording_caller_id_ == caller_id || !requests_[caller_id].is_active) {
      CancelRecognitionAndInformDelegate(caller_id);
    }
  }
}

}  // namespace speech_input