author    janx@chromium.org <janx@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>  2013-06-10 16:49:01 +0000
committer janx@chromium.org <janx@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>  2013-06-10 16:49:01 +0000
commit    39841f3451c1063363f2fee3cb32564b18ffb5a9 (patch)
tree      6d7fb87f5c58cb305850d5476254024c6f8c0800 /content
parent    dae0ae9eee4511b63b9dae31515d4fb878c6915b (diff)
Fix naked protected member in SpeechRecognizer
The Chromium style guide forbids classes from exposing naked protected members. This change makes two SpeechRecognizer data members private instead of protected and adds two protected accessors for subclasses to use.

BUG=248077
Review URL: https://chromiumcodereview.appspot.com/16012015

git-svn-id: svn://svn.chromium.org/chrome/trunk/src@205227 0039d316-1c4b-4281-b951-d872f2087c98
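For readers skimming the diff below, here is a minimal sketch of the resulting pattern; the class names are hypothetical stand-ins, not the actual Chromium declarations. The data members become private, and subclasses reach them only through protected accessors.

// Illustrative sketch only; names are hypothetical, not the Chromium headers.
class EventListener;  // stand-in for SpeechRecognitionEventListener

class Recognizer {
 protected:
  virtual ~Recognizer() {}

  // Subclasses read the state through protected accessors...
  EventListener* listener() const { return listener_; }
  int session_id() const { return session_id_; }

 private:
  // ...while the members themselves stay private, as the style guide requires.
  EventListener* listener_;
  int session_id_;
};

class RecognizerImpl : public Recognizer {
 public:
  void OnStart() {
    // Before the change a subclass would have touched listener_ and
    // session_id_ directly; now it calls the accessors instead, e.g.
    // listener()->OnRecognitionStart(session_id()).
  }
};

The accessors hand back the pointer and the id by value, so subclasses keep their read access but can no longer rebind the members directly.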
Diffstat (limited to 'content')
-rw-r--r--  content/browser/speech/speech_recognizer.h        7
-rw-r--r--  content/browser/speech/speech_recognizer_impl.cc  38
2 files changed, 24 insertions(+), 21 deletions(-)
diff --git a/content/browser/speech/speech_recognizer.h b/content/browser/speech/speech_recognizer.h
index bb8fd97..b9ba67b 100644
--- a/content/browser/speech/speech_recognizer.h
+++ b/content/browser/speech/speech_recognizer.h
@@ -26,9 +26,12 @@ class CONTENT_EXPORT SpeechRecognizer
virtual bool IsCapturingAudio() const = 0;
protected:
- friend class base::RefCountedThreadSafe<SpeechRecognizer>;
-
virtual ~SpeechRecognizer() {}
+ SpeechRecognitionEventListener* listener() const { return listener_; }
+ int session_id() const { return session_id_; }
+
+ private:
+ friend class base::RefCountedThreadSafe<SpeechRecognizer>;
SpeechRecognitionEventListener* listener_;
int session_id_;
diff --git a/content/browser/speech/speech_recognizer_impl.cc b/content/browser/speech/speech_recognizer_impl.cc
index 65ec1fb..1d1c635 100644
--- a/content/browser/speech/speech_recognizer_impl.cc
+++ b/content/browser/speech/speech_recognizer_impl.cc
@@ -81,7 +81,7 @@ SpeechRecognizerImpl::SpeechRecognizerImpl(
is_dispatching_event_(false),
is_single_shot_(is_single_shot),
state_(STATE_IDLE) {
- DCHECK(listener_ != NULL);
+ DCHECK(this->listener() != NULL);
DCHECK(recognition_engine_ != NULL);
if (is_single_shot) {
// In single shot recognition, the session is automatically ended after:
@@ -396,7 +396,7 @@ SpeechRecognizerImpl::StartRecording(const FSMEventArgs&) {
DVLOG(1) << "SpeechRecognizerImpl starting audio capture.";
num_samples_recorded_ = 0;
audio_level_ = 0;
- listener_->OnRecognitionStart(session_id_);
+ listener()->OnRecognitionStart(session_id());
if (!audio_manager->HasAudioInputDevices()) {
return Abort(SpeechRecognitionError(SPEECH_RECOGNITION_ERROR_AUDIO,
@@ -429,7 +429,7 @@ SpeechRecognizerImpl::StartRecognitionEngine(const FSMEventArgs& event_args) {
// started and the delegate notified about the event.
DCHECK(recognition_engine_.get() != NULL);
recognition_engine_->StartRecognition();
- listener_->OnAudioStart(session_id_);
+ listener()->OnAudioStart(session_id());
// This is a little hack, since TakeAudioChunk() is already called by
// ProcessAudioPipeline(). It is the best tradeoff, unless we allow dropping
@@ -443,7 +443,7 @@ SpeechRecognizerImpl::WaitEnvironmentEstimationCompletion(const FSMEventArgs&) {
DCHECK(endpointer_.IsEstimatingEnvironment());
if (GetElapsedTimeMs() >= kEndpointerEstimationTimeMs) {
endpointer_.SetUserInputMode();
- listener_->OnEnvironmentEstimationComplete(session_id_);
+ listener()->OnEnvironmentEstimationComplete(session_id());
return STATE_WAITING_FOR_SPEECH;
} else {
return STATE_ESTIMATING_ENVIRONMENT;
@@ -453,7 +453,7 @@ SpeechRecognizerImpl::WaitEnvironmentEstimationCompletion(const FSMEventArgs&) {
SpeechRecognizerImpl::FSMState
SpeechRecognizerImpl::DetectUserSpeechOrTimeout(const FSMEventArgs&) {
if (endpointer_.DidStartReceivingSpeech()) {
- listener_->OnSoundStart(session_id_);
+ listener()->OnSoundStart(session_id());
return STATE_RECOGNIZING;
} else if (GetElapsedTimeMs() >= kNoSpeechTimeoutMs) {
return Abort(SpeechRecognitionError(SPEECH_RECOGNITION_ERROR_NO_SPEECH));
@@ -477,9 +477,9 @@ SpeechRecognizerImpl::StopCaptureAndWaitForResult(const FSMEventArgs&) {
recognition_engine_->AudioChunksEnded();
if (state_ > STATE_WAITING_FOR_SPEECH)
- listener_->OnSoundEnd(session_id_);
+ listener()->OnSoundEnd(session_id());
- listener_->OnAudioEnd(session_id_);
+ listener()->OnAudioEnd(session_id());
return STATE_WAITING_FINAL_RESULT;
}
@@ -514,15 +514,15 @@ SpeechRecognizerImpl::FSMState SpeechRecognizerImpl::Abort(
}
if (state_ > STATE_WAITING_FOR_SPEECH && state_ < STATE_WAITING_FINAL_RESULT)
- listener_->OnSoundEnd(session_id_);
+ listener()->OnSoundEnd(session_id());
if (state_ > STATE_STARTING && state_ < STATE_WAITING_FINAL_RESULT)
- listener_->OnAudioEnd(session_id_);
+ listener()->OnAudioEnd(session_id());
if (error.code != SPEECH_RECOGNITION_ERROR_NONE)
- listener_->OnRecognitionError(session_id_, error);
+ listener()->OnRecognitionError(session_id(), error);
- listener_->OnRecognitionEnd(session_id_);
+ listener()->OnRecognitionEnd(session_id());
return STATE_ENDED;
}
@@ -542,14 +542,14 @@ SpeechRecognizerImpl::FSMState SpeechRecognizerImpl::ProcessIntermediateResult(
if (state_ == STATE_ESTIMATING_ENVIRONMENT) {
DCHECK(endpointer_.IsEstimatingEnvironment());
endpointer_.SetUserInputMode();
- listener_->OnEnvironmentEstimationComplete(session_id_);
+ listener()->OnEnvironmentEstimationComplete(session_id());
} else if (state_ == STATE_WAITING_FOR_SPEECH) {
- listener_->OnSoundStart(session_id_);
+ listener()->OnSoundStart(session_id());
} else {
DCHECK_EQ(STATE_RECOGNIZING, state_);
}
- listener_->OnRecognitionResults(session_id_, event_args.engine_results);
+ listener()->OnRecognitionResults(session_id(), event_args.engine_results);
return STATE_RECOGNIZING;
}
@@ -570,7 +570,7 @@ SpeechRecognizerImpl::ProcessFinalResult(const FSMEventArgs& event_args) {
}
if (provisional_results_pending) {
- listener_->OnRecognitionResults(session_id_, results);
+ listener()->OnRecognitionResults(session_id(), results);
// We don't end the recognition if a provisional result is received in
// STATE_WAITING_FINAL_RESULT. A definitive result will come next and will
// end the recognition.
@@ -589,10 +589,10 @@ SpeechRecognizerImpl::ProcessFinalResult(const FSMEventArgs& event_args) {
// 4. The speech recognition engine, therefore, emits an empty result to
// notify that the recognition is ended with no error, yet neither any
// further result.
- listener_->OnRecognitionResults(session_id_, results);
+ listener()->OnRecognitionResults(session_id(), results);
}
- listener_->OnRecognitionEnd(session_id_);
+ listener()->OnRecognitionEnd(session_id());
return STATE_ENDED;
}
@@ -642,8 +642,8 @@ void SpeechRecognizerImpl::UpdateSignalAndNoiseLevels(const float& rms,
noise_level = std::min(std::max(0.0f, noise_level),
kAudioMeterRangeMaxUnclipped);
- listener_->OnAudioLevelsChange(
- session_id_, clip_detected ? 1.0f : audio_level_, noise_level);
+ listener()->OnAudioLevelsChange(
+ session_id(), clip_detected ? 1.0f : audio_level_, noise_level);
}
void SpeechRecognizerImpl::SetAudioManagerForTests(