author     primiano@chromium.org <primiano@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>   2012-05-16 10:30:16 +0000
committer  primiano@chromium.org <primiano@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>   2012-05-16 10:30:16 +0000
commit     12f4fb9b3b5f32e9822375671225a5a3dd0ed628 (patch)
tree       d56084c22ee539c9149e78dcf3cf0b1391b88b71 /content/browser/speech/speech_recognition_manager_impl.cc
parent     0e719d5145f93900b9c9111272de5e2b23e464bc (diff)
Refactoring of chrome speech recognition architecture (Speech CL1.8)
- Simplified the architecture of SpeechRecognitionManagerImpl. It no longer tracks the recognizer state in its own state variable; instead it derives the state by querying the recognizer object.
In fact, after moving the bubble-handling code to the delegate, only three states are needed in the manager, and they can be inferred simply by looking at the recognizer (see the excerpt below).
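For reference, this is the new helper introduced by this CL (lightly annotated) that replaces the old per-session state variable; the state is computed on demand from the recognizer:

    // The manager no longer stores a state: it derives it from the recognizer.
    SpeechRecognitionManagerImpl::FSMState
    SpeechRecognitionManagerImpl::GetSessionState(int session_id) const {
      const Session& session = GetSession(session_id);
      if (!session.recognizer.get() || !session.recognizer->IsActive())
        return SESSION_STATE_IDLE;
      if (session.recognizer->IsCapturingAudio())
        return SESSION_STATE_CAPTURING_AUDIO;
      return SESSION_STATE_WAITING_FOR_RESULT;
    }

The three states map directly onto what the recognizer reports: idle, capturing audio, or waiting for the result.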
- SpeechRecognitionManagerImpl no longer deals with UI-related concerns (bubbles, detach, interactive, background, etc.); these have been moved to ChromeSpeechRecognitionManagerDelegate.
- ChromeSpeechRecognitionManagerDelegate is now more straightforward: it implements the "universal" SpeechRecognitionEventListener interface, so it receives the same events as the manager in the same order, and it layers the UI handling (only bubbles for the moment) on top of the manager's behavior (see the relay sketch below).
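Concretely, every event callback in the manager now relays to two listeners: the catch-all listener provided by the delegate (which drives the bubble UI) and the listener registered for the session. OnAudioStart from this CL is representative; the other callbacks follow the same pattern:

    void SpeechRecognitionManagerImpl::OnAudioStart(int session_id) {
      DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO));
      if (!SessionExists(session_id))
        return;
      DCHECK_EQ(session_id_capturing_audio_, session_id);
      // First the catch-all listener provided by the delegate (UI handling)...
      if (SpeechRecognitionEventListener* delegate_listener = GetDelegateListener())
        delegate_listener->OnAudioStart(session_id);
      // ...then the listener registered for this session
      // (most likely InputTagSpeechDispatcherHost).
      if (SpeechRecognitionEventListener* listener = GetListener(session_id))
        listener->OnAudioStart(session_id);
    }

Here GetDelegateListener() simply returns the listener exposed by the delegate (delegate_->GetEventListener()), or NULL if no delegate is installed.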
- Minor change: SpeechRecognizerImpl now raises SPEECH_RECOGNITION_ERROR_ABORTED upon Abort. This has no visible effect at the moment (Abort is only ever issued when the bubble disappears), but it matters for upcoming CLs, in which the speech input extension or the JS APIs can abort the current recognition in order to start a new one (see the abort path below).
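For context, this is the manager-side abort path introduced by this CL; the expectation (per the note above) is that AbortRecognition() causes the recognizer to report the new ABORTED error before its final OnRecognitionEnd, which in turn lets the manager delete the session:

    void SpeechRecognitionManagerImpl::SessionAbort(const Session& session) {
      if (session_id_capturing_audio_ == session.id)
        session_id_capturing_audio_ = kSessionIDInvalid;
      // The recognizer must still be active here: sessions whose recognizer is
      // already inactive are deleted directly from the manager's IDLE state.
      DCHECK(session.recognizer.get() && session.recognizer->IsActive());
      session.recognizer->AbortRecognition();
    }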
BUG=116954
TEST=none
Review URL: https://chromiumcodereview.appspot.com/10352007
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@137393 0039d316-1c4b-4281-b951-d872f2087c98
Diffstat (limited to 'content/browser/speech/speech_recognition_manager_impl.cc')
-rw-r--r--  content/browser/speech/speech_recognition_manager_impl.cc  451
1 file changed, 164 insertions, 287 deletions
diff --git a/content/browser/speech/speech_recognition_manager_impl.cc b/content/browser/speech/speech_recognition_manager_impl.cc index 982cb38..9301d9d 100644 --- a/content/browser/speech/speech_recognition_manager_impl.cc +++ b/content/browser/speech/speech_recognition_manager_impl.cc @@ -47,7 +47,7 @@ SpeechRecognitionManagerImpl* SpeechRecognitionManagerImpl::GetInstance() { } SpeechRecognitionManagerImpl::SpeechRecognitionManagerImpl() - : interactive_session_id_(kSessionIDInvalid), + : session_id_capturing_audio_(kSessionIDInvalid), last_session_id_(kSessionIDInvalid), is_dispatching_event_(false) { delegate_ = content::GetContentClient()->browser()-> @@ -60,8 +60,7 @@ SpeechRecognitionManagerImpl::~SpeechRecognitionManagerImpl() { } int SpeechRecognitionManagerImpl::CreateSession( - const SpeechRecognitionSessionConfig& config, - SpeechRecognitionEventListener* event_listener) { + const SpeechRecognitionSessionConfig& config) { DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO)); const int session_id = GetNextSessionID(); @@ -69,7 +68,7 @@ int SpeechRecognitionManagerImpl::CreateSession( // Set-up the new session. Session& session = sessions_[session_id]; session.id = session_id; - session.event_listener = event_listener; + session.config = config; session.context = config.initial_context; std::string hardware_info; @@ -100,12 +99,13 @@ int SpeechRecognitionManagerImpl::CreateSession( void SpeechRecognitionManagerImpl::StartSession(int session_id) { DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO)); - DCHECK(SessionExists(session_id)); + if (!SessionExists(session_id)) + return; - // If there is another interactive session, send it to background. - if (interactive_session_id_ != kSessionIDInvalid && - interactive_session_id_ != session_id) { - SendSessionToBackground(interactive_session_id_); + // If there is another active session, abort that. 
+ if (session_id_capturing_audio_ != kSessionIDInvalid && + session_id_capturing_audio_ != session_id) { + AbortSession(session_id_capturing_audio_); } if (delegate_) @@ -122,7 +122,7 @@ void SpeechRecognitionManagerImpl::RecognitionAllowedCallback(int session_id, if (is_allowed) { BrowserThread::PostTask(BrowserThread::IO, FROM_HERE, base::Bind(&SpeechRecognitionManagerImpl::DispatchEvent, - Unretained(this), session_id, FSMEventArgs(EVENT_START))); + Unretained(this), session_id, EVENT_START)); } else { sessions_.erase(session_id); } @@ -130,45 +130,39 @@ void SpeechRecognitionManagerImpl::RecognitionAllowedCallback(int session_id, void SpeechRecognitionManagerImpl::AbortSession(int session_id) { DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO)); - DCHECK(SessionExists(session_id)); + if (!SessionExists(session_id)) + return; BrowserThread::PostTask(BrowserThread::IO, FROM_HERE, base::Bind(&SpeechRecognitionManagerImpl::DispatchEvent, Unretained(this), - session_id, FSMEventArgs(EVENT_ABORT))); + session_id, EVENT_ABORT)); } void SpeechRecognitionManagerImpl::StopAudioCaptureForSession(int session_id) { DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO)); - DCHECK(SessionExists(session_id)); - - BrowserThread::PostTask(BrowserThread::IO, FROM_HERE, - base::Bind(&SpeechRecognitionManagerImpl::DispatchEvent, Unretained(this), - session_id, FSMEventArgs(EVENT_STOP_CAPTURE))); -} - -void SpeechRecognitionManagerImpl::SendSessionToBackground(int session_id) { - DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO)); - DCHECK(SessionExists(session_id)); + if (!SessionExists(session_id)) + return; BrowserThread::PostTask(BrowserThread::IO, FROM_HERE, base::Bind(&SpeechRecognitionManagerImpl::DispatchEvent, Unretained(this), - session_id, FSMEventArgs(EVENT_SET_BACKGROUND))); + session_id, EVENT_STOP_CAPTURE)); } // Here begins the SpeechRecognitionEventListener interface implementation, // which will simply relay the events to the proper listener registered for the -// particular session (most likely InputTagSpeechDispatcherHost) and intercept -// some of them to provide UI notifications. +// particular session (most likely InputTagSpeechDispatcherHost) and to the +// catch-all listener provided by the delegate (if any). 
void SpeechRecognitionManagerImpl::OnRecognitionStart(int session_id) { DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO)); if (!SessionExists(session_id)) return; - DCHECK_EQ(interactive_session_id_, session_id); - if (delegate_) - delegate_->ShowWarmUp(session_id); - GetListener(session_id)->OnRecognitionStart(session_id); + DCHECK_EQ(session_id_capturing_audio_, session_id); + if (SpeechRecognitionEventListener* delegate_listener = GetDelegateListener()) + delegate_listener->OnRecognitionStart(session_id); + if (SpeechRecognitionEventListener* listener = GetListener(session_id)) + listener->OnRecognitionStart(session_id); } void SpeechRecognitionManagerImpl::OnAudioStart(int session_id) { @@ -176,10 +170,11 @@ void SpeechRecognitionManagerImpl::OnAudioStart(int session_id) { if (!SessionExists(session_id)) return; - DCHECK_EQ(interactive_session_id_, session_id); - if (delegate_) - delegate_->ShowRecording(session_id); - GetListener(session_id)->OnAudioStart(session_id); + DCHECK_EQ(session_id_capturing_audio_, session_id); + if (SpeechRecognitionEventListener* delegate_listener = GetDelegateListener()) + delegate_listener->OnAudioStart(session_id); + if (SpeechRecognitionEventListener* listener = GetListener(session_id)) + listener->OnAudioStart(session_id); } void SpeechRecognitionManagerImpl::OnEnvironmentEstimationComplete( @@ -188,8 +183,11 @@ void SpeechRecognitionManagerImpl::OnEnvironmentEstimationComplete( if (!SessionExists(session_id)) return; - DCHECK_EQ(interactive_session_id_, session_id); - GetListener(session_id)->OnEnvironmentEstimationComplete(session_id); + DCHECK_EQ(session_id_capturing_audio_, session_id); + if (SpeechRecognitionEventListener* delegate_listener = GetDelegateListener()) + delegate_listener->OnEnvironmentEstimationComplete(session_id); + if (SpeechRecognitionEventListener* listener = GetListener(session_id)) + listener->OnEnvironmentEstimationComplete(session_id); } void SpeechRecognitionManagerImpl::OnSoundStart(int session_id) { @@ -197,8 +195,11 @@ void SpeechRecognitionManagerImpl::OnSoundStart(int session_id) { if (!SessionExists(session_id)) return; - DCHECK_EQ(interactive_session_id_, session_id); - GetListener(session_id)->OnSoundStart(session_id); + DCHECK_EQ(session_id_capturing_audio_, session_id); + if (SpeechRecognitionEventListener* delegate_listener = GetDelegateListener()) + delegate_listener->OnSoundStart(session_id); + if (SpeechRecognitionEventListener* listener = GetListener(session_id)) + listener->OnSoundStart(session_id); } void SpeechRecognitionManagerImpl::OnSoundEnd(int session_id) { @@ -206,7 +207,10 @@ void SpeechRecognitionManagerImpl::OnSoundEnd(int session_id) { if (!SessionExists(session_id)) return; - GetListener(session_id)->OnSoundEnd(session_id); + if (SpeechRecognitionEventListener* delegate_listener = GetDelegateListener()) + delegate_listener->OnSoundEnd(session_id); + if (SpeechRecognitionEventListener* listener = GetListener(session_id)) + listener->OnSoundEnd(session_id); } void SpeechRecognitionManagerImpl::OnAudioEnd(int session_id) { @@ -214,12 +218,13 @@ void SpeechRecognitionManagerImpl::OnAudioEnd(int session_id) { if (!SessionExists(session_id)) return; - // OnAudioEnd can also be raised after an abort request, when the session is - // not interactive anymore. 
- if (interactive_session_id_ == session_id && delegate_) - delegate_->ShowRecognizing(session_id); - - GetListener(session_id)->OnAudioEnd(session_id); + if (SpeechRecognitionEventListener* delegate_listener = GetDelegateListener()) + delegate_listener->OnAudioEnd(session_id); + if (SpeechRecognitionEventListener* listener = GetListener(session_id)) + listener->OnAudioEnd(session_id); + BrowserThread::PostTask(BrowserThread::IO, FROM_HERE, + base::Bind(&SpeechRecognitionManagerImpl::DispatchEvent, Unretained(this), + session_id, EVENT_AUDIO_ENDED)); } void SpeechRecognitionManagerImpl::OnRecognitionResult( @@ -228,11 +233,10 @@ void SpeechRecognitionManagerImpl::OnRecognitionResult( if (!SessionExists(session_id)) return; - GetListener(session_id)->OnRecognitionResult(session_id, result); - FSMEventArgs event_args(EVENT_RECOGNITION_RESULT); - BrowserThread::PostTask(BrowserThread::IO, FROM_HERE, - base::Bind(&SpeechRecognitionManagerImpl::DispatchEvent, Unretained(this), - session_id, event_args)); + if (SpeechRecognitionEventListener* delegate_listener = GetDelegateListener()) + delegate_listener->OnRecognitionResult(session_id, result); + if (SpeechRecognitionEventListener* listener = GetListener(session_id)) + listener->OnRecognitionResult(session_id, result); } void SpeechRecognitionManagerImpl::OnRecognitionError( @@ -241,12 +245,10 @@ void SpeechRecognitionManagerImpl::OnRecognitionError( if (!SessionExists(session_id)) return; - GetListener(session_id)->OnRecognitionError(session_id, error); - FSMEventArgs event_args(EVENT_RECOGNITION_ERROR); - event_args.speech_error = error; - BrowserThread::PostTask(BrowserThread::IO, FROM_HERE, - base::Bind(&SpeechRecognitionManagerImpl::DispatchEvent, Unretained(this), - session_id, event_args)); + if (SpeechRecognitionEventListener* delegate_listener = GetDelegateListener()) + delegate_listener->OnRecognitionError(session_id, error); + if (SpeechRecognitionEventListener* listener = GetListener(session_id)) + listener->OnRecognitionError(session_id, error); } void SpeechRecognitionManagerImpl::OnAudioLevelsChange( @@ -255,11 +257,10 @@ void SpeechRecognitionManagerImpl::OnAudioLevelsChange( if (!SessionExists(session_id)) return; - if (delegate_) - delegate_->ShowInputVolume(session_id, volume, noise_volume); - - GetListener(session_id)->OnAudioLevelsChange(session_id, volume, - noise_volume); + if (SpeechRecognitionEventListener* delegate_listener = GetDelegateListener()) + delegate_listener->OnAudioLevelsChange(session_id, volume, noise_volume); + if (SpeechRecognitionEventListener* listener = GetListener(session_id)) + listener->OnAudioLevelsChange(session_id, volume, noise_volume); } void SpeechRecognitionManagerImpl::OnRecognitionEnd(int session_id) { @@ -267,10 +268,13 @@ void SpeechRecognitionManagerImpl::OnRecognitionEnd(int session_id) { if (!SessionExists(session_id)) return; - GetListener(session_id)->OnRecognitionEnd(session_id); + if (SpeechRecognitionEventListener* delegate_listener = GetDelegateListener()) + delegate_listener->OnRecognitionEnd(session_id); + if (SpeechRecognitionEventListener* listener = GetListener(session_id)) + listener->OnRecognitionEnd(session_id); BrowserThread::PostTask(BrowserThread::IO, FROM_HERE, - base::Bind(&SpeechRecognitionManagerImpl::DispatchEvent, Unretained(this), - session_id, FSMEventArgs(EVENT_RECOGNITION_ENDED))); + base::Bind(&SpeechRecognitionManagerImpl::DispatchEvent, Unretained(this), + session_id, EVENT_RECOGNITION_ENDED)); } // TODO(primiano) After CL2: if we see that both 
InputTagDispatcherHost and @@ -293,68 +297,39 @@ int SpeechRecognitionManagerImpl::LookupSessionByContext( SpeechRecognitionSessionContext SpeechRecognitionManagerImpl::GetSessionContext(int session_id) const { - DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO)); - SessionsTable::const_iterator iter = sessions_.find(session_id); - DCHECK(iter != sessions_.end()); - return iter->second.context; + return GetSession(session_id).context; } void SpeechRecognitionManagerImpl::AbortAllSessionsForListener( SpeechRecognitionEventListener* listener) { + // This method gracefully destroys sessions for the listener. However, since + // the listener itself is likely to be destroyed after this call, we avoid + // dispatching further events to it, marking the |listener_is_active| flag. DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO)); - - // This method ungracefully destroys sessions (and the underlying recognizer) - // for the listener. There is no time to call Abort (that is asynchronous) on - // them since the listener itself is likely to be destroyed after the call, - // and in that case we won't deliver events to a freed listener. - // Thus we assume that the dtors of Sessions (and in turn dtors of all - // contained objects) are designed to dispose resources cleanly. - std::vector<int> sessions_for_listener; - - // Copy coressponding session ids. for (SessionsTable::iterator it = sessions_.begin(); it != sessions_.end(); ++it) { - if (it->second.event_listener == listener) - sessions_for_listener.push_back(it->first); - } - // Remove them. - for (size_t i = 0; i < sessions_for_listener.size(); ++i) { - const int session_id = sessions_for_listener[i]; - if (interactive_session_id_ == session_id) - interactive_session_id_ = kSessionIDInvalid; - sessions_.erase(session_id); + Session& session = it->second; + if (session.config.event_listener == listener) { + AbortSession(session.id); + session.listener_is_active = false; + } } } // ----------------------- Core FSM implementation --------------------------- void SpeechRecognitionManagerImpl::DispatchEvent(int session_id, - FSMEventArgs event_args) { + FSMEvent event) { DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO)); - if (!SessionExists(session_id)) - return; - - Session& session = sessions_[session_id]; - DCHECK_LE(session.state, STATE_MAX_VALUE); - DCHECK_LE(event_args.event, EVENT_MAX_VALUE); + const Session& session = GetSession(session_id); + FSMState session_state = GetSessionState(session_id); + DCHECK_LE(session_state, SESSION_STATE_MAX_VALUE); + DCHECK_LE(event, EVENT_MAX_VALUE); // Event dispatching must be sequential, otherwise it will break all the rules // and the assumptions of the finite state automata model. DCHECK(!is_dispatching_event_); is_dispatching_event_ = true; - - // Pedantic preconditions consistency checks. - if (session.state == STATE_INTERACTIVE) - DCHECK_EQ(interactive_session_id_, session_id); - - if (session.state == STATE_BACKGROUND || - session.state == STATE_WAITING_FOR_DELETION) { - DCHECK_NE(interactive_session_id_, session_id); - } - - FSMState next_state = ExecuteTransitionAndGetNextState(session, event_args); - if (SessionExists(session_id)) // Session might be deleted. 
- session.state = next_state; - + ExecuteTransitionAndGetNextState(session, session_state, event); is_dispatching_event_ = false; } @@ -364,214 +339,109 @@ void SpeechRecognitionManagerImpl::DispatchEvent(int session_id, // All the events received by the SpeechRecognizerImpl instances (one for each // session) are always routed to the SpeechRecognitionEventListener(s) // regardless the choices taken in this FSM. -SpeechRecognitionManagerImpl::FSMState -SpeechRecognitionManagerImpl::ExecuteTransitionAndGetNextState( - Session& session, const FSMEventArgs& event_args) { - // Some notes for the code below: - // - A session can be deleted only if it is not active, thus only if it ended - // spontaneously or we issued a prior SessionAbort. In these cases, we must - // wait for a RECOGNITION_ENDED event (which is guaranteed to come always at - // last by the SpeechRecognizer) in order to free resources gracefully. - // - Use SessionDelete only when absolutely sure that the recognizer is not - // active. Prefer SessionAbort, which will do it gracefully, otherwise. - // - Since this class methods are publicly exported, START, ABORT, - // STOP_CAPTURE and SET_BACKGROUND events can arrive in every moment from - // the outside wild wolrd, even if they make no sense. - const FSMEvent event = event_args.event; - switch (session.state) { - case STATE_IDLE: - // Session has just been created or had an error while interactive. +void SpeechRecognitionManagerImpl::ExecuteTransitionAndGetNextState( + const Session& session, FSMState session_state, FSMEvent event) { + // Note: since we're not tracking the state of the recognizer object, rather + // we're directly retrieving it (through GetSessionState), we see its events + // (that are AUDIO_ENDED and RECOGNITION_ENDED) after its state evolution + // (e.g., when we receive the AUDIO_ENDED event, the recognizer has just + // completed the transition from CAPTURING_AUDIO to WAITING_FOR_RESULT, thus + // we perceive the AUDIO_ENDED event in WAITING_FOR_RESULT). + // This makes the code below a bit tricky but avoids a lot of code for + // tracking and reconstructing asynchronously the state of the recognizer. + switch (session_state) { + case SESSION_STATE_IDLE: switch (event) { case EVENT_START: - return SessionStart(session, event_args); + return SessionStart(session); case EVENT_ABORT: - case EVENT_SET_BACKGROUND: - return SessionAbort(session, event_args); - case EVENT_STOP_CAPTURE: case EVENT_RECOGNITION_ENDED: - // In case of error, we come back in this state before receiving the - // OnRecognitionEnd event, thus EVENT_RECOGNITION_ENDED is feasible. - return DoNothing(session, event_args); - case EVENT_RECOGNITION_RESULT: - case EVENT_RECOGNITION_ERROR: - return NotFeasible(session, event_args); + return SessionDelete(session); + case EVENT_STOP_CAPTURE: + case EVENT_AUDIO_ENDED: + return; } break; - case STATE_INTERACTIVE: - // The recognizer can be either capturing audio or waiting for a result. + case SESSION_STATE_CAPTURING_AUDIO: switch (event) { - case EVENT_RECOGNITION_RESULT: - // TODO(primiano) Valid only in single shot mode. Review in next CLs. 
- return SessionSetBackground(session, event_args); - case EVENT_SET_BACKGROUND: - return SessionAbortIfCapturingAudioOrBackground(session, event_args); case EVENT_STOP_CAPTURE: - return SessionStopAudioCapture(session, event_args); + return SessionStopAudioCapture(session); case EVENT_ABORT: - return SessionAbort(session, event_args); - case EVENT_RECOGNITION_ERROR: - return SessionReportError(session, event_args); - case EVENT_RECOGNITION_ENDED: - // If we're still interactive it means that no result was received - // in the meanwhile (otherwise we'd have been sent to background). - return SessionReportNoMatch(session, event_args); + return SessionAbort(session); case EVENT_START: - return DoNothing(session, event_args); - } - break; - case STATE_BACKGROUND: - switch (event) { - case EVENT_ABORT: - return SessionAbort(session, event_args); + return; + case EVENT_AUDIO_ENDED: case EVENT_RECOGNITION_ENDED: - return SessionDelete(session, event_args); - case EVENT_START: - case EVENT_STOP_CAPTURE: - case EVENT_RECOGNITION_RESULT: - case EVENT_RECOGNITION_ERROR: - return DoNothing(session, event_args); - case EVENT_SET_BACKGROUND: - return NotFeasible(session, event_args); + return NotFeasible(session, event); } break; - case STATE_WAITING_FOR_DELETION: + case SESSION_STATE_WAITING_FOR_RESULT: switch (event) { - case EVENT_RECOGNITION_ENDED: - return SessionDelete(session, event_args); case EVENT_ABORT: + return SessionAbort(session); + case EVENT_AUDIO_ENDED: + return ResetCapturingSessionId(session); case EVENT_START: case EVENT_STOP_CAPTURE: - case EVENT_SET_BACKGROUND: - case EVENT_RECOGNITION_RESULT: - case EVENT_RECOGNITION_ERROR: - return DoNothing(session, event_args); + return; + case EVENT_RECOGNITION_ENDED: + return NotFeasible(session, event); } break; } - return NotFeasible(session, event_args); + return NotFeasible(session, event); +} + +SpeechRecognitionManagerImpl::FSMState +SpeechRecognitionManagerImpl::GetSessionState(int session_id) const { + const Session& session = GetSession(session_id); + if (!session.recognizer.get() || !session.recognizer->IsActive()) + return SESSION_STATE_IDLE; + if (session.recognizer->IsCapturingAudio()) + return SESSION_STATE_CAPTURING_AUDIO; + return SESSION_STATE_WAITING_FOR_RESULT; } // ----------- Contract for all the FSM evolution functions below ------------- // - Are guaranteed to be executed in the IO thread; // - Are guaranteed to be not reentrant (themselves and each other); -// - event_args members are guaranteed to be stable during the call; -SpeechRecognitionManagerImpl::FSMState -SpeechRecognitionManagerImpl::SessionStart(Session& session, - const FSMEventArgs& event_args) { - if (interactive_session_id_ != kSessionIDInvalid && delegate_) - delegate_->DoClose(interactive_session_id_); - interactive_session_id_ = session.id; - if (delegate_) - delegate_->ShowRecognitionRequested(session.id); +void SpeechRecognitionManagerImpl::SessionStart(const Session& session) { + session_id_capturing_audio_ = session.id; session.recognizer->StartRecognition(); - return STATE_INTERACTIVE; } -SpeechRecognitionManagerImpl::FSMState -SpeechRecognitionManagerImpl::SessionAbort(Session& session, - const FSMEventArgs& event_args) { - if (interactive_session_id_ == session.id) { - interactive_session_id_ = kSessionIDInvalid; - if (delegate_) - delegate_->DoClose(session.id); - } - - // If abort was requested while the recognizer was inactive, delete directly. 
- if (session.recognizer == NULL || !session.recognizer->IsActive()) - return SessionDelete(session, event_args); - - // Otherwise issue an abort and delete gracefully, waiting for a - // RECOGNITION_ENDED event first. +void SpeechRecognitionManagerImpl::SessionAbort(const Session& session) { + if (session_id_capturing_audio_ == session.id) + session_id_capturing_audio_ = kSessionIDInvalid; + DCHECK(session.recognizer.get() && session.recognizer->IsActive()); session.recognizer->AbortRecognition(); - return STATE_WAITING_FOR_DELETION; -} - -SpeechRecognitionManagerImpl::FSMState -SpeechRecognitionManagerImpl::SessionStopAudioCapture( - Session& session, const FSMEventArgs& event_args) { - DCHECK(session.recognizer != NULL); - DCHECK(session.recognizer->IsActive()); - if (session.recognizer->IsCapturingAudio()) - session.recognizer->StopAudioCapture(); - return STATE_INTERACTIVE; -} - -SpeechRecognitionManagerImpl::FSMState -SpeechRecognitionManagerImpl::SessionAbortIfCapturingAudioOrBackground( - Session& session, const FSMEventArgs& event_args) { - DCHECK_EQ(interactive_session_id_, session.id); - - DCHECK(session.recognizer != NULL); - DCHECK(session.recognizer->IsActive()); - if (session.recognizer->IsCapturingAudio()) - return SessionAbort(session, event_args); - - interactive_session_id_ = kSessionIDInvalid; - if (delegate_) - delegate_->DoClose(session.id); - return STATE_BACKGROUND; -} - - -SpeechRecognitionManagerImpl::FSMState -SpeechRecognitionManagerImpl::SessionSetBackground( - Session& session, const FSMEventArgs& event_args) { - DCHECK_EQ(interactive_session_id_, session.id); - interactive_session_id_ = kSessionIDInvalid; - if (delegate_) - delegate_->DoClose(session.id); - return STATE_BACKGROUND; } -SpeechRecognitionManagerImpl::FSMState -SpeechRecognitionManagerImpl::SessionReportError( - Session& session, const FSMEventArgs& event_args) { - DCHECK_EQ(interactive_session_id_, session.id); - if (delegate_) - delegate_->ShowError(session.id, event_args.speech_error); - return STATE_IDLE; +void SpeechRecognitionManagerImpl::SessionStopAudioCapture( + const Session& session) { + DCHECK(session.recognizer.get() && session.recognizer->IsCapturingAudio()); + session.recognizer->StopAudioCapture(); } -SpeechRecognitionManagerImpl::FSMState -SpeechRecognitionManagerImpl::SessionReportNoMatch( - Session& session, const FSMEventArgs& event_args) { - DCHECK_EQ(interactive_session_id_, session.id); - if (delegate_) { - delegate_->ShowError( - session.id, - SpeechRecognitionError(content::SPEECH_RECOGNITION_ERROR_NO_MATCH)); - } - return STATE_IDLE; +void SpeechRecognitionManagerImpl::ResetCapturingSessionId( + const Session& session) { + DCHECK_EQ(session_id_capturing_audio_, session.id); + session_id_capturing_audio_ = kSessionIDInvalid; } -SpeechRecognitionManagerImpl::FSMState -SpeechRecognitionManagerImpl::SessionDelete(Session& session, - const FSMEventArgs& event_args) { +void SpeechRecognitionManagerImpl::SessionDelete(const Session& session) { DCHECK(session.recognizer == NULL || !session.recognizer->IsActive()); - if (interactive_session_id_ == session.id) { - interactive_session_id_ = kSessionIDInvalid; - if (delegate_) - delegate_->DoClose(session.id); - } + if (session_id_capturing_audio_ == session.id) + session_id_capturing_audio_ = kSessionIDInvalid; sessions_.erase(session.id); - // Next state is irrelevant, the session will be deleted afterwards. 
- return STATE_WAITING_FOR_DELETION; -} - -SpeechRecognitionManagerImpl::FSMState -SpeechRecognitionManagerImpl::DoNothing(Session& session, - const FSMEventArgs& event_args) { - return session.state; } -SpeechRecognitionManagerImpl::FSMState -SpeechRecognitionManagerImpl::NotFeasible(Session& session, - const FSMEventArgs& event_args) { - NOTREACHED() << "Unfeasible event " << event_args.event - << " in state " << session.state +void SpeechRecognitionManagerImpl::NotFeasible(const Session& session, + FSMEvent event) { + NOTREACHED() << "Unfeasible event " << event + << " in state " << GetSessionState(session.id) << " for session " << session.id; - return session.state; } int SpeechRecognitionManagerImpl::GetNextSessionID() { @@ -586,13 +456,29 @@ bool SpeechRecognitionManagerImpl::SessionExists(int session_id) const { return sessions_.find(session_id) != sessions_.end(); } -SpeechRecognitionEventListener* SpeechRecognitionManagerImpl::GetListener( - int session_id) const { +const SpeechRecognitionManagerImpl::Session& +SpeechRecognitionManagerImpl::GetSession(int session_id) const { + DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO)); SessionsTable::const_iterator iter = sessions_.find(session_id); DCHECK(iter != sessions_.end()); - return iter->second.event_listener; + return iter->second; } +SpeechRecognitionEventListener* SpeechRecognitionManagerImpl::GetListener( + int session_id) const { + const Session& session = GetSession(session_id); + return session.listener_is_active ? session.config.event_listener : NULL; +} + +SpeechRecognitionEventListener* +SpeechRecognitionManagerImpl::GetDelegateListener() const { + return delegate_ ? delegate_->GetEventListener() : NULL; +} + +const SpeechRecognitionSessionConfig& +SpeechRecognitionManagerImpl::GetSessionConfig(int session_id) const { + return GetSession(session_id).config; +} bool SpeechRecognitionManagerImpl::HasAudioInputDevices() { return BrowserMainLoop::GetAudioManager()->HasAudioInputDevices(); @@ -623,18 +509,9 @@ void SpeechRecognitionManagerImpl::ShowAudioInputSettings() { audio_manager->ShowAudioInputSettings(); } -SpeechRecognitionManagerImpl::FSMEventArgs::FSMEventArgs(FSMEvent event_value) - : event(event_value), - speech_error(content::SPEECH_RECOGNITION_ERROR_NONE) { -} - -SpeechRecognitionManagerImpl::FSMEventArgs::~FSMEventArgs() { -} - SpeechRecognitionManagerImpl::Session::Session() : id(kSessionIDInvalid), - event_listener(NULL), - state(STATE_IDLE) { + listener_is_active(true) { } SpeechRecognitionManagerImpl::Session::~Session() { |