author     sergeyu@chromium.org <sergeyu@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>   2013-05-16 18:19:19 +0000
committer  sergeyu@chromium.org <sergeyu@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>   2013-05-16 18:19:19 +0000
commit     54a83ee95257f62bd1c06238449e1598f93b1a42 (patch)
tree       d01195c2a2581cfd77c11210ca3c60c1ce1a96e3 /content/browser/speech
parent     7e3461079b02605faf2a1e767bfad03bb5e8eaf1 (diff)
Revert 200559 "Replace MediaStreamUIController with MediaStreamU..."
> Replace MediaStreamUIController with MediaStreamUIProxy.
>
> Previously a single object MediaStreamUIController was used to control UI for
> all streams. Replaced it with a per-stream MediaStreamUIProxy that simplifies
> code in many places.
> Also moved media request queueing logic from content layer to chrome. Now
> different types of requests may be queued differently (e.g. there is no
> reason to block screen capture requests on webcam infobar).
>
> This change was previously landed in 197222 and 197755 and reverted in
> 197242 and 197768. Each time it caused test failures on some Mac bots.
> The failure seems to be caused by bug 236846 which has been fixed.
>
> TBR=tommi@chromium.org
>
> Review URL: https://codereview.chromium.org/14750011
TBR=sergeyu@chromium.org
Review URL: https://codereview.chromium.org/14972023
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@200579 0039d316-1c4b-4281-b951-d872f2087c98
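To make the shape of the change easier to follow before reading the diff: the revert swaps MediaRequestPermissionCallback(int session_id, const MediaStreamDevices& devices, scoped_ptr<MediaStreamUIProxy> stream_ui) back to MediaRequestPermissionCallback(const std::string& label, const MediaStreamDevices& devices), which identifies the pending request by its label and scans the session table for it. Below is a minimal, self-contained sketch of the two callback shapes; this is plain C++, not Chromium code, and Manager, Session, and Devices are made-up stand-ins for the real classes in the diff.

#include <functional>
#include <map>
#include <string>
#include <vector>

// Stand-in for content::MediaStreamDevices.
using Devices = std::vector<std::string>;

// Stand-in for the per-session bookkeeping in the diff.
struct Session {
  std::string label;  // non-empty while a permission request is pending
  Devices devices;    // approved devices, copied back for UI indication
};

class Manager {
 public:
  int Create(const std::string& label) {
    int id = next_id_++;
    sessions_[id].label = label;
    return id;
  }

  // Shape restored by the revert: the callback carries only the request
  // label, so the manager scans the session table for the matching entry.
  void OnPermissionByLabel(const std::string& label, const Devices& devices) {
    for (auto& entry : sessions_) {
      if (entry.second.label == label) {
        if (!devices.empty())
          entry.second.devices = devices;  // request allowed
        entry.second.label.clear();        // request finished
        break;
      }
    }
  }

  // Shape introduced by CL 200559 (removed by this revert): the session id is
  // bound into the callback up front, so no scan is needed.
  std::function<void(const Devices&)> MakePerSessionCallback(int session_id) {
    return [this, session_id](const Devices& devices) {
      auto it = sessions_.find(session_id);
      if (it != sessions_.end() && !devices.empty())
        it->second.devices = devices;
    };
  }

 private:
  std::map<int, Session> sessions_;
  int next_id_ = 1;
};

int main() {
  Manager m;
  int id = m.Create("label-1");
  m.OnPermissionByLabel("label-1", Devices{"mic-0"});  // label-based lookup
  m.MakePerSessionCallback(id)(Devices{"mic-0"});      // id bound in callback
  return 0;
}

Binding the session id (or a per-stream proxy) into the callback removes the linear scan over the session table, which is part of what the original change description means by simplifying code in many places.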
Diffstat (limited to 'content/browser/speech')
-rw-r--r--  content/browser/speech/speech_recognition_manager_impl.cc  165
-rw-r--r--  content/browser/speech/speech_recognition_manager_impl.h    24
2 files changed, 97 insertions, 92 deletions
diff --git a/content/browser/speech/speech_recognition_manager_impl.cc b/content/browser/speech/speech_recognition_manager_impl.cc
index 402dd00..ea49579 100644
--- a/content/browser/speech/speech_recognition_manager_impl.cc
+++ b/content/browser/speech/speech_recognition_manager_impl.cc
@@ -7,7 +7,6 @@
 #include "base/bind.h"
 #include "content/browser/browser_main_loop.h"
 #include "content/browser/renderer_host/media/media_stream_manager.h"
-#include "content/browser/renderer_host/media/media_stream_ui_proxy.h"
 #include "content/browser/speech/google_one_shot_remote_engine.h"
 #include "content/browser/speech/google_streaming_remote_engine.h"
 #include "content/browser/speech/speech_recognition_engine.h"
@@ -69,14 +68,7 @@ SpeechRecognitionManagerImpl::SpeechRecognitionManagerImpl()
 SpeechRecognitionManagerImpl::~SpeechRecognitionManagerImpl() {
   DCHECK(g_speech_recognition_manager_impl);
   g_speech_recognition_manager_impl = NULL;
-
-  for (SessionsTable::iterator it = sessions_.begin(); it != sessions_.end();
-       ++it) {
-    // MediaStreamUIProxy must be deleted on the IO thread.
-    BrowserThread::DeleteSoon(BrowserThread::IO, FROM_HERE,
-                              it->second->ui.release());
-    delete it->second;
-  }
+  // Recognition sessions will be aborted by the corresponding destructors.
   sessions_.clear();
 }
 
@@ -87,11 +79,10 @@ int SpeechRecognitionManagerImpl::CreateSession(
   const int session_id = GetNextSessionID();
   DCHECK(!SessionExists(session_id));
   // Set-up the new session.
-  Session* session = new Session();
-  sessions_[session_id] = session;
-  session->id = session_id;
-  session->config = config;
-  session->context = config.initial_context;
+  Session& session = sessions_[session_id];
+  session.id = session_id;
+  session.config = config;
+  session.context = config.initial_context;
 
   std::string hardware_info;
   bool can_report_metrics = false;
@@ -126,7 +117,7 @@ int SpeechRecognitionManagerImpl::CreateSession(
   // The legacy api cannot use continuous mode.
   DCHECK(!config.is_legacy_api || !config.continuous);
 
-  session->recognizer = new SpeechRecognizer(
+  session.recognizer = new SpeechRecognizer(
       this,
      session_id,
      !config.continuous,
@@ -171,7 +162,7 @@ void SpeechRecognitionManagerImpl::RecognitionAllowedCallback(int session_id,
   if (ask_user) {
     SessionsTable::iterator iter = sessions_.find(session_id);
     DCHECK(iter != sessions_.end());
-    SpeechRecognitionSessionContext& context = iter->second->context;
+    SpeechRecognitionSessionContext& context = iter->second.context;
     context.label =
         BrowserMainLoop::GetMediaStreamManager()->MakeMediaAccessRequest(
             context.render_process_id,
@@ -180,7 +171,8 @@ void SpeechRecognitionManagerImpl::RecognitionAllowedCallback(int session_id,
             GURL(context.context_name),
             base::Bind(
                 &SpeechRecognitionManagerImpl::MediaRequestPermissionCallback,
-                weak_factory_.GetWeakPtr(), session_id));
+                weak_factory_.GetWeakPtr()));
+    return;
   }
 #endif  // defined(OS_IOS)
 
@@ -205,29 +197,26 @@ void SpeechRecognitionManagerImpl::RecognitionAllowedCallback(int session_id,
 }
 
 void SpeechRecognitionManagerImpl::MediaRequestPermissionCallback(
-    int session_id,
-    const MediaStreamDevices& devices,
-    scoped_ptr<MediaStreamUIProxy> stream_ui) {
+    const std::string& label, const MediaStreamDevices& devices) {
   DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO));
+  for (SessionsTable::iterator iter = sessions_.begin();
+       iter != sessions_.end(); ++iter) {
+    if (iter->second.context.label == label) {
+      bool is_allowed = false;
+      if (!devices.empty()) {
+        // Copy the approved devices array to the context for UI indication.
+        iter->second.context.devices = devices;
+        is_allowed = true;
+      }
 
-  SessionsTable::iterator iter = sessions_.find(session_id);
-  if (iter == sessions_.end())
-    return;
+      // Clear the label to indicate the request has been done.
+      iter->second.context.label.clear();
 
-  bool is_allowed = !devices.empty();
-  if (is_allowed) {
-    // Copy the approved devices array to the context for UI indication.
-    iter->second->context.devices = devices;
-
-    // Save the UI object.
-    iter->second->ui = stream_ui.Pass();
+      // Notify the recognition about the request result.
+      RecognitionAllowedCallback(iter->first, false, is_allowed);
+      break;
+    }
   }
-
-  // Clear the label to indicate the request has been done.
-  iter->second->context.label.clear();
-
-  // Notify the recognition about the request result.
-  RecognitionAllowedCallback(iter->first, false, is_allowed);
 }
 
 void SpeechRecognitionManagerImpl::AbortSession(int session_id) {
@@ -235,8 +224,12 @@ void SpeechRecognitionManagerImpl::AbortSession(int session_id) {
   if (!SessionExists(session_id))
     return;
 
-  SessionsTable::iterator iter = sessions_.find(session_id);
-  iter->second->ui.reset();
+#if !defined(OS_IOS)
+  const SpeechRecognitionSessionContext& context =
+      GetSessionContext(session_id);
+  if (!context.label.empty())
+    BrowserMainLoop::GetMediaStreamManager()->CancelRequest(context.label);
+#endif  // !defined(OS_IOS)
 
   base::MessageLoop::current()->PostTask(
       FROM_HERE,
@@ -251,8 +244,12 @@ void SpeechRecognitionManagerImpl::StopAudioCaptureForSession(int session_id) {
   if (!SessionExists(session_id))
     return;
 
-  SessionsTable::iterator iter = sessions_.find(session_id);
-  iter->second->ui.reset();
+#if !defined(OS_IOS)
+  const SpeechRecognitionSessionContext& context =
+      GetSessionContext(session_id);
+  if (!context.label.empty())
+    BrowserMainLoop::GetMediaStreamManager()->CancelRequest(context.label);
+#endif  // !defined(OS_IOS)
 
   base::MessageLoop::current()->PostTask(
      FROM_HERE,
@@ -272,11 +269,15 @@ void SpeechRecognitionManagerImpl::OnRecognitionStart(int session_id) {
   if (!SessionExists(session_id))
    return;
 
-  SessionsTable::iterator iter = sessions_.find(session_id);
-  if (iter->second->ui) {
-    // Notify the UI that the devices are being used.
-    iter->second->ui->OnStarted(base::Closure());
+#if !defined(OS_IOS)
+  const SpeechRecognitionSessionContext& context =
+      GetSessionContext(session_id);
+  if (!context.devices.empty()) {
+    // Notify the UI the devices are being used.
+    BrowserMainLoop::GetMediaStreamManager()->NotifyUIDevicesOpened(
+        context.label);
   }
+#endif  // !defined(OS_IOS)
 
   DCHECK_EQ(primary_session_id_, session_id);
   if (SpeechRecognitionEventListener* delegate_listener = GetDelegateListener())
@@ -390,6 +391,15 @@ void SpeechRecognitionManagerImpl::OnRecognitionEnd(int session_id) {
   DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO));
   if (!SessionExists(session_id))
     return;
+#if !defined(OS_IOS)
+  const SpeechRecognitionSessionContext& context =
+      GetSessionContext(session_id);
+  if (!context.devices.empty()) {
+    // Notify the UI the devices has been closed.
+    BrowserMainLoop::GetMediaStreamManager()->NotifyUIDevicesClosed(
+        context.label);
+  }
+#endif  // !defined(OS_IOS)
 
   if (SpeechRecognitionEventListener* delegate_listener = GetDelegateListener())
     delegate_listener->OnRecognitionEnd(session_id);
@@ -409,7 +419,7 @@ int SpeechRecognitionManagerImpl::GetSession(
   SessionsTable::const_iterator iter;
   for(iter = sessions_.begin(); iter != sessions_.end(); ++iter) {
     const int session_id = iter->first;
-    const SpeechRecognitionSessionContext& context = iter->second->context;
+    const SpeechRecognitionSessionContext& context = iter->second.context;
     if (context.render_process_id == render_process_id &&
         context.render_view_id == render_view_id &&
         context.request_id == request_id) {
@@ -421,7 +431,7 @@ int SpeechRecognitionManagerImpl::GetSession(
 
 SpeechRecognitionSessionContext
 SpeechRecognitionManagerImpl::GetSessionContext(int session_id) const {
-  return GetSession(session_id)->context;
+  return GetSession(session_id).context;
 }
 
 void SpeechRecognitionManagerImpl::AbortAllSessionsForListener(
@@ -432,10 +442,10 @@ void SpeechRecognitionManagerImpl::AbortAllSessionsForListener(
   DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO));
   for (SessionsTable::iterator it = sessions_.begin(); it != sessions_.end();
        ++it) {
-    Session* session = it->second;
-    if (session->config.event_listener == listener) {
-      AbortSession(session->id);
-      session->listener_is_active = false;
+    Session& session = it->second;
+    if (session.config.event_listener == listener) {
+      AbortSession(session.id);
+      session.listener_is_active = false;
     }
   }
 }
@@ -446,10 +456,10 @@ void SpeechRecognitionManagerImpl::AbortAllSessionsForRenderView(
   DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO));
   for (SessionsTable::iterator it = sessions_.begin(); it != sessions_.end();
        ++it) {
-    Session* session = it->second;
-    if (session->context.render_process_id == render_process_id &&
-        session->context.render_view_id == render_view_id) {
-      AbortSession(session->id);
+    Session& session = it->second;
+    if (session.context.render_process_id == render_process_id &&
+        session.context.render_view_id == render_view_id) {
+      AbortSession(session.id);
     }
   }
 }
@@ -464,7 +474,7 @@ void SpeechRecognitionManagerImpl::DispatchEvent(int session_id,
   if (!SessionExists(session_id))
     return;
 
-  Session* session = GetSession(session_id);
+  const Session& session = GetSession(session_id);
   FSMState session_state = GetSessionState(session_id);
   DCHECK_LE(session_state, SESSION_STATE_MAX_VALUE);
   DCHECK_LE(event, EVENT_MAX_VALUE);
@@ -484,7 +494,7 @@ void SpeechRecognitionManagerImpl::DispatchEvent(int session_id,
 // session) are always routed to the SpeechRecognitionEventListener(s)
 // regardless the choices taken in this FSM.
 void SpeechRecognitionManagerImpl::ExecuteTransitionAndGetNextState(
-    Session* session, FSMState session_state, FSMEvent event) {
+    const Session& session, FSMState session_state, FSMEvent event) {
   // Note: since we're not tracking the state of the recognizer object, rather
   // we're directly retrieving it (through GetSessionState), we see its events
   // (that are AUDIO_ENDED and RECOGNITION_ENDED) after its state evolution
@@ -497,13 +507,13 @@ void SpeechRecognitionManagerImpl::ExecuteTransitionAndGetNextState(
     case SESSION_STATE_IDLE:
       switch (event) {
         case EVENT_START:
-          return SessionStart(*session);
+          return SessionStart(session);
         case EVENT_ABORT:
-          return SessionAbort(*session);
+          return SessionAbort(session);
        case EVENT_RECOGNITION_ENDED:
          return SessionDelete(session);
        case EVENT_STOP_CAPTURE:
-          return SessionStopAudioCapture(*session);
+          return SessionStopAudioCapture(session);
        case EVENT_AUDIO_ENDED:
          return;
      }
@@ -511,39 +521,39 @@ void SpeechRecognitionManagerImpl::ExecuteTransitionAndGetNextState(
     case SESSION_STATE_CAPTURING_AUDIO:
       switch (event) {
         case EVENT_STOP_CAPTURE:
-          return SessionStopAudioCapture(*session);
+          return SessionStopAudioCapture(session);
         case EVENT_ABORT:
-          return SessionAbort(*session);
+          return SessionAbort(session);
         case EVENT_START:
           return;
         case EVENT_AUDIO_ENDED:
         case EVENT_RECOGNITION_ENDED:
-          return NotFeasible(*session, event);
+          return NotFeasible(session, event);
       }
       break;
     case SESSION_STATE_WAITING_FOR_RESULT:
       switch (event) {
         case EVENT_ABORT:
-          return SessionAbort(*session);
+          return SessionAbort(session);
         case EVENT_AUDIO_ENDED:
-          return ResetCapturingSessionId(*session);
+          return ResetCapturingSessionId(session);
         case EVENT_START:
         case EVENT_STOP_CAPTURE:
           return;
         case EVENT_RECOGNITION_ENDED:
-          return NotFeasible(*session, event);
+          return NotFeasible(session, event);
       }
      break;
   }
-  return NotFeasible(*session, event);
+  return NotFeasible(session, event);
 }
 
 SpeechRecognitionManagerImpl::FSMState
 SpeechRecognitionManagerImpl::GetSessionState(int session_id) const {
-  Session* session = GetSession(session_id);
-  if (!session->recognizer.get() || !session->recognizer->IsActive())
+  const Session& session = GetSession(session_id);
+  if (!session.recognizer.get() || !session.recognizer->IsActive())
     return SESSION_STATE_IDLE;
-  if (session->recognizer->IsCapturingAudio())
+  if (session.recognizer->IsCapturingAudio())
     return SESSION_STATE_CAPTURING_AUDIO;
   return SESSION_STATE_WAITING_FOR_RESULT;
 }
@@ -576,12 +586,11 @@ void SpeechRecognitionManagerImpl::ResetCapturingSessionId(
   primary_session_id_ = kSessionIDInvalid;
 }
 
-void SpeechRecognitionManagerImpl::SessionDelete(Session* session) {
-  DCHECK(session->recognizer == NULL || !session->recognizer->IsActive());
-  if (primary_session_id_ == session->id)
+void SpeechRecognitionManagerImpl::SessionDelete(const Session& session) {
+  DCHECK(session.recognizer == NULL || !session.recognizer->IsActive());
+  if (primary_session_id_ == session.id)
     primary_session_id_ = kSessionIDInvalid;
-  sessions_.erase(session->id);
-  delete session;
+  sessions_.erase(session.id);
 }
 
 void SpeechRecognitionManagerImpl::NotFeasible(const Session& session,
@@ -603,7 +612,7 @@ bool SpeechRecognitionManagerImpl::SessionExists(int session_id) const {
   return sessions_.find(session_id) != sessions_.end();
 }
 
-SpeechRecognitionManagerImpl::Session*
+const SpeechRecognitionManagerImpl::Session&
 SpeechRecognitionManagerImpl::GetSession(int session_id) const {
   DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO));
   SessionsTable::const_iterator iter = sessions_.find(session_id);
@@ -613,8 +622,8 @@ SpeechRecognitionManagerImpl::GetSession(int session_id) const {
 
 SpeechRecognitionEventListener* SpeechRecognitionManagerImpl::GetListener(
     int session_id) const {
-  Session* session = GetSession(session_id);
-  return session->listener_is_active ? session->config.event_listener : NULL;
+  const Session& session = GetSession(session_id);
+  return session.listener_is_active ? session.config.event_listener : NULL;
 }
 
 SpeechRecognitionEventListener*
@@ -624,7 +633,7 @@ SpeechRecognitionManagerImpl::GetDelegateListener() const {
 
 const SpeechRecognitionSessionConfig&
 SpeechRecognitionManagerImpl::GetSessionConfig(int session_id) const {
-  return GetSession(session_id)->config;
+  return GetSession(session_id).config;
 }
 
 bool SpeechRecognitionManagerImpl::HasAudioInputDevices() {
diff --git a/content/browser/speech/speech_recognition_manager_impl.h b/content/browser/speech/speech_recognition_manager_impl.h
index af2dbda..5f4a3d3 100644
--- a/content/browser/speech/speech_recognition_manager_impl.h
+++ b/content/browser/speech/speech_recognition_manager_impl.h
@@ -21,7 +21,6 @@ namespace content {
 
 class BrowserMainLoop;
-class MediaStreamUIProxy;
 class SpeechRecognitionManagerDelegate;
 class SpeechRecognizer;
 
@@ -124,7 +123,6 @@ class CONTENT_EXPORT SpeechRecognitionManagerImpl :
     SpeechRecognitionSessionConfig config;
     SpeechRecognitionSessionContext context;
     scoped_refptr<SpeechRecognizer> recognizer;
-    scoped_ptr<MediaStreamUIProxy> ui;
   };
 
   // Callback issued by the SpeechRecognitionManagerDelegate for reporting
@@ -133,21 +131,19 @@ class CONTENT_EXPORT SpeechRecognitionManagerImpl :
                                  bool ask_user,
                                  bool is_allowed);
 
-  // Callback to get back the result of a media request. |devices| is an array
-  // of devices approved to be used for the request, |devices| is empty if the
-  // users deny the request.
-  void MediaRequestPermissionCallback(int session_id,
-                                      const MediaStreamDevices& devices,
-                                      scoped_ptr<MediaStreamUIProxy> stream_ui);
+  // Callback to get back the result of a media request. |label| is the string
+  // to identify the request; |devices| is an array of devices approved to be
+  // used for the request, |devices| is empty if the users deny the request.
+  void MediaRequestPermissionCallback(const std::string& label,
+                                      const MediaStreamDevices& devices);
 
   // Entry point for pushing any external event into the session handling FSM.
   void DispatchEvent(int session_id, FSMEvent event);
 
   // Defines the behavior of the session handling FSM, selecting the appropriate
   // transition according to the session, its current state and the event.
-  void ExecuteTransitionAndGetNextState(Session* session,
-                                        FSMState session_state,
-                                        FSMEvent event);
+  void ExecuteTransitionAndGetNextState(
+      const Session& session, FSMState session_state, FSMEvent event);
 
   // Retrieves the state of the session, enquiring directly the recognizer.
   FSMState GetSessionState(int session_id) const;
@@ -157,16 +153,16 @@ class CONTENT_EXPORT SpeechRecognitionManagerImpl :
   void SessionAbort(const Session& session);
   void SessionStopAudioCapture(const Session& session);
   void ResetCapturingSessionId(const Session& session);
-  void SessionDelete(Session* session);
+  void SessionDelete(const Session& session);
   void NotFeasible(const Session& session, FSMEvent event);
 
   bool SessionExists(int session_id) const;
-  Session* GetSession(int session_id) const;
+  const Session& GetSession(int session_id) const;
   SpeechRecognitionEventListener* GetListener(int session_id) const;
   SpeechRecognitionEventListener* GetDelegateListener() const;
   int GetNextSessionID();
 
-  typedef std::map<int, Session*> SessionsTable;
+  typedef std::map<int, Session> SessionsTable;
   SessionsTable sessions_;
   int primary_session_id_;
   int last_session_id_;
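A side note on the container change that recurs throughout the diff: the revert switches typedef std::map<int, Session*> SessionsTable back to std::map<int, Session>. With the value map the container owns the sessions, so erase()/clear() destroy them; with the pointer map every removal needs the explicit delete that the removed destructor loop performed (plus a BrowserThread::DeleteSoon for the MediaStreamUIProxy member, which the removed comment notes must be deleted on the IO thread). A minimal, self-contained sketch of that ownership difference; plain C++, not Chromium code:

#include <iostream>
#include <map>

struct Session {
  int id = 0;
  ~Session() { std::cout << "session " << id << " destroyed\n"; }
};

int main() {
  // Value map (what the revert restores): the container owns the Sessions,
  // so erase()/clear() run ~Session() automatically.
  std::map<int, Session> by_value;
  by_value[1].id = 1;
  by_value.erase(1);  // prints "session 1 destroyed"

  // Pointer map (what the pre-revert code used): erase() only removes the
  // pointer, so each entry must be deleted by hand or it leaks.
  std::map<int, Session*> by_pointer;
  Session* raw = new Session;
  raw->id = 2;
  by_pointer[2] = raw;
  delete by_pointer[2];  // explicit cleanup, analogous to "delete it->second"
  by_pointer.erase(2);
  return 0;
}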