diff options
21 files changed, 104 insertions, 688 deletions
diff --git a/chrome/browser/extensions/extension_function_registry.cc b/chrome/browser/extensions/extension_function_registry.cc index 4e29f7e..c7d0ce2c9 100644 --- a/chrome/browser/extensions/extension_function_registry.cc +++ b/chrome/browser/extensions/extension_function_registry.cc @@ -260,12 +260,10 @@ void ExtensionFunctionRegistry::ResetFunctions() { RegisterFunction<extensions::OmniboxSendSuggestionsFunction>(); RegisterFunction<extensions::OmniboxSetDefaultSuggestionFunction>(); -#if defined(ENABLE_INPUT_SPEECH) // Speech input. RegisterFunction<StartSpeechInputFunction>(); RegisterFunction<StopSpeechInputFunction>(); RegisterFunction<IsRecordingSpeechInputFunction>(); -#endif #if defined(TOOLKIT_VIEWS) // Input. diff --git a/chrome/chrome_browser.gypi b/chrome/chrome_browser.gypi index 1c8a8c5..411d265 100644 --- a/chrome/chrome_browser.gypi +++ b/chrome/chrome_browser.gypi @@ -4504,8 +4504,7 @@ }], ['input_speech==0', { 'sources/': [ - ['exclude', '^browser/speech/*speech_recognition*'], - ['exclude', '^browser/speech/*speech_input*'], + ['exclude', '^browser/speech/'], ], }], ['notifications==0', { diff --git a/content/browser/renderer_host/render_process_host_impl.cc b/content/browser/renderer_host/render_process_host_impl.cc index e2646d1..22cd22c 100644 --- a/content/browser/renderer_host/render_process_host_impl.cc +++ b/content/browser/renderer_host/render_process_host_impl.cc @@ -78,8 +78,6 @@ #include "content/browser/renderer_host/socket_stream_dispatcher_host.h" #include "content/browser/renderer_host/text_input_client_message_filter.h" #include "content/browser/resolve_proxy_msg_helper.h" -#include "content/browser/speech/input_tag_speech_dispatcher_host.h" -#include "content/browser/speech/speech_recognition_dispatcher_host.h" #include "content/browser/trace_message_filter.h" #include "content/browser/worker_host/worker_message_filter.h" #include "content/common/child_process_host_impl.h" @@ -117,6 +115,10 @@ #include 
"content/common/font_cache_dispatcher_win.h" #endif +#if defined(ENABLE_INPUT_SPEECH) +#include "content/browser/speech/input_tag_speech_dispatcher_host.h" +#endif + #include "third_party/skia/include/core/SkBitmap.h" using content::BrowserContext; @@ -502,9 +504,6 @@ void RenderProcessHostImpl::CreateMessageFilters() { channel_->AddFilter(new speech::InputTagSpeechDispatcherHost( GetID(), browser_context->GetRequestContext(), browser_context->GetSpeechRecognitionPreferences())); - channel_->AddFilter(new speech::SpeechRecognitionDispatcherHost( - GetID(), browser_context->GetRequestContext(), - browser_context->GetSpeechRecognitionPreferences())); #endif channel_->AddFilter(new FileAPIMessageFilter( GetID(), diff --git a/content/browser/speech/input_tag_speech_dispatcher_host.cc b/content/browser/speech/input_tag_speech_dispatcher_host.cc index ef6f793..0fa54be 100644 --- a/content/browser/speech/input_tag_speech_dispatcher_host.cc +++ b/content/browser/speech/input_tag_speech_dispatcher_host.cc @@ -6,16 +6,29 @@ #include "base/bind.h" #include "base/lazy_instance.h" +#include "content/browser/speech/speech_recognition_manager_impl.h" +#include "content/browser/speech/speech_recognizer_impl.h" #include "content/common/speech_recognition_messages.h" -#include "content/public/browser/speech_recognition_manager.h" #include "content/public/browser/speech_recognition_preferences.h" #include "content/public/browser/speech_recognition_session_config.h" #include "content/public/browser/speech_recognition_session_context.h" +using content::BrowserThread; using content::SpeechRecognitionManager; using content::SpeechRecognitionSessionConfig; using content::SpeechRecognitionSessionContext; +namespace { +bool IsSameContext(int render_process_id, + int render_view_id, + int render_request_id, + const SpeechRecognitionSessionContext& context) { + return context.render_process_id == render_process_id && + context.render_view_id == render_view_id && + context.render_request_id == 
render_request_id; +} +} // namespace + namespace speech { SpeechRecognitionManager* InputTagSpeechDispatcherHost::manager_for_tests_; @@ -29,25 +42,37 @@ InputTagSpeechDispatcherHost::InputTagSpeechDispatcherHost( net::URLRequestContextGetter* url_request_context_getter, content::SpeechRecognitionPreferences* recognition_preferences) : render_process_id_(render_process_id), + may_have_pending_requests_(false), url_request_context_getter_(url_request_context_getter), recognition_preferences_(recognition_preferences) { - // Do not add any non-trivial initialization here, instead do it lazily when - // required (e.g. see the method |manager()|) or add an Init() method. + // This is initialized by Browser. Do not add any non-trivial + // initialization here, instead do it lazily when required (e.g. see the + // method |manager()|) or add an Init() method. } InputTagSpeechDispatcherHost::~InputTagSpeechDispatcherHost() { - if (SpeechRecognitionManager* sr_manager = manager()) - sr_manager->AbortAllSessionsForListener(this); + // If the renderer crashed for some reason or if we didn't receive a proper + // Cancel/Stop call for an existing session, cancel such active sessions now. + // We first check if this dispatcher received any speech IPC request so that + // we don't end up creating the speech input manager for web pages which don't + // use speech input. 
+ if (may_have_pending_requests_) + manager()->AbortAllSessionsForListener(this); } SpeechRecognitionManager* InputTagSpeechDispatcherHost::manager() { if (manager_for_tests_) return manager_for_tests_; +#if defined(ENABLE_INPUT_SPEECH) return SpeechRecognitionManager::GetInstance(); +#else + return NULL; +#endif } bool InputTagSpeechDispatcherHost::OnMessageReceived( const IPC::Message& message, bool* message_was_ok) { + DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO)); bool handled = true; IPC_BEGIN_MESSAGE_MAP_EX(InputTagSpeechDispatcherHost, message, *message_was_ok) @@ -59,16 +84,19 @@ bool InputTagSpeechDispatcherHost::OnMessageReceived( OnStopRecording) IPC_MESSAGE_UNHANDLED(handled = false) IPC_END_MESSAGE_MAP() + if (handled) + may_have_pending_requests_ = true; return handled; } void InputTagSpeechDispatcherHost::OnStartRecognition( - const InputTagSpeechHostMsg_StartRecognition_Params& params) { + const InputTagSpeechHostMsg_StartRecognition_Params &params) { + DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO)); SpeechRecognitionSessionContext context; context.render_process_id = render_process_id_; context.render_view_id = params.render_view_id; - context.request_id = params.request_id; + context.render_request_id = params.request_id; context.element_rect = params.element_rect; SpeechRecognitionSessionConfig config; @@ -84,24 +112,32 @@ void InputTagSpeechDispatcherHost::OnStartRecognition( config.event_listener = this; int session_id = manager()->CreateSession(config); - DCHECK_NE(session_id, content::SpeechRecognitionManager::kSessionIDInvalid); - manager()->StartSession(session_id); + if (session_id == SpeechRecognitionManager::kSessionIDInvalid) + return; + + manager()->StartSession(session_id); } void InputTagSpeechDispatcherHost::OnCancelRecognition(int render_view_id, int request_id) { - int session_id = manager()->GetSession(render_process_id_, - render_view_id, - request_id); + DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO)); + int 
session_id = manager()->LookupSessionByContext( + base::Bind(&IsSameContext, + render_process_id_, + render_view_id, + request_id)); if (session_id != SpeechRecognitionManager::kSessionIDInvalid) manager()->AbortSession(session_id); } void InputTagSpeechDispatcherHost::OnStopRecording(int render_view_id, int request_id) { - int session_id = manager()->GetSession(render_process_id_, - render_view_id, - request_id); + DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO)); + int session_id = manager()->LookupSessionByContext( + base::Bind(&IsSameContext, + render_process_id_, + render_view_id, + request_id)); DCHECK_NE(session_id, SpeechRecognitionManager::kSessionIDInvalid); manager()->StopAudioCaptureForSession(session_id); } @@ -110,34 +146,37 @@ void InputTagSpeechDispatcherHost::OnStopRecording(int render_view_id, void InputTagSpeechDispatcherHost::OnRecognitionResult( int session_id, const content::SpeechRecognitionResult& result) { VLOG(1) << "InputTagSpeechDispatcherHost::OnRecognitionResult enter"; + DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO)); const SpeechRecognitionSessionContext& context = manager()->GetSessionContext(session_id); Send(new InputTagSpeechMsg_SetRecognitionResult( context.render_view_id, - context.request_id, + context.render_request_id, result)); VLOG(1) << "InputTagSpeechDispatcherHost::OnRecognitionResult exit"; } void InputTagSpeechDispatcherHost::OnAudioEnd(int session_id) { VLOG(1) << "InputTagSpeechDispatcherHost::OnAudioEnd enter"; + DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO)); const SpeechRecognitionSessionContext& context = manager()->GetSessionContext(session_id); Send(new InputTagSpeechMsg_RecordingComplete(context.render_view_id, - context.request_id)); + context.render_request_id)); VLOG(1) << "InputTagSpeechDispatcherHost::OnAudioEnd exit"; } void InputTagSpeechDispatcherHost::OnRecognitionEnd(int session_id) { VLOG(1) << "InputTagSpeechDispatcherHost::OnRecognitionEnd enter"; + 
DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO)); const SpeechRecognitionSessionContext& context = manager()->GetSessionContext(session_id); Send(new InputTagSpeechMsg_RecognitionComplete(context.render_view_id, - context.request_id)); + context.render_request_id)); VLOG(1) << "InputTagSpeechDispatcherHost::OnRecognitionEnd exit"; } diff --git a/content/browser/speech/input_tag_speech_dispatcher_host.h b/content/browser/speech/input_tag_speech_dispatcher_host.h index 703174cc..c7c96edb 100644 --- a/content/browser/speech/input_tag_speech_dispatcher_host.h +++ b/content/browser/speech/input_tag_speech_dispatcher_host.h @@ -62,7 +62,7 @@ class CONTENT_EXPORT InputTagSpeechDispatcherHost virtual ~InputTagSpeechDispatcherHost(); void OnStartRecognition( - const InputTagSpeechHostMsg_StartRecognition_Params& params); + const InputTagSpeechHostMsg_StartRecognition_Params &params); void OnCancelRecognition(int render_view_id, int request_id); void OnStopRecording(int render_view_id, int request_id); @@ -71,6 +71,8 @@ class CONTENT_EXPORT InputTagSpeechDispatcherHost content::SpeechRecognitionManager* manager(); int render_process_id_; + bool may_have_pending_requests_; // Set if we received any speech IPC request + scoped_refptr<net::URLRequestContextGetter> url_request_context_getter_; scoped_refptr<content::SpeechRecognitionPreferences> recognition_preferences_; diff --git a/content/browser/speech/speech_recognition_browsertest.cc b/content/browser/speech/speech_recognition_browsertest.cc index f812ebf..a939fb6 100644 --- a/content/browser/speech/speech_recognition_browsertest.cc +++ b/content/browser/speech/speech_recognition_browsertest.cc @@ -130,12 +130,12 @@ class FakeSpeechRecognitionManager : public content::SpeechRecognitionManager { virtual string16 GetAudioInputDeviceModel() OVERRIDE { return string16(); } virtual void ShowAudioInputSettings() OVERRIDE {} - virtual int GetSession(int render_process_id, - int render_view_id, - int request_id) const OVERRIDE { 
- return session_ctx_.render_process_id == render_process_id && - session_ctx_.render_view_id == render_view_id && - session_ctx_.request_id == request_id; + virtual int LookupSessionByContext( + base::Callback<bool( + const content::SpeechRecognitionSessionContext&)> matcher) + const OVERRIDE { + bool matched = matcher.Run(session_ctx_); + return matched ? session_id_ : 0; } virtual const SpeechRecognitionSessionConfig& GetSessionConfig( diff --git a/content/browser/speech/speech_recognition_dispatcher_host.cc b/content/browser/speech/speech_recognition_dispatcher_host.cc deleted file mode 100644 index 55d032e..0000000 --- a/content/browser/speech/speech_recognition_dispatcher_host.cc +++ /dev/null @@ -1,181 +0,0 @@ -// Copyright (c) 2012 The Chromium Authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -#include "content/browser/speech/speech_recognition_dispatcher_host.h" - -#include "base/bind.h" -#include "base/command_line.h" -#include "base/lazy_instance.h" -#include "content/common/speech_recognition_messages.h" -#include "content/public/browser/speech_recognition_manager.h" -#include "content/public/browser/speech_recognition_preferences.h" -#include "content/public/browser/speech_recognition_session_config.h" -#include "content/public/browser/speech_recognition_session_context.h" -#include "content/public/common/content_switches.h" - -using content::SpeechRecognitionManager; -using content::SpeechRecognitionSessionConfig; -using content::SpeechRecognitionSessionContext; - -namespace speech { -SpeechRecognitionManager* SpeechRecognitionDispatcherHost::manager_for_tests_; - -void SpeechRecognitionDispatcherHost::SetManagerForTests( - SpeechRecognitionManager* manager) { - manager_for_tests_ = manager; -} - -SpeechRecognitionDispatcherHost::SpeechRecognitionDispatcherHost( - int render_process_id, - net::URLRequestContextGetter* context_getter, - 
content::SpeechRecognitionPreferences* recognition_preferences) - : render_process_id_(render_process_id), - context_getter_(context_getter), - recognition_preferences_(recognition_preferences) { - // Do not add any non-trivial initialization here, instead do it lazily when - // required (e.g. see the method |manager()|) or add an Init() method. -} - -SpeechRecognitionDispatcherHost::~SpeechRecognitionDispatcherHost() { - if (SpeechRecognitionManager* sr_manager = manager()) - sr_manager->AbortAllSessionsForListener(this); -} - -SpeechRecognitionManager* SpeechRecognitionDispatcherHost::manager() { - if (manager_for_tests_) - return manager_for_tests_; - - const CommandLine& command_line = *CommandLine::ForCurrentProcess(); - if (command_line.HasSwitch(switches::kEnableScriptedSpeech)) - return SpeechRecognitionManager::GetInstance(); - - return NULL; -} - -bool SpeechRecognitionDispatcherHost::OnMessageReceived( - const IPC::Message& message, bool* message_was_ok) { - bool handled = true; - IPC_BEGIN_MESSAGE_MAP_EX(SpeechRecognitionDispatcherHost, message, - *message_was_ok) - IPC_MESSAGE_HANDLER(SpeechRecognitionHostMsg_StartRequest, - OnStartRequest) - IPC_MESSAGE_HANDLER(SpeechRecognitionHostMsg_AbortRequest, - OnAbortRequest) - IPC_MESSAGE_HANDLER(SpeechRecognitionHostMsg_StopCaptureRequest, - OnStopCaptureRequest) - IPC_MESSAGE_UNHANDLED(handled = false) - IPC_END_MESSAGE_MAP() - return handled; -} - -void SpeechRecognitionDispatcherHost::OnStartRequest( - const SpeechRecognitionHostMsg_StartRequest_Params& params) { - - SpeechRecognitionSessionContext context; - context.render_process_id = render_process_id_; - context.render_view_id = params.render_view_id; - context.request_id = params.request_id; - - SpeechRecognitionSessionConfig config; - config.is_one_shot = params.is_one_shot; - config.language = params.language; - config.grammars = params.grammars; - config.origin_url = params.origin_url; - config.initial_context = context; - 
config.url_request_context_getter = context_getter_.get(); - config.filter_profanities = recognition_preferences_->FilterProfanities(); - config.event_listener = this; - - int session_id = manager()->CreateSession(config); - DCHECK_NE(session_id, content::SpeechRecognitionManager::kSessionIDInvalid); - manager()->StartSession(session_id); -} - -void SpeechRecognitionDispatcherHost::OnAbortRequest(int render_view_id, - int request_id) { - int session_id = manager()->GetSession(render_process_id_, - render_view_id, - request_id); - if (session_id != content::SpeechRecognitionManager::kSessionIDInvalid) - manager()->AbortSession(session_id); -} - -void SpeechRecognitionDispatcherHost::OnStopCaptureRequest( - int render_view_id, int request_id) { - int session_id = manager()->GetSession(render_process_id_, - render_view_id, - request_id); - if (session_id != content::SpeechRecognitionManager::kSessionIDInvalid) - manager()->StopAudioCaptureForSession(session_id); -} - -// -------- SpeechRecognitionEventListener interface implementation ----------- - -void SpeechRecognitionDispatcherHost::OnRecognitionStart(int session_id) { - const SpeechRecognitionSessionContext& context = - manager()->GetSessionContext(session_id); - Send(new SpeechRecognitionMsg_Started(context.render_view_id, - context.request_id)); -} - -void SpeechRecognitionDispatcherHost::OnAudioStart(int session_id) { - const SpeechRecognitionSessionContext& context = - manager()->GetSessionContext(session_id); - Send(new SpeechRecognitionMsg_AudioStarted(context.render_view_id, - context.request_id)); -} - -void SpeechRecognitionDispatcherHost::OnSoundStart(int session_id) { - const SpeechRecognitionSessionContext& context = - manager()->GetSessionContext(session_id); - Send(new SpeechRecognitionMsg_SoundStarted(context.render_view_id, - context.request_id)); -} - -void SpeechRecognitionDispatcherHost::OnSoundEnd(int session_id) { - const SpeechRecognitionSessionContext& context = - 
manager()->GetSessionContext(session_id); - Send(new SpeechRecognitionMsg_SoundEnded(context.render_view_id, - context.request_id)); -} - -void SpeechRecognitionDispatcherHost::OnAudioEnd(int session_id) { - const SpeechRecognitionSessionContext& context = - manager()->GetSessionContext(session_id); - Send(new SpeechRecognitionMsg_AudioEnded(context.render_view_id, - context.request_id)); -} - -void SpeechRecognitionDispatcherHost::OnRecognitionEnd(int session_id) { - const SpeechRecognitionSessionContext& context = - manager()->GetSessionContext(session_id); - Send(new SpeechRecognitionMsg_Ended(context.render_view_id, - context.request_id)); -} - -void SpeechRecognitionDispatcherHost::OnRecognitionResult( - int session_id, const content::SpeechRecognitionResult& result) { - const SpeechRecognitionSessionContext& context = - manager()->GetSessionContext(session_id); - Send(new SpeechRecognitionMsg_ResultRetrieved(context.render_view_id, - context.request_id, - result)); -} - -void SpeechRecognitionDispatcherHost::OnRecognitionError( - int session_id, const content::SpeechRecognitionError& error) { - const SpeechRecognitionSessionContext& context = - manager()->GetSessionContext(session_id); - Send(new SpeechRecognitionMsg_ErrorOccurred(context.render_view_id, - context.request_id, - error)); -} - -// The events below are currently not used by speech JS APIs implementation. -void SpeechRecognitionDispatcherHost::OnAudioLevelsChange( - int session_id, float volume, float noise_volume) {} -void SpeechRecognitionDispatcherHost::OnEnvironmentEstimationComplete( - int session_id) {} - -} // namespace speech diff --git a/content/browser/speech/speech_recognition_dispatcher_host.h b/content/browser/speech/speech_recognition_dispatcher_host.h deleted file mode 100644 index 3ebd8e4..0000000 --- a/content/browser/speech/speech_recognition_dispatcher_host.h +++ /dev/null @@ -1,82 +0,0 @@ -// Copyright (c) 2012 The Chromium Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -#ifndef CONTENT_BROWSER_SPEECH_SPEECH_RECOGNITION_DISPATCHER_HOST_H_ -#define CONTENT_BROWSER_SPEECH_SPEECH_RECOGNITION_DISPATCHER_HOST_H_ -#pragma once - -#include "base/memory/scoped_ptr.h" -#include "content/common/content_export.h" -#include "content/public/browser/browser_message_filter.h" -#include "content/public/browser/speech_recognition_event_listener.h" -#include "net/url_request/url_request_context_getter.h" - -struct SpeechRecognitionHostMsg_StartRequest_Params; - -namespace content { -class SpeechRecognitionManager; -class SpeechRecognitionPreferences; -struct SpeechRecognitionResult; -} - -namespace speech { - -// SpeechRecognitionDispatcherHost is a delegate for Speech API messages used by -// RenderMessageFilter. Basically it acts as a proxy, relaying the events coming -// from the SpeechRecognitionManager to IPC messages (and vice versa). -// It's the complement of SpeechRecognitionDispatcher (owned by RenderView). -class CONTENT_EXPORT SpeechRecognitionDispatcherHost - : public content::BrowserMessageFilter, - public content::SpeechRecognitionEventListener { - public: - SpeechRecognitionDispatcherHost( - int render_process_id, - net::URLRequestContextGetter* context_getter, - content::SpeechRecognitionPreferences* recognition_preferences); - - // SpeechRecognitionEventListener methods. 
- virtual void OnRecognitionStart(int session_id) OVERRIDE; - virtual void OnAudioStart(int session_id) OVERRIDE; - virtual void OnEnvironmentEstimationComplete(int session_id) OVERRIDE; - virtual void OnSoundStart(int session_id) OVERRIDE; - virtual void OnSoundEnd(int session_id) OVERRIDE; - virtual void OnAudioEnd(int session_id) OVERRIDE; - virtual void OnRecognitionEnd(int session_id) OVERRIDE; - virtual void OnRecognitionResult( - int session_id, const content::SpeechRecognitionResult& result) OVERRIDE; - virtual void OnRecognitionError( - int session_id, const content::SpeechRecognitionError& error) OVERRIDE; - virtual void OnAudioLevelsChange( - int session_id, float volume, float noise_volume) OVERRIDE; - - // content::BrowserMessageFilter implementation. - virtual bool OnMessageReceived(const IPC::Message& message, - bool* message_was_ok) OVERRIDE; - - // Singleton manager setter useful for tests. - static void SetManagerForTests(content::SpeechRecognitionManager* manager); - - private: - virtual ~SpeechRecognitionDispatcherHost(); - - void OnStartRequest( - const SpeechRecognitionHostMsg_StartRequest_Params& params); - void OnAbortRequest(int render_view_id, int request_id); - void OnStopCaptureRequest(int render_view_id, int request_id); - - // Returns the speech recognition manager to forward requests to. 
- content::SpeechRecognitionManager* manager(); - - int render_process_id_; - scoped_refptr<net::URLRequestContextGetter> context_getter_; - scoped_refptr<content::SpeechRecognitionPreferences> recognition_preferences_; - - static content::SpeechRecognitionManager* manager_for_tests_; - - DISALLOW_COPY_AND_ASSIGN(SpeechRecognitionDispatcherHost); -}; - -} // namespace speech - -#endif // CONTENT_BROWSER_SPEECH_SPEECH_RECOGNITION_DISPATCHER_HOST_H_ diff --git a/content/browser/speech/speech_recognition_manager_impl.cc b/content/browser/speech/speech_recognition_manager_impl.cc index 6bad70f..e1c3298 100644 --- a/content/browser/speech/speech_recognition_manager_impl.cc +++ b/content/browser/speech/speech_recognition_manager_impl.cc @@ -282,18 +282,20 @@ void SpeechRecognitionManagerImpl::OnRecognitionEnd(int session_id) { this->AsWeakPtr(), session_id, EVENT_RECOGNITION_ENDED)); } -int SpeechRecognitionManagerImpl::GetSession( - int render_process_id, int render_view_id, int request_id) const { +// TODO(primiano) After CL2: if we see that both InputTagDispatcherHost and +// SpeechRecognitionDispatcherHost do the same lookup operations, implement the +// lookup method directly here. +int SpeechRecognitionManagerImpl::LookupSessionByContext( + Callback<bool(const SpeechRecognitionSessionContext&)> matcher) const { DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO)); SessionsTable::const_iterator iter; + // Note: the callback (matcher) must NEVER perform non-const calls on us. 
for(iter = sessions_.begin(); iter != sessions_.end(); ++iter) { const int session_id = iter->first; - const SpeechRecognitionSessionContext& context = iter->second.context; - if (context.render_process_id == render_process_id && - context.render_view_id == render_view_id && - context.request_id == request_id) { + const Session& session = iter->second; + bool matches = matcher.Run(session.context); + if (matches) return session_id; - } } return kSessionIDInvalid; } diff --git a/content/browser/speech/speech_recognition_manager_impl.h b/content/browser/speech/speech_recognition_manager_impl.h index 5d43dfc..b27c031 100644 --- a/content/browser/speech/speech_recognition_manager_impl.h +++ b/content/browser/speech/speech_recognition_manager_impl.h @@ -69,9 +69,10 @@ class CONTENT_EXPORT SpeechRecognitionManagerImpl : int session_id) const OVERRIDE; virtual content::SpeechRecognitionSessionContext GetSessionContext( int session_id) const OVERRIDE; - virtual int GetSession(int render_process_id, - int render_view_id, - int request_id) const OVERRIDE; + virtual int LookupSessionByContext( + base::Callback<bool( + const content::SpeechRecognitionSessionContext&)> matcher) + const OVERRIDE; virtual bool HasAudioInputDevices() OVERRIDE; virtual bool IsCapturingAudio() OVERRIDE; virtual string16 GetAudioInputDeviceModel() OVERRIDE; diff --git a/content/common/speech_recognition_messages.h b/content/common/speech_recognition_messages.h index 864b442..dc3ceb38 100644 --- a/content/common/speech_recognition_messages.h +++ b/content/common/speech_recognition_messages.h @@ -7,7 +7,6 @@ #include <string> #include "content/public/common/speech_recognition_error.h" -#include "content/public/common/speech_recognition_grammar.h" #include "content/public/common/speech_recognition_result.h" #include "ipc/ipc_message_macros.h" #include "ipc/ipc_param_traits.h" @@ -15,29 +14,17 @@ #define IPC_MESSAGE_START SpeechRecognitionMsgStart -IPC_ENUM_TRAITS(content::SpeechAudioErrorDetails) 
IPC_ENUM_TRAITS(content::SpeechRecognitionErrorCode) -IPC_STRUCT_TRAITS_BEGIN(content::SpeechRecognitionError) - IPC_STRUCT_TRAITS_MEMBER(code) - IPC_STRUCT_TRAITS_MEMBER(details) -IPC_STRUCT_TRAITS_END() - IPC_STRUCT_TRAITS_BEGIN(content::SpeechRecognitionHypothesis) IPC_STRUCT_TRAITS_MEMBER(utterance) IPC_STRUCT_TRAITS_MEMBER(confidence) IPC_STRUCT_TRAITS_END() IPC_STRUCT_TRAITS_BEGIN(content::SpeechRecognitionResult) - IPC_STRUCT_TRAITS_MEMBER(provisional) IPC_STRUCT_TRAITS_MEMBER(hypotheses) IPC_STRUCT_TRAITS_END() -IPC_STRUCT_TRAITS_BEGIN(content::SpeechRecognitionGrammar) - IPC_STRUCT_TRAITS_MEMBER(url) - IPC_STRUCT_TRAITS_MEMBER(weight) -IPC_STRUCT_TRAITS_END() - // Used to start a speech recognition session. IPC_STRUCT_BEGIN(InputTagSpeechHostMsg_StartRecognition_Params) // The render view requesting speech recognition. @@ -54,7 +41,7 @@ IPC_STRUCT_BEGIN(InputTagSpeechHostMsg_StartRecognition_Params) IPC_STRUCT_MEMBER(std::string, origin_url) IPC_STRUCT_END() -// Renderer -> Browser messages. +// Speech recognition messages sent from the renderer to the browser. // Requests the speech recognition service to start speech recognition on behalf // of the given |render_view_id|. @@ -76,89 +63,24 @@ IPC_MESSAGE_CONTROL2(InputTagSpeechHostMsg_StopRecording, int /* render_view_id */, int /* request_id */) -// Browser -> Renderer messages. +// Speech recognition messages sent from the browser to the renderer. -// Relays a speech recognition result, either partial or final. +// Relay a speech recognition result, either partial or final. IPC_MESSAGE_ROUTED2(InputTagSpeechMsg_SetRecognitionResult, int /* request_id */, content::SpeechRecognitionResult /* result */) -// Indicates that speech recognizer has stopped recording and started +// Indicate that speech recognizer has stopped recording and started // recognition. IPC_MESSAGE_ROUTED1(InputTagSpeechMsg_RecordingComplete, int /* request_id */) -// Indicates that speech recognizer has completed recognition. 
This will be the +// Indicate that speech recognizer has completed recognition. This will be the // last message sent in response to a InputTagSpeechHostMsg_StartRecognition. IPC_MESSAGE_ROUTED1(InputTagSpeechMsg_RecognitionComplete, int /* request_id */) -// Toggles speech recognition on or off on the speech input control for the +// Toggle speech recognition on or off on the speech input control for the // current focused element. Has no effect if the current element doesn't // support speech recognition. IPC_MESSAGE_ROUTED0(InputTagSpeechMsg_ToggleSpeechInput) - - -// ------- Messages for Speech JS APIs (SpeechRecognitionDispatcher) ---------- - -// Renderer -> Browser messages. - -// Used to start a speech recognition session. -IPC_STRUCT_BEGIN(SpeechRecognitionHostMsg_StartRequest_Params) - // The render view requesting speech recognition. - IPC_STRUCT_MEMBER(int, render_view_id) - // Unique ID associated with the JS object making the calls. - IPC_STRUCT_MEMBER(int, request_id) - // Language to use for speech recognition. - IPC_STRUCT_MEMBER(std::string, language) - // Speech grammars to use. - IPC_STRUCT_MEMBER(content::SpeechRecognitionGrammarArray, grammars) - // URL of the page (or iframe if applicable). - IPC_STRUCT_MEMBER(std::string, origin_url) - // One-shot/continuous recognition mode. - IPC_STRUCT_MEMBER(bool, is_one_shot) -IPC_STRUCT_END() - - -// Requests the speech recognition service to start speech recognition. -IPC_MESSAGE_CONTROL1(SpeechRecognitionHostMsg_StartRequest, - SpeechRecognitionHostMsg_StartRequest_Params) - -// Requests the speech recognition service to abort speech recognition on -// behalf of the given |render_view_id|. If speech recognition is not happening -// or is happening on behalf of some other render view, this call does nothing. 
-IPC_MESSAGE_CONTROL2(SpeechRecognitionHostMsg_AbortRequest, - int /* render_view_id */, - int /* request_id */) - -// Requests the speech recognition service to stop audio capture on behalf of -// the given |render_view_id|. Any audio recorded so far will be fed to the -// speech recognizer. If speech recognition is not happening nor or is -// happening on behalf of some other render view, this call does nothing. -IPC_MESSAGE_CONTROL2(SpeechRecognitionHostMsg_StopCaptureRequest, - int /* render_view_id */, - int /* request_id */) - -// Browser -> Renderer messages. - -// The messages below follow exactly the same semantic of the corresponding -// events defined in content/public/browser/speech_recognition_event_listener.h. -IPC_MESSAGE_ROUTED2(SpeechRecognitionMsg_ResultRetrieved, - int /* request_id */, - content::SpeechRecognitionResult /* result */) - -IPC_MESSAGE_ROUTED2(SpeechRecognitionMsg_ErrorOccurred, - int /* request_id */, - content::SpeechRecognitionError /* error */) - -IPC_MESSAGE_ROUTED1(SpeechRecognitionMsg_Started, int /* request_id */) - -IPC_MESSAGE_ROUTED1(SpeechRecognitionMsg_AudioStarted, int /* request_id */) - -IPC_MESSAGE_ROUTED1(SpeechRecognitionMsg_SoundStarted, int /* request_id */) - -IPC_MESSAGE_ROUTED1(SpeechRecognitionMsg_SoundEnded, int /* request_id */) - -IPC_MESSAGE_ROUTED1(SpeechRecognitionMsg_AudioEnded, int /* request_id */) - -IPC_MESSAGE_ROUTED1(SpeechRecognitionMsg_Ended, int /* request_id */) diff --git a/content/content_browser.gypi b/content/content_browser.gypi index 552cccc..2ceca05 100644 --- a/content/content_browser.gypi +++ b/content/content_browser.gypi @@ -655,8 +655,6 @@ 'browser/speech/google_one_shot_remote_engine.h', 'browser/speech/input_tag_speech_dispatcher_host.cc', 'browser/speech/input_tag_speech_dispatcher_host.h', - 'browser/speech/speech_recognition_dispatcher_host.cc', - 'browser/speech/speech_recognition_dispatcher_host.h', 'browser/speech/speech_recognition_engine.cc', 
'browser/speech/speech_recognition_engine.h', 'browser/speech/speech_recognition_manager_impl.cc', diff --git a/content/content_renderer.gypi b/content/content_renderer.gypi index 59dc1af..0d67650 100644 --- a/content/content_renderer.gypi +++ b/content/content_renderer.gypi @@ -199,8 +199,6 @@ 'renderer/renderer_webcolorchooser_impl.h', 'renderer/renderer_webkitplatformsupport_impl.cc', 'renderer/renderer_webkitplatformsupport_impl.h', - 'renderer/speech_recognition_dispatcher.cc', - 'renderer/speech_recognition_dispatcher.h', 'renderer/text_input_client_observer.cc', 'renderer/text_input_client_observer.h', 'renderer/v8_value_converter_impl.cc', @@ -241,8 +239,6 @@ 'sources!': [ 'renderer/input_tag_speech_dispatcher.cc', 'renderer/input_tag_speech_dispatcher.h', - 'renderer/speech_recognition_dispatcher.cc', - 'renderer/speech_recognition_dispatcher.h', ] }], ['notifications==0', { diff --git a/content/public/browser/speech_recognition_manager.h b/content/public/browser/speech_recognition_manager.h index 0cd2a75..2d6d597 100644 --- a/content/public/browser/speech_recognition_manager.h +++ b/content/public/browser/speech_recognition_manager.h @@ -62,11 +62,11 @@ class SpeechRecognitionManager { virtual SpeechRecognitionSessionContext GetSessionContext( int session_id) const = 0; - // Looks-up an existing session from the context tuple - // {render_view_id, render_view_id, request_id}. - virtual int GetSession(int render_process_id, - int render_view_id, - int request_id) const = 0; + // Looks-up an existing session using a caller-provided matcher function. + virtual int LookupSessionByContext( + base::Callback<bool( + const content::SpeechRecognitionSessionContext&)> matcher) + const = 0; // Returns true if the OS reports existence of audio recording devices. 
virtual bool HasAudioInputDevices() = 0; diff --git a/content/public/browser/speech_recognition_session_context.cc b/content/public/browser/speech_recognition_session_context.cc index 6117bbe..f5bf32c 100644 --- a/content/public/browser/speech_recognition_session_context.cc +++ b/content/public/browser/speech_recognition_session_context.cc @@ -9,7 +9,7 @@ namespace content { SpeechRecognitionSessionContext::SpeechRecognitionSessionContext() : render_process_id(0), render_view_id(0), - request_id(0), + render_request_id(0), requested_by_page_element(true), is_first_request_for_context(false) { } diff --git a/content/public/browser/speech_recognition_session_context.h b/content/public/browser/speech_recognition_session_context.h index 4b6d9f7..eaa4f39 100644 --- a/content/public/browser/speech_recognition_session_context.h +++ b/content/public/browser/speech_recognition_session_context.h @@ -15,9 +15,11 @@ namespace content { // The context information required by clients of the SpeechRecognitionManager // and its delegates for mapping the recognition session to other browser // elements involved with it (e.g., the page element that requested the -// recognition). The manager keeps this struct attached to the recognition -// session during all the session lifetime, making its contents available to -// clients (In this regard, see SpeechRecognitionManager::GetSessionContext and +// recognition). The SpeechRecognitionManager is not aware of the content of +// this struct and does NOT use it for its purposes. However the manager keeps +// this struct "attached" to the recognition session during all the session +// lifetime, making its contents available to clients (In this regard, see +// SpeechRecognitionManager::GetSessionContext and // SpeechRecognitionManager::LookupSessionByContext methods). 
struct CONTENT_EXPORT SpeechRecognitionSessionContext { SpeechRecognitionSessionContext(); @@ -25,7 +27,8 @@ struct CONTENT_EXPORT SpeechRecognitionSessionContext { int render_process_id; int render_view_id; - int request_id; + int render_request_id; + int js_handle_id; // Determines whether recognition was requested by a page element (in which // case its coordinates are passed in |element_rect|). diff --git a/content/renderer/input_tag_speech_dispatcher.cc b/content/renderer/input_tag_speech_dispatcher.cc index 41f3dd7..7aa6d9f 100644 --- a/content/renderer/input_tag_speech_dispatcher.cc +++ b/content/renderer/input_tag_speech_dispatcher.cc @@ -81,7 +81,7 @@ void InputTagSpeechDispatcher::cancelRecognition(int request_id) { void InputTagSpeechDispatcher::stopRecording(int request_id) { VLOG(1) << "InputTagSpeechDispatcher::stopRecording enter"; Send(new InputTagSpeechHostMsg_StopRecording(routing_id(), - request_id)); + request_id)); VLOG(1) << "InputTagSpeechDispatcher::stopRecording exit"; } diff --git a/content/renderer/render_view_impl.cc b/content/renderer/render_view_impl.cc index 83d6546..2bb8d16 100644 --- a/content/renderer/render_view_impl.cc +++ b/content/renderer/render_view_impl.cc @@ -87,7 +87,6 @@ #include "content/renderer/renderer_accessibility.h" #include "content/renderer/renderer_webapplicationcachehost_impl.h" #include "content/renderer/renderer_webcolorchooser_impl.h" -#include "content/renderer/speech_recognition_dispatcher.h" #include "content/renderer/text_input_client_observer.h" #include "content/renderer/v8_value_converter_impl.h" #include "content/renderer/web_intents_host.h" @@ -526,7 +525,6 @@ RenderViewImpl::RenderViewImpl( ALLOW_THIS_IN_INITIALIZER_LIST(cookie_jar_(this)), geolocation_dispatcher_(NULL), input_tag_speech_dispatcher_(NULL), - speech_recognition_dispatcher_(NULL), device_orientation_dispatcher_(NULL), media_stream_dispatcher_(NULL), media_stream_impl_(NULL), @@ -5356,14 +5354,6 @@ WebKit::WebSpeechInputController* 
RenderViewImpl::speechInputController( return input_tag_speech_dispatcher_; } -WebKit::WebSpeechRecognizer* RenderViewImpl::speechRecognizer() { -#if defined(ENABLE_INPUT_SPEECH) - if (!speech_recognition_dispatcher_) - speech_recognition_dispatcher_ = new SpeechRecognitionDispatcher(this); -#endif - return speech_recognition_dispatcher_; -} - WebKit::WebDeviceOrientationClient* RenderViewImpl::deviceOrientationClient() { if (!device_orientation_dispatcher_) device_orientation_dispatcher_ = new DeviceOrientationDispatcher(this); diff --git a/content/renderer/render_view_impl.h b/content/renderer/render_view_impl.h index 136d5e4..bc2dc0a 100644 --- a/content/renderer/render_view_impl.h +++ b/content/renderer/render_view_impl.h @@ -81,7 +81,6 @@ class RendererAccessibility; class RendererWebColorChooserImpl; class SkBitmap; class InputTagSpeechDispatcher; -class SpeechRecognitionDispatcher; struct ViewMsg_Navigate_Params; struct ViewMsg_PostMessage_Params; struct ViewMsg_StopFinding_Params; @@ -141,7 +140,6 @@ class WebPeerConnectionHandlerClient; class WebSocketStreamHandle; class WebSpeechInputController; class WebSpeechInputListener; -class WebSpeechRecognizer; class WebStorageNamespace; class WebTouchEvent; class WebURLLoader; @@ -465,7 +463,6 @@ class RenderViewImpl : public RenderWidget, virtual WebKit::WebGeolocationClient* geolocationClient(); virtual WebKit::WebSpeechInputController* speechInputController( WebKit::WebSpeechInputListener* listener); - virtual WebKit::WebSpeechRecognizer* speechRecognizer(); virtual WebKit::WebDeviceOrientationClient* deviceOrientationClient(); virtual void zoomLimitsChanged(double minimum_level, double maximum_level); virtual void zoomLevelChanged(); @@ -1269,10 +1266,6 @@ class RenderViewImpl : public RenderWidget, // The speech dispatcher attached to this view, lazily initialized. InputTagSpeechDispatcher* input_tag_speech_dispatcher_; - // The speech recognition dispatcher attached to this view, lazily - // initialized. 
- SpeechRecognitionDispatcher* speech_recognition_dispatcher_; - // Device orientation dispatcher attached to this view; lazily initialized. DeviceOrientationDispatcher* device_orientation_dispatcher_; diff --git a/content/renderer/speech_recognition_dispatcher.cc b/content/renderer/speech_recognition_dispatcher.cc deleted file mode 100644 index 90123dd..0000000 --- a/content/renderer/speech_recognition_dispatcher.cc +++ /dev/null @@ -1,193 +0,0 @@ -// Copyright (c) 2012 The Chromium Authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -#include "content/renderer/speech_recognition_dispatcher.h" - -#include "base/basictypes.h" -#include "base/utf_string_conversions.h" -#include "content/common/speech_recognition_messages.h" -#include "content/renderer/render_view_impl.h" -#include "third_party/WebKit/Source/WebKit/chromium/public/platform/WebString.h" -#include "third_party/WebKit/Source/WebKit/chromium/public/platform/WebVector.h" -#include "third_party/WebKit/Source/WebKit/chromium/public/WebSpeechGrammar.h" -#include "third_party/WebKit/Source/WebKit/chromium/public/WebSpeechRecognitionParams.h" -#include "third_party/WebKit/Source/WebKit/chromium/public/WebSpeechRecognitionResult.h" -#include "third_party/WebKit/Source/WebKit/chromium/public/WebSpeechRecognizerClient.h" - -using content::SpeechRecognitionError; -using content::SpeechRecognitionResult; -using WebKit::WebVector; -using WebKit::WebString; -using WebKit::WebSpeechGrammar; -using WebKit::WebSpeechRecognitionHandle; -using WebKit::WebSpeechRecognitionResult; -using WebKit::WebSpeechRecognitionParams; -using WebKit::WebSpeechRecognizerClient; - -SpeechRecognitionDispatcher::SpeechRecognitionDispatcher( - RenderViewImpl* render_view) - : content::RenderViewObserver(render_view), - recognizer_client_(NULL), - next_id_(1) { -} - -SpeechRecognitionDispatcher::~SpeechRecognitionDispatcher() { -} - -bool 
SpeechRecognitionDispatcher::OnMessageReceived( - const IPC::Message& message) { - bool handled = true; - IPC_BEGIN_MESSAGE_MAP(SpeechRecognitionDispatcher, message) - IPC_MESSAGE_HANDLER(SpeechRecognitionMsg_Started, OnRecognitionStarted) - IPC_MESSAGE_HANDLER(SpeechRecognitionMsg_AudioStarted, OnAudioStarted) - IPC_MESSAGE_HANDLER(SpeechRecognitionMsg_SoundStarted, OnSoundStarted) - IPC_MESSAGE_HANDLER(SpeechRecognitionMsg_SoundEnded, OnSoundEnded) - IPC_MESSAGE_HANDLER(SpeechRecognitionMsg_AudioEnded, OnAudioEnded) - IPC_MESSAGE_HANDLER(SpeechRecognitionMsg_ErrorOccurred, OnErrorOccurred) - IPC_MESSAGE_HANDLER(SpeechRecognitionMsg_Ended, OnRecognitionEnded) - IPC_MESSAGE_HANDLER(SpeechRecognitionMsg_ResultRetrieved, OnResultRetrieved) - IPC_MESSAGE_UNHANDLED(handled = false) - IPC_END_MESSAGE_MAP() - return handled; -} - -void SpeechRecognitionDispatcher::start( - const WebSpeechRecognitionHandle& handle, - const WebSpeechRecognitionParams& params, - WebSpeechRecognizerClient* recognizer_client) { - //TODO(primiano) What to do if a start is issued to an already started object? - DCHECK(!recognizer_client_ || recognizer_client_ == recognizer_client); - recognizer_client_ = recognizer_client; - - SpeechRecognitionHostMsg_StartRequest_Params msg_params; - for (size_t i = 0; i < params.grammars().size(); ++i) { - const WebSpeechGrammar& grammar = params.grammars()[i]; - msg_params.grammars.push_back( - content::SpeechRecognitionGrammar(grammar.src().spec(), - grammar.weight())); - } - msg_params.language = UTF16ToUTF8(params.language()); - msg_params.is_one_shot = !params.continuous(); - msg_params.origin_url = ""; // TODO(primiano) we need an origin from WebKit. 
- msg_params.render_view_id = routing_id(); - msg_params.request_id = GetIDForHandle(handle); - Send(new SpeechRecognitionHostMsg_StartRequest(msg_params)); -} - -void SpeechRecognitionDispatcher::stop( - const WebSpeechRecognitionHandle& handle, - WebSpeechRecognizerClient* recognizer_client) { - DCHECK(recognizer_client_ == recognizer_client); - Send(new SpeechRecognitionHostMsg_StopCaptureRequest(routing_id(), - GetIDForHandle(handle))); -} - -void SpeechRecognitionDispatcher::abort( - const WebSpeechRecognitionHandle& handle, - WebSpeechRecognizerClient* recognizer_client) { - Send(new SpeechRecognitionHostMsg_AbortRequest(routing_id(), - GetIDForHandle(handle))); -} - -void SpeechRecognitionDispatcher::OnRecognitionStarted(int request_id) { - recognizer_client_->didStart(GetHandleFromID(request_id)); -} - -void SpeechRecognitionDispatcher::OnAudioStarted(int request_id) { - recognizer_client_->didStartAudio(GetHandleFromID(request_id)); -} - -void SpeechRecognitionDispatcher::OnSoundStarted(int request_id) { - recognizer_client_->didStartSound(GetHandleFromID(request_id)); -} - -void SpeechRecognitionDispatcher::OnSoundEnded(int request_id) { - recognizer_client_->didEndSound(GetHandleFromID(request_id)); -} - -void SpeechRecognitionDispatcher::OnAudioEnded(int request_id) { - recognizer_client_->didEndAudio(GetHandleFromID(request_id)); -} - -void SpeechRecognitionDispatcher::OnErrorOccurred( - int request_id, const SpeechRecognitionError& error) { - if (error.code == content::SPEECH_RECOGNITION_ERROR_NO_MATCH) { - recognizer_client_->didReceiveNoMatch(GetHandleFromID(request_id), - WebSpeechRecognitionResult()); - } else { - // TODO(primiano) speech_recognition_error.h must be updated with the new - // API specs soon. 
- WebSpeechRecognizerClient::ErrorCode wk_error_code; - switch (error.code) { - case content::SPEECH_RECOGNITION_ERROR_ABORTED: - wk_error_code = WebSpeechRecognizerClient::AbortedError; - break; - case content::SPEECH_RECOGNITION_ERROR_AUDIO: - wk_error_code = WebSpeechRecognizerClient::AudioCaptureError; - break; - case content::SPEECH_RECOGNITION_ERROR_NETWORK: - wk_error_code = WebSpeechRecognizerClient::NetworkError; - break; - case content::SPEECH_RECOGNITION_ERROR_NO_SPEECH: - wk_error_code = WebSpeechRecognizerClient::NoSpeechError; - break; - case content::SPEECH_RECOGNITION_ERROR_BAD_GRAMMAR: - wk_error_code = WebSpeechRecognizerClient::BadGrammarError; - break; - default: - NOTREACHED(); - wk_error_code = WebSpeechRecognizerClient::OtherError; - } - recognizer_client_->didReceiveError(GetHandleFromID(request_id), - WebString(), // TODO(primiano) message? - wk_error_code); - } -} - -void SpeechRecognitionDispatcher::OnRecognitionEnded(int request_id) { - recognizer_client_->didEnd(GetHandleFromID(request_id)); - handle_map_.erase(request_id); -} - -void SpeechRecognitionDispatcher::OnResultRetrieved( - int request_id, const SpeechRecognitionResult& result) { - const size_t num_hypotheses = result.hypotheses.size(); - WebSpeechRecognitionResult webkit_result; - WebVector<WebString> transcripts(num_hypotheses); - WebVector<float> confidences(num_hypotheses); - for (size_t i = 0; i < num_hypotheses; ++i) { - transcripts[i] = result.hypotheses[i].utterance; - confidences[i] = static_cast<float>(result.hypotheses[i].confidence); - } - webkit_result.assign(transcripts, confidences, !result.provisional); - // TODO(primiano) Handle history, currently empty. 
- WebVector<WebSpeechRecognitionResult> empty_history; - recognizer_client_->didReceiveResult(GetHandleFromID(request_id), - webkit_result, - 0, // result_index - empty_history); -} - -int SpeechRecognitionDispatcher::GetIDForHandle( - const WebSpeechRecognitionHandle& handle) { - // Search first for an existing mapping. - for (HandleMap::iterator iter = handle_map_.begin(); - iter != handle_map_.end(); - ++iter) { - if (iter->second.equals(handle)) - return iter->first; - } - // If no existing mapping found, create a new one. - const int new_id = next_id_; - handle_map_[new_id] = handle; - ++next_id_; - return new_id; -} - -const WebSpeechRecognitionHandle& SpeechRecognitionDispatcher::GetHandleFromID( - int request_id) { - HandleMap::iterator iter = handle_map_.find(request_id); - DCHECK(iter != handle_map_.end()); - return iter->second; -} diff --git a/content/renderer/speech_recognition_dispatcher.h b/content/renderer/speech_recognition_dispatcher.h deleted file mode 100644 index 3e3b141..0000000 --- a/content/renderer/speech_recognition_dispatcher.h +++ /dev/null @@ -1,70 +0,0 @@ -// Copyright (c) 2012 The Chromium Authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -#ifndef CONTENT_RENDERER_SPEECH_RECOGNITION_DISPATCHER_H_ -#define CONTENT_RENDERER_SPEECH_RECOGNITION_DISPATCHER_H_ -#pragma once - -#include <map> - -#include "base/basictypes.h" -#include "content/public/renderer/render_view_observer.h" -#include "third_party/WebKit/Source/WebKit/chromium/public/platform/WebVector.h" -#include "third_party/WebKit/Source/WebKit/chromium/public/WebSpeechRecognitionHandle.h" -#include "third_party/WebKit/Source/WebKit/chromium/public/WebSpeechRecognizer.h" - -class RenderViewImpl; - -namespace content { -struct SpeechRecognitionError; -struct SpeechRecognitionResult; -} - -// SpeechRecognitionDispatcher is a delegate for methods used by WebKit for -// scripted JS speech APIs. 
It's the complement of -// SpeechRecognitionDispatcherHost (owned by RenderViewHost). -class SpeechRecognitionDispatcher : public content::RenderViewObserver, - public WebKit::WebSpeechRecognizer { - public: - explicit SpeechRecognitionDispatcher(RenderViewImpl* render_view); - virtual ~SpeechRecognitionDispatcher(); - - private: - // RenderViewObserver implementation. - virtual bool OnMessageReceived(const IPC::Message& message) OVERRIDE; - - // WebKit::WebSpeechRecognizer implementation. - virtual void start(const WebKit::WebSpeechRecognitionHandle&, - const WebKit::WebSpeechRecognitionParams&, - WebKit::WebSpeechRecognizerClient*) OVERRIDE; - virtual void stop(const WebKit::WebSpeechRecognitionHandle&, - WebKit::WebSpeechRecognizerClient*) OVERRIDE; - virtual void abort(const WebKit::WebSpeechRecognitionHandle&, - WebKit::WebSpeechRecognizerClient*) OVERRIDE; - - void OnRecognitionStarted(int request_id); - void OnAudioStarted(int request_id); - void OnSoundStarted(int request_id); - void OnSoundEnded(int request_id); - void OnAudioEnded(int request_id); - void OnErrorOccurred(int request_id, - const content::SpeechRecognitionError& error); - void OnRecognitionEnded(int request_id); - void OnResultRetrieved(int request_id, - const content::SpeechRecognitionResult& result); - - int GetIDForHandle(const WebKit::WebSpeechRecognitionHandle& handle); - const WebKit::WebSpeechRecognitionHandle& GetHandleFromID(int handle_id); - - // The WebKit client class that we use to send events back to the JS world. - WebKit::WebSpeechRecognizerClient* recognizer_client_; - - typedef std::map<int, WebKit::WebSpeechRecognitionHandle> HandleMap; - HandleMap handle_map_; - int next_id_; - - DISALLOW_COPY_AND_ASSIGN(SpeechRecognitionDispatcher); -}; - -#endif // CONTENT_RENDERER_SPEECH_RECOGNITION_DISPATCHER_H_ |