commit    ca97f30c1c6e282f62880c6b1d5e165b9ece716f
tree      75f7f5abe47a49e5c603eacbe7abf993d232aad2
parent    c33ec4af81ba1d13ec18eb7b0e542c05a2c9d4b0
author    satish@chromium.org <satish@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>  2011-01-20 13:57:05 +0000
committer satish@chromium.org <satish@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>  2011-01-20 13:57:05 +0000
If the user has consented to metrics reporting, send the speech input request origin to the server.
This is the Chromium side of the WebKit patch https://bugs.webkit.org/show_bug.cgi?id=52718.
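Concretely, the origin is forwarded only when the asynchronous UMA opt-in check has
already succeeded; this is a condensed excerpt of that gating from the
speech_input_manager.cc hunk below (all names are from the patch):

  // SpeechInputManagerImpl::StartRecognition (condensed): pass the origin URL
  // to the recognizer only if the user opted into metrics reporting; otherwise
  // an empty string is sent and no origin reaches the server.
  request->recognizer = new SpeechRecognizer(
      this, caller_id, language, grammar, optional_request_info_->value(),
      optional_request_info_->can_report_metrics() ? origin_url : "");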
Suggested reviewer split:
wtc@ - the 2 url_fetcher.* files
hans@ - rest of the files
I needed to add a URLFetcher::set_referrer() method to send the origin URL in the Referer header.
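The setter is small; condensed from the url_fetcher.* hunks below, it stores the value
on the fetcher's internal Core and applies it when the underlying URLRequest starts:

  // url_fetcher.h: the referrer URL for the request. Must be called before the
  // request is started.
  void set_referrer(const std::string& referrer);

  // url_fetcher.cc:
  void URLFetcher::set_referrer(const std::string& referrer) {
    core_->referrer_ = referrer;
  }

  // URLFetcher::Core::StartURLRequest() then forwards it to the request:
  request_->set_referrer(referrer_);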
I also had to create a new IPC params struct for startRecognition, since the number
of parameters exceeds what the IPC macros support. In the process I moved the
speech input IPC messages into their own source files.
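The new struct bundles the five existing arguments plus origin_url, so the start
message shrinks to a single parameter; condensed from speech_input_messages.h below:

  // speech_input_messages.h (condensed): parameters for starting recognition.
  struct SpeechInputHostMsg_StartRecognition_Params {
    int render_view_id;      // The render view requesting speech recognition.
    int request_id;          // Request ID used within the render view.
    gfx::Rect element_rect;  // Position of the UI element in page coordinates.
    std::string language;    // Language to use for speech recognition.
    std::string grammar;     // Speech grammar given by the speech input element.
    std::string origin_url;  // URL of the page (or iframe if applicable).
  };

  IPC_MESSAGE_CONTROL1(SpeechInputHostMsg_StartRecognition,
                       SpeechInputHostMsg_StartRecognition_Params)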
BUG=none
TEST=No change in functionality, except that additional debug info is sent to the server.
Review URL: http://codereview.chromium.org/6308009
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@71949 0039d316-1c4b-4281-b951-d872f2087c98
22 files changed, 286 insertions, 145 deletions
diff --git a/chrome/browser/speech/speech_input_browsertest.cc b/chrome/browser/speech/speech_input_browsertest.cc index 5563fe9..e0b2aff 100644 --- a/chrome/browser/speech/speech_input_browsertest.cc +++ b/chrome/browser/speech/speech_input_browsertest.cc @@ -48,7 +48,8 @@ class FakeSpeechInputManager : public SpeechInputManager { int render_view_id, const gfx::Rect& element_rect, const std::string& language, - const std::string& grammar) { + const std::string& grammar, + const std::string& origin_url) { VLOG(1) << "StartRecognition invoked."; EXPECT_EQ(0, caller_id_); EXPECT_EQ(NULL, delegate_); diff --git a/chrome/browser/speech/speech_input_dispatcher_host.cc b/chrome/browser/speech/speech_input_dispatcher_host.cc index 50b9aec..cfcbed7 100644 --- a/chrome/browser/speech/speech_input_dispatcher_host.cc +++ b/chrome/browser/speech/speech_input_dispatcher_host.cc @@ -5,7 +5,7 @@ #include "chrome/browser/speech/speech_input_dispatcher_host.h" #include "base/lazy_instance.h" -#include "chrome/common/render_messages.h" +#include "chrome/common/speech_input_messages.h" namespace speech_input { @@ -123,9 +123,9 @@ bool SpeechInputDispatcherHost::OnMessageReceived( DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO)); uint32 message_type = message.type(); - if (message_type == ViewHostMsg_SpeechInput_StartRecognition::ID || - message_type == ViewHostMsg_SpeechInput_CancelRecognition::ID || - message_type == ViewHostMsg_SpeechInput_StopRecording::ID) { + if (message_type == SpeechInputHostMsg_StartRecognition::ID || + message_type == SpeechInputHostMsg_CancelRecognition::ID || + message_type == SpeechInputHostMsg_StopRecording::ID) { if (!SpeechInputManager::IsFeatureEnabled()) { *message_was_ok = false; return true; @@ -133,11 +133,11 @@ bool SpeechInputDispatcherHost::OnMessageReceived( IPC_BEGIN_MESSAGE_MAP_EX(SpeechInputDispatcherHost, message, *message_was_ok) - IPC_MESSAGE_HANDLER(ViewHostMsg_SpeechInput_StartRecognition, + IPC_MESSAGE_HANDLER(SpeechInputHostMsg_StartRecognition, OnStartRecognition) - IPC_MESSAGE_HANDLER(ViewHostMsg_SpeechInput_CancelRecognition, + IPC_MESSAGE_HANDLER(SpeechInputHostMsg_CancelRecognition, OnCancelRecognition) - IPC_MESSAGE_HANDLER(ViewHostMsg_SpeechInput_StopRecording, + IPC_MESSAGE_HANDLER(SpeechInputHostMsg_StopRecording, OnStopRecording) IPC_END_MESSAGE_MAP() return true; @@ -147,17 +147,14 @@ bool SpeechInputDispatcherHost::OnMessageReceived( } void SpeechInputDispatcherHost::OnStartRecognition( - int render_view_id, - int request_id, - const gfx::Rect& element_rect, - const std::string& language, - const std::string& grammar) { + const SpeechInputHostMsg_StartRecognition_Params ¶ms) { int caller_id = g_speech_input_callers.Get().CreateId( - render_process_id_, render_view_id, request_id); + render_process_id_, params.render_view_id, params.request_id); manager()->StartRecognition(this, caller_id, render_process_id_, - render_view_id, element_rect, - language, grammar); + params.render_view_id, params.element_rect, + params.language, params.grammar, + params.origin_url); } void SpeechInputDispatcherHost::OnCancelRecognition(int render_view_id, @@ -186,9 +183,9 @@ void SpeechInputDispatcherHost::SetRecognitionResult( int caller_render_view_id = g_speech_input_callers.Get().render_view_id(caller_id); int caller_request_id = g_speech_input_callers.Get().request_id(caller_id); - Send(new ViewMsg_SpeechInput_SetRecognitionResult(caller_render_view_id, - caller_request_id, - result)); + Send(new SpeechInputMsg_SetRecognitionResult(caller_render_view_id, + 
caller_request_id, + result)); VLOG(1) << "SpeechInputDispatcherHost::SetRecognitionResult exit"; } @@ -198,8 +195,8 @@ void SpeechInputDispatcherHost::DidCompleteRecording(int caller_id) { int caller_render_view_id = g_speech_input_callers.Get().render_view_id(caller_id); int caller_request_id = g_speech_input_callers.Get().request_id(caller_id); - Send(new ViewMsg_SpeechInput_RecordingComplete(caller_render_view_id, - caller_request_id)); + Send(new SpeechInputMsg_RecordingComplete(caller_render_view_id, + caller_request_id)); VLOG(1) << "SpeechInputDispatcherHost::DidCompleteRecording exit"; } @@ -209,8 +206,8 @@ void SpeechInputDispatcherHost::DidCompleteRecognition(int caller_id) { int caller_render_view_id = g_speech_input_callers.Get().render_view_id(caller_id); int caller_request_id = g_speech_input_callers.Get().request_id(caller_id); - Send(new ViewMsg_SpeechInput_RecognitionComplete(caller_render_view_id, - caller_request_id)); + Send(new SpeechInputMsg_RecognitionComplete(caller_render_view_id, + caller_request_id)); // Request sequence ended, so remove mapping. g_speech_input_callers.Get().RemoveId(caller_id); VLOG(1) << "SpeechInputDispatcherHost::DidCompleteRecognition exit"; diff --git a/chrome/browser/speech/speech_input_dispatcher_host.h b/chrome/browser/speech/speech_input_dispatcher_host.h index 51ae04c..d8befd3 100644 --- a/chrome/browser/speech/speech_input_dispatcher_host.h +++ b/chrome/browser/speech/speech_input_dispatcher_host.h @@ -9,6 +9,8 @@ #include "chrome/browser/browser_message_filter.h" #include "chrome/browser/speech/speech_input_manager.h" +struct SpeechInputHostMsg_StartRecognition_Params; + namespace speech_input { // SpeechInputDispatcherHost is a delegate for Speech API messages used by @@ -39,10 +41,8 @@ class SpeechInputDispatcherHost : public BrowserMessageFilter, private: virtual ~SpeechInputDispatcherHost(); - void OnStartRecognition(int render_view_id, int request_id, - const gfx::Rect& element_rect, - const std::string& language, - const std::string& grammar); + void OnStartRecognition( + const SpeechInputHostMsg_StartRecognition_Params ¶ms); void OnCancelRecognition(int render_view_id, int request_id); void OnStopRecording(int render_view_id, int request_id); diff --git a/chrome/browser/speech/speech_input_manager.cc b/chrome/browser/speech/speech_input_manager.cc index 25f0550..f59a3b7 100644 --- a/chrome/browser/speech/speech_input_manager.cc +++ b/chrome/browser/speech/speech_input_manager.cc @@ -14,6 +14,7 @@ #include "base/ref_counted.h" #include "base/threading/thread_restrictions.h" #include "base/utf_string_conversions.h" +#include "chrome/browser/browser_process.h" #include "chrome/browser/browser_thread.h" #include "chrome/browser/platform_util.h" #include "chrome/browser/prefs/pref_service.h" @@ -27,26 +28,26 @@ #include "media/audio/audio_manager.h" #if defined(OS_WIN) -#include "chrome/browser/browser_process.h" #include "chrome/installer/util/wmi.h" #endif namespace { -// Asynchronously fetches the PC and audio hardware/driver info on windows if +// Asynchronously fetches the PC and audio hardware/driver info if // the user has opted into UMA. This information is sent with speech input // requests to the server for identifying and improving quality issues with // specific device configurations. 
-class HardwareInfo : public base::RefCountedThreadSafe<HardwareInfo> { +class OptionalRequestInfo + : public base::RefCountedThreadSafe<OptionalRequestInfo> { public: - HardwareInfo() {} + OptionalRequestInfo() : can_report_metrics_(false) {} -#if defined(OS_WIN) void Refresh() { DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO)); // UMA opt-in can be checked only from the UI thread, so switch to that. BrowserThread::PostTask(BrowserThread::UI, FROM_HERE, - NewRunnableMethod(this, &HardwareInfo::CheckUMAAndGetHardwareInfo)); + NewRunnableMethod(this, + &OptionalRequestInfo::CheckUMAAndGetHardwareInfo)); } void CheckUMAAndGetHardwareInfo() { @@ -55,16 +56,22 @@ class HardwareInfo : public base::RefCountedThreadSafe<HardwareInfo> { prefs::kMetricsReportingEnabled)) { // Access potentially slow OS calls from the FILE thread. BrowserThread::PostTask(BrowserThread::FILE, FROM_HERE, - NewRunnableMethod(this, &HardwareInfo::GetHardwareInfo)); + NewRunnableMethod(this, &OptionalRequestInfo::GetHardwareInfo)); } } void GetHardwareInfo() { DCHECK(BrowserThread::CurrentlyOn(BrowserThread::FILE)); AutoLock lock(lock_); + can_report_metrics_ = true; +#if defined(OS_WIN) value_ = UTF16ToUTF8( installer::WMIComputerSystem::GetModel() + L"|" + AudioManager::GetAudioManager()->GetAudioInputDeviceModel()); +#else // defined(OS_WIN) + value_ = UTF16ToUTF8( + AudioManager::GetAudioManager()->GetAudioInputDeviceModel()); +#endif // defined(OS_WIN) } std::string value() { @@ -72,16 +79,17 @@ class HardwareInfo : public base::RefCountedThreadSafe<HardwareInfo> { return value_; } + bool can_report_metrics() { + AutoLock lock(lock_); + return can_report_metrics_; + } + private: Lock lock_; std::string value_; + bool can_report_metrics_; -#else // defined(OS_WIN) - void Refresh() {} - std::string value() { return std::string(); } -#endif // defined(OS_WIN) - - DISALLOW_COPY_AND_ASSIGN(HardwareInfo); + DISALLOW_COPY_AND_ASSIGN(OptionalRequestInfo); }; } // namespace @@ -99,7 +107,8 @@ class SpeechInputManagerImpl : public SpeechInputManager, int render_view_id, const gfx::Rect& element_rect, const std::string& language, - const std::string& grammar); + const std::string& grammar, + const std::string& origin_url); virtual void CancelRecognition(int caller_id); virtual void StopRecording(int caller_id); @@ -143,7 +152,7 @@ class SpeechInputManagerImpl : public SpeechInputManager, SpeechRecognizerMap requests_; int recording_caller_id_; scoped_refptr<SpeechInputBubbleController> bubble_controller_; - scoped_refptr<HardwareInfo> hardware_info_; + scoped_refptr<OptionalRequestInfo> optional_request_info_; }; static ::base::LazyInstance<SpeechInputManagerImpl> g_speech_input_manager_impl( @@ -200,14 +209,15 @@ void SpeechInputManagerImpl::StartRecognition( int render_view_id, const gfx::Rect& element_rect, const std::string& language, - const std::string& grammar) { + const std::string& grammar, + const std::string& origin_url) { DCHECK(!HasPendingRequest(caller_id)); bubble_controller_->CreateBubble(caller_id, render_process_id, render_view_id, element_rect); - if (!hardware_info_.get()) { - hardware_info_ = new HardwareInfo(); + if (!optional_request_info_.get()) { + optional_request_info_ = new OptionalRequestInfo(); // Since hardware info is optional with speech input requests, we start an // asynchronous fetch here and move on with recording audio. 
This first // speech input request would send an empty string for hardware info and @@ -215,13 +225,14 @@ void SpeechInputManagerImpl::StartRecognition( // completed before them. This way we don't end up stalling the user with // a long wait and disk seeks when they click on a UI element and start // speaking. - hardware_info_->Refresh(); + optional_request_info_->Refresh(); } SpeechInputRequest* request = &requests_[caller_id]; request->delegate = delegate; - request->recognizer = new SpeechRecognizer(this, caller_id, language, - grammar, hardware_info_->value()); + request->recognizer = new SpeechRecognizer( + this, caller_id, language, grammar, optional_request_info_->value(), + optional_request_info_->can_report_metrics() ? origin_url : ""); request->is_active = false; StartRecognitionForRequest(caller_id); diff --git a/chrome/browser/speech/speech_input_manager.h b/chrome/browser/speech/speech_input_manager.h index ffeaba0..83bde90 100644 --- a/chrome/browser/speech/speech_input_manager.h +++ b/chrome/browser/speech/speech_input_manager.h @@ -37,7 +37,7 @@ class SpeechInputManager { static bool IsFeatureEnabled(); // Factory method to access the singleton. We have this method here instead of - // using Singleton<> directly in the calling code to aid tests in injection + // using Singleton directly in the calling code to aid tests in injection // mocks. static SpeechInputManager* Get(); // Factory method definition useful for tests. @@ -59,7 +59,8 @@ class SpeechInputManager { int render_view_id, const gfx::Rect& element_rect, const std::string& language, - const std::string& grammar) = 0; + const std::string& grammar, + const std::string& origin_url) = 0; virtual void CancelRecognition(int caller_id) = 0; virtual void StopRecording(int caller_id) = 0; }; diff --git a/chrome/browser/speech/speech_recognition_request.cc b/chrome/browser/speech/speech_recognition_request.cc index 1281666..754534a 100644 --- a/chrome/browser/speech/speech_recognition_request.cc +++ b/chrome/browser/speech/speech_recognition_request.cc @@ -123,6 +123,7 @@ SpeechRecognitionRequest::~SpeechRecognitionRequest() {} bool SpeechRecognitionRequest::Send(const std::string& language, const std::string& grammar, const std::string& hardware_info, + const std::string& origin_url, const std::string& content_type, const std::string& audio_data) { DCHECK(!url_fetcher_.get()); @@ -161,6 +162,7 @@ bool SpeechRecognitionRequest::Send(const std::string& language, this)); url_fetcher_->set_upload_data(content_type, audio_data); url_fetcher_->set_request_context(url_context_); + url_fetcher_->set_referrer(origin_url); // The speech recognition API does not require user identification as part // of requests, so we don't send cookies or auth data for these requests to diff --git a/chrome/browser/speech/speech_recognition_request.h b/chrome/browser/speech/speech_recognition_request.h index c12fc4d..9b022cf 100644 --- a/chrome/browser/speech/speech_recognition_request.h +++ b/chrome/browser/speech/speech_recognition_request.h @@ -48,6 +48,7 @@ class SpeechRecognitionRequest : public URLFetcher::Delegate { bool Send(const std::string& language, const std::string& grammar, const std::string& hardware_info, + const std::string& origin_url, const std::string& content_type, const std::string& audio_data); diff --git a/chrome/browser/speech/speech_recognition_request_unittest.cc b/chrome/browser/speech/speech_recognition_request_unittest.cc index cf3f60b..bd2a26e 100644 --- a/chrome/browser/speech/speech_recognition_request_unittest.cc 
+++ b/chrome/browser/speech/speech_recognition_request_unittest.cc @@ -47,7 +47,7 @@ void SpeechRecognitionRequestTest::CreateAndTestRequest( bool success, const std::string& http_response) { SpeechRecognitionRequest request(NULL, this); request.Send(std::string(), std::string(), std::string(), std::string(), - std::string()); + std::string(), std::string()); TestURLFetcher* fetcher = url_fetcher_factory_.GetFetcherByID(0); ASSERT_TRUE(fetcher); net::URLRequestStatus status; diff --git a/chrome/browser/speech/speech_recognizer.cc b/chrome/browser/speech/speech_recognizer.cc index 6d46a72..38bbeca 100644 --- a/chrome/browser/speech/speech_recognizer.cc +++ b/chrome/browser/speech/speech_recognizer.cc @@ -39,12 +39,14 @@ SpeechRecognizer::SpeechRecognizer(Delegate* delegate, int caller_id, const std::string& language, const std::string& grammar, - const std::string& hardware_info) + const std::string& hardware_info, + const std::string& origin_url) : delegate_(delegate), caller_id_(caller_id), language_(language), grammar_(grammar), hardware_info_(hardware_info), + origin_url_(origin_url), codec_(AudioEncoder::CODEC_SPEEX), encoder_(NULL), endpointer_(kAudioSampleRate), @@ -136,8 +138,8 @@ void SpeechRecognizer::StopRecording() { DCHECK(!request_.get()); request_.reset(new SpeechRecognitionRequest( Profile::GetDefaultRequestContext(), this)); - request_->Send(language_, grammar_, hardware_info_, encoder_->mime_type(), - data); + request_->Send(language_, grammar_, hardware_info_, origin_url_, + encoder_->mime_type(), data); } encoder_.reset(); } diff --git a/chrome/browser/speech/speech_recognizer.h b/chrome/browser/speech/speech_recognizer.h index 5e8511f..2570fba 100644 --- a/chrome/browser/speech/speech_recognizer.h +++ b/chrome/browser/speech/speech_recognizer.h @@ -76,7 +76,8 @@ class SpeechRecognizer int caller_id, const std::string& language, const std::string& grammar, - const std::string& hardware_info); + const std::string& hardware_info, + const std::string& origin_url); ~SpeechRecognizer(); // Starts audio recording and does recognition after recording ends. 
The same @@ -126,6 +127,7 @@ class SpeechRecognizer std::string language_; std::string grammar_; std::string hardware_info_; + std::string origin_url_; scoped_ptr<SpeechRecognitionRequest> request_; scoped_refptr<media::AudioInputController> audio_controller_; diff --git a/chrome/browser/speech/speech_recognizer_unittest.cc b/chrome/browser/speech/speech_recognizer_unittest.cc index 05830d5d..855f35a 100644 --- a/chrome/browser/speech/speech_recognizer_unittest.cc +++ b/chrome/browser/speech/speech_recognizer_unittest.cc @@ -24,7 +24,8 @@ class SpeechRecognizerTest : public SpeechRecognizerDelegate, : io_thread_(BrowserThread::IO, &message_loop_), ALLOW_THIS_IN_INITIALIZER_LIST( recognizer_(new SpeechRecognizer(this, 1, std::string(), - std::string(), std::string()))), + std::string(), std::string(), + std::string()))), recording_complete_(false), recognition_complete_(false), result_received_(false), diff --git a/chrome/chrome_common.gypi b/chrome/chrome_common.gypi index 869653f..dfec0d6 100644 --- a/chrome/chrome_common.gypi +++ b/chrome/chrome_common.gypi @@ -157,6 +157,8 @@ 'common/set_process_title.h', 'common/set_process_title_linux.cc', 'common/set_process_title_linux.h', + 'common/speech_input_messages.cc', + 'common/speech_input_messages.h', 'common/switch_utils.cc', 'common/switch_utils.h', 'common/time_format.cc', diff --git a/chrome/common/net/url_fetcher.cc b/chrome/common/net/url_fetcher.cc index 53487b7..e2aad81 100644 --- a/chrome/common/net/url_fetcher.cc +++ b/chrome/common/net/url_fetcher.cc @@ -127,6 +127,7 @@ class URLFetcher::Core std::string upload_content_; // HTTP POST payload std::string upload_content_type_; // MIME type of POST payload + std::string referrer_; // HTTP Referer header value // Used to determine how long to wait before making a request or doing a // retry. @@ -336,6 +337,7 @@ void URLFetcher::Core::StartURLRequest() { } request_->set_load_flags(flags); request_->set_context(request_context_getter_->GetURLRequestContext()); + request_->set_referrer(referrer_); switch (request_type_) { case GET: @@ -481,6 +483,10 @@ const std::string& URLFetcher::upload_data() const { return core_->upload_content_; } +void URLFetcher::set_referrer(const std::string& referrer) { + core_->referrer_ = referrer; +} + void URLFetcher::set_load_flags(int load_flags) { core_->load_flags_ = load_flags; } diff --git a/chrome/common/net/url_fetcher.h b/chrome/common/net/url_fetcher.h index dfa29a9..19edbb9 100644 --- a/chrome/common/net/url_fetcher.h +++ b/chrome/common/net/url_fetcher.h @@ -139,6 +139,10 @@ class URLFetcher { // Returns the current load flags. int load_flags() const; + // The referrer URL for the request. Must be called before the request is + // started. + void set_referrer(const std::string& referrer); + // Set extra headers on the request. Must be called before the request // is started. 
void set_extra_request_headers(const std::string& extra_request_headers); diff --git a/chrome/common/render_messages.cc b/chrome/common/render_messages.cc index 981ef91..eaf1504 100644 --- a/chrome/common/render_messages.cc +++ b/chrome/common/render_messages.cc @@ -9,7 +9,6 @@ #include "chrome/common/gpu_param_traits.h" #include "chrome/common/render_messages_params.h" #include "chrome/common/resource_response.h" -#include "chrome/common/speech_input_result.h" #include "chrome/common/thumbnail_score.h" #include "chrome/common/web_apps.h" #include "gfx/rect.h" @@ -1220,28 +1219,6 @@ void ParamTraits<AudioBuffersState>::Log(const param_type& p, std::string* l) { l->append(")"); } -void ParamTraits<speech_input::SpeechInputResultItem>::Write( - Message* m, const param_type& p) { - WriteParam(m, p.utterance); - WriteParam(m, p.confidence); -} - -bool ParamTraits<speech_input::SpeechInputResultItem>::Read(const Message* m, - void** iter, - param_type* p) { - return ReadParam(m, iter, &p->utterance) && - ReadParam(m, iter, &p->confidence); -} - -void ParamTraits<speech_input::SpeechInputResultItem>::Log(const param_type& p, - std::string* l) { - l->append("("); - LogParam(p.utterance, l); - l->append(":"); - LogParam(p.confidence, l); - l->append(")"); -} - void ParamTraits<PP_Flash_NetAddress>::Write(Message* m, const param_type& p) { WriteParam(m, p.size); m->WriteBytes(p.data, p.size); diff --git a/chrome/common/render_messages.h b/chrome/common/render_messages.h index 2902124..b681f8f 100644 --- a/chrome/common/render_messages.h +++ b/chrome/common/render_messages.h @@ -48,10 +48,6 @@ namespace webkit_blob { class BlobData; } -namespace speech_input { -struct SpeechInputResultItem; -} - namespace webkit_glue { struct FormData; class FormField; @@ -557,14 +553,6 @@ struct ParamTraits<AudioBuffersState> { }; template <> -struct ParamTraits<speech_input::SpeechInputResultItem> { - typedef speech_input::SpeechInputResultItem param_type; - static void Write(Message* m, const param_type& p); - static bool Read(const Message* m, void** iter, param_type* p); - static void Log(const param_type& p, std::string* l); -}; - -template <> struct ParamTraits<PP_Flash_NetAddress> { typedef PP_Flash_NetAddress param_type; static void Write(Message* m, const param_type& p); diff --git a/chrome/common/render_messages_internal.h b/chrome/common/render_messages_internal.h index 2a6f683..f2c588c 100644 --- a/chrome/common/render_messages_internal.h +++ b/chrome/common/render_messages_internal.h @@ -19,7 +19,6 @@ #include "chrome/common/nacl_types.h" #include "chrome/common/notification_type.h" #include "chrome/common/page_zoom.h" -#include "chrome/common/speech_input_result.h" #include "chrome/common/translate_errors.h" #include "chrome/common/window_container_type.h" #include "ipc/ipc_message_macros.h" @@ -1018,22 +1017,6 @@ IPC_MESSAGE_ROUTED1(ViewMsg_AccessibilityDoDefaultAction, // message was processed and it can send addition notifications. IPC_MESSAGE_ROUTED0(ViewMsg_AccessibilityNotifications_ACK) -// Relay a speech recognition result, either partial or final. -IPC_MESSAGE_ROUTED2(ViewMsg_SpeechInput_SetRecognitionResult, - int /* request id */, - speech_input::SpeechInputResultArray /* result */) - -// Indicate that speech recognizer has stopped recording and started -// recognition. -IPC_MESSAGE_ROUTED1(ViewMsg_SpeechInput_RecordingComplete, - int /* request id */) - -// Indicate that speech recognizer has completed recognition. 
This will be -// the last message sent in response to a -// ViewHostMsg_SpeechInput_StartRecognition. -IPC_MESSAGE_ROUTED1(ViewMsg_SpeechInput_RecognitionComplete, - int /* request id */) - // Notification that the device's orientation has changed. IPC_MESSAGE_ROUTED1(ViewMsg_DeviceOrientationUpdated, ViewMsg_DeviceOrientationUpdated_Params) @@ -2462,30 +2445,6 @@ IPC_MESSAGE_ROUTED3(ViewHostMsg_UpdateZoomLimits, int /* maximum_percent */, bool /* remember */) -// Requests the speech input service to start speech recognition on behalf of -// the given |render_view_id|. -IPC_MESSAGE_CONTROL5(ViewHostMsg_SpeechInput_StartRecognition, - int /* render_view_id */, - int /* request_id */, - gfx::Rect /* element_rect */, - std::string /* language */, - std::string /* grammar */) - -// Requests the speech input service to cancel speech recognition on behalf of -// the given |render_view_id|. If speech recognition is not happening nor or -// is happening on behalf of some other render view, this call does nothing. -IPC_MESSAGE_CONTROL2(ViewHostMsg_SpeechInput_CancelRecognition, - int /* render_view_id */, - int /* request id */) - -// Requests the speech input service to stop audio recording on behalf of -// the given |render_view_id|. Any audio recorded so far will be fed to the -// speech recognizer. If speech recognition is not happening nor or is -// happening on behalf of some other render view, this call does nothing. -IPC_MESSAGE_CONTROL2(ViewHostMsg_SpeechInput_StopRecording, - int /* render_view_id */, - int /* request id */) - //--------------------------------------------------------------------------- // Device orientation services messages: diff --git a/chrome/common/speech_input_messages.cc b/chrome/common/speech_input_messages.cc new file mode 100644 index 0000000..c93062f --- /dev/null +++ b/chrome/common/speech_input_messages.cc @@ -0,0 +1,83 @@ +// Copyright (c) 2011 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#include "chrome/common/common_param_traits.h" + +#define IPC_MESSAGE_IMPL +#include "chrome/common/speech_input_messages.h" + +SpeechInputHostMsg_StartRecognition_Params:: +SpeechInputHostMsg_StartRecognition_Params() + : render_view_id(0), + request_id(0) { +} + +SpeechInputHostMsg_StartRecognition_Params:: +~SpeechInputHostMsg_StartRecognition_Params() { +} + +namespace IPC { + +void ParamTraits<speech_input::SpeechInputResultItem>::Write( + Message* m, const param_type& p) { + WriteParam(m, p.utterance); + WriteParam(m, p.confidence); +} + +bool ParamTraits<speech_input::SpeechInputResultItem>::Read(const Message* m, + void** iter, + param_type* p) { + return ReadParam(m, iter, &p->utterance) && + ReadParam(m, iter, &p->confidence); +} + +void ParamTraits<speech_input::SpeechInputResultItem>::Log(const param_type& p, + std::string* l) { + l->append("("); + LogParam(p.utterance, l); + l->append(":"); + LogParam(p.confidence, l); + l->append(")"); +} + +void ParamTraits<SpeechInputHostMsg_StartRecognition_Params>::Write( + Message* m, + const param_type& p) { + WriteParam(m, p.render_view_id); + WriteParam(m, p.request_id); + WriteParam(m, p.element_rect); + WriteParam(m, p.language); + WriteParam(m, p.grammar); + WriteParam(m, p.origin_url); +} + +bool ParamTraits<SpeechInputHostMsg_StartRecognition_Params>::Read( + const Message* m, void** iter, param_type* p) { + return + ReadParam(m, iter, &p->render_view_id) && + ReadParam(m, iter, &p->request_id) && + ReadParam(m, iter, &p->element_rect) && + ReadParam(m, iter, &p->language) && + ReadParam(m, iter, &p->grammar) && + ReadParam(m, iter, &p->origin_url); +} + +void ParamTraits<SpeechInputHostMsg_StartRecognition_Params>::Log( + const param_type& p, std::string* l) { + l->append("("); + LogParam(p.render_view_id, l); + l->append(", "); + LogParam(p.request_id, l); + l->append(", "); + LogParam(p.element_rect, l); + l->append(", "); + LogParam(p.language, l); + l->append(", "); + LogParam(p.grammar, l); + l->append(", "); + LogParam(p.origin_url, l); + l->append(")"); +} + +} // namespace IPC diff --git a/chrome/common/speech_input_messages.h b/chrome/common/speech_input_messages.h new file mode 100644 index 0000000..2a856e5 --- /dev/null +++ b/chrome/common/speech_input_messages.h @@ -0,0 +1,93 @@ +// Copyright (c) 2011 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef CHROME_COMMON_SPEECH_INPUT_MESSAGES_H_ +#define CHROME_COMMON_SPEECH_INPUT_MESSAGES_H_ +#pragma once + +#include "chrome/common/speech_input_result.h" +#include "gfx/rect.h" +#include "ipc/ipc_message_macros.h" +#include "ipc/ipc_param_traits.h" + +#define IPC_MESSAGE_START SpeechInputMsgStart + +namespace speech_input { +struct SpeechInputResultItem; +} + +// Used to start a speech recognition session. +struct SpeechInputHostMsg_StartRecognition_Params { + SpeechInputHostMsg_StartRecognition_Params(); + ~SpeechInputHostMsg_StartRecognition_Params(); + + int render_view_id; // The render view requesting speech recognition. + int request_id; // Request ID used within the render view. + gfx::Rect element_rect; // Position of the UI element in page coordinates. + std::string language; // Language to use for speech recognition. + std::string grammar; // Speech grammar given by the speech input element. + std::string origin_url; // URL of the page (or iframe if applicable). 
+}; + +namespace IPC { + +template <> +struct ParamTraits<speech_input::SpeechInputResultItem> { + typedef speech_input::SpeechInputResultItem param_type; + static void Write(Message* m, const param_type& p); + static bool Read(const Message* m, void** iter, param_type* p); + static void Log(const param_type& p, std::string* l); +}; + +template <> +struct ParamTraits<SpeechInputHostMsg_StartRecognition_Params> { + typedef SpeechInputHostMsg_StartRecognition_Params param_type; + static void Write(Message* m, const param_type& p); + static bool Read(const Message* m, void** iter, param_type* p); + static void Log(const param_type& p, std::string* l); +}; + +} // namespace IPC + +// Speech input messages sent from the renderer to the browser. + +// Requests the speech input service to start speech recognition on behalf of +// the given |render_view_id|. +IPC_MESSAGE_CONTROL1(SpeechInputHostMsg_StartRecognition, + SpeechInputHostMsg_StartRecognition_Params) + +// Requests the speech input service to cancel speech recognition on behalf of +// the given |render_view_id|. If speech recognition is not happening or +// is happening on behalf of some other render view, this call does nothing. +IPC_MESSAGE_CONTROL2(SpeechInputHostMsg_CancelRecognition, + int /* render_view_id */, + int /* request_id */) + +// Requests the speech input service to stop audio recording on behalf of +// the given |render_view_id|. Any audio recorded so far will be fed to the +// speech recognizer. If speech recognition is not happening nor or is +// happening on behalf of some other render view, this call does nothing. +IPC_MESSAGE_CONTROL2(SpeechInputHostMsg_StopRecording, + int /* render_view_id */, + int /* request_id */) + +// Speech input messages sent from the browser to the renderer. + +// Relay a speech recognition result, either partial or final. +IPC_MESSAGE_ROUTED2(SpeechInputMsg_SetRecognitionResult, + int /* request_id */, + speech_input::SpeechInputResultArray /* result */) + +// Indicate that speech recognizer has stopped recording and started +// recognition. +IPC_MESSAGE_ROUTED1(SpeechInputMsg_RecordingComplete, + int /* request_id */) + +// Indicate that speech recognizer has completed recognition. This will be +// the last message sent in response to a +// ViewHostMsg_SpeechInput_StartRecognition. 
+IPC_MESSAGE_ROUTED1(SpeechInputMsg_RecognitionComplete, + int /* request_id */) + +#endif // CHROME_COMMON_SPEECH_INPUT_MESSAGES_H_ diff --git a/chrome/renderer/speech_input_dispatcher.cc b/chrome/renderer/speech_input_dispatcher.cc index 9ea368f..ec2b0a9 100644 --- a/chrome/renderer/speech_input_dispatcher.cc +++ b/chrome/renderer/speech_input_dispatcher.cc @@ -5,10 +5,12 @@ #include "chrome/renderer/speech_input_dispatcher.h" #include "base/utf_string_conversions.h" +#include "chrome/common/speech_input_messages.h" #include "chrome/renderer/render_view.h" #include "third_party/WebKit/Source/WebKit/chromium/public/WebFrame.h" -#include "third_party/WebKit/Source/WebKit/chromium/public/WebSpeechInputListener.h" #include "third_party/WebKit/Source/WebKit/chromium/public/WebSize.h" +#include "third_party/WebKit/Source/WebKit/chromium/public/WebSecurityOrigin.h" +#include "third_party/WebKit/Source/WebKit/chromium/public/WebSpeechInputListener.h" #include "third_party/WebKit/Source/WebKit/chromium/public/WebString.h" #include "third_party/WebKit/Source/WebKit/chromium/public/WebView.h" @@ -24,11 +26,11 @@ SpeechInputDispatcher::SpeechInputDispatcher( bool SpeechInputDispatcher::OnMessageReceived(const IPC::Message& message) { bool handled = true; IPC_BEGIN_MESSAGE_MAP(SpeechInputDispatcher, message) - IPC_MESSAGE_HANDLER(ViewMsg_SpeechInput_SetRecognitionResult, + IPC_MESSAGE_HANDLER(SpeechInputMsg_SetRecognitionResult, OnSpeechRecognitionResult) - IPC_MESSAGE_HANDLER(ViewMsg_SpeechInput_RecordingComplete, + IPC_MESSAGE_HANDLER(SpeechInputMsg_RecordingComplete, OnSpeechRecordingComplete) - IPC_MESSAGE_HANDLER(ViewMsg_SpeechInput_RecognitionComplete, + IPC_MESSAGE_HANDLER(SpeechInputMsg_RecognitionComplete, OnSpeechRecognitionComplete) IPC_MESSAGE_UNHANDLED(handled = false) IPC_END_MESSAGE_MAP() @@ -39,27 +41,34 @@ bool SpeechInputDispatcher::startRecognition( int request_id, const WebKit::WebRect& element_rect, const WebKit::WebString& language, - const WebKit::WebString& grammar) { + const WebKit::WebString& grammar, + const WebKit::WebSecurityOrigin& origin) { VLOG(1) << "SpeechInputDispatcher::startRecognition enter"; + + SpeechInputHostMsg_StartRecognition_Params params; + params.grammar = UTF16ToUTF8(grammar); + params.language = UTF16ToUTF8(language); + params.origin_url = UTF16ToUTF8(origin.toString()); + params.render_view_id = routing_id(); + params.request_id = request_id; gfx::Size scroll = render_view()->webview()->mainFrame()->scrollOffset(); - gfx::Rect rect = element_rect; - rect.Offset(-scroll.width(), -scroll.height()); - Send(new ViewHostMsg_SpeechInput_StartRecognition( - routing_id(), request_id, rect, - UTF16ToUTF8(language), UTF16ToUTF8(grammar))); + params.element_rect = element_rect; + params.element_rect.Offset(-scroll.width(), -scroll.height()); + + Send(new SpeechInputHostMsg_StartRecognition(params)); VLOG(1) << "SpeechInputDispatcher::startRecognition exit"; return true; } void SpeechInputDispatcher::cancelRecognition(int request_id) { VLOG(1) << "SpeechInputDispatcher::cancelRecognition enter"; - Send(new ViewHostMsg_SpeechInput_CancelRecognition(routing_id(), request_id)); + Send(new SpeechInputHostMsg_CancelRecognition(routing_id(), request_id)); VLOG(1) << "SpeechInputDispatcher::cancelRecognition exit"; } void SpeechInputDispatcher::stopRecording(int request_id) { VLOG(1) << "SpeechInputDispatcher::stopRecording enter"; - Send(new ViewHostMsg_SpeechInput_StopRecording(routing_id(), request_id)); + Send(new SpeechInputHostMsg_StopRecording(routing_id(), 
request_id)); VLOG(1) << "SpeechInputDispatcher::stopRecording exit"; } diff --git a/chrome/renderer/speech_input_dispatcher.h b/chrome/renderer/speech_input_dispatcher.h index 0cd921e..cbfc86e 100644 --- a/chrome/renderer/speech_input_dispatcher.h +++ b/chrome/renderer/speech_input_dispatcher.h @@ -32,7 +32,8 @@ class SpeechInputDispatcher : public RenderViewObserver, virtual bool startRecognition(int request_id, const WebKit::WebRect& element_rect, const WebKit::WebString& language, - const WebKit::WebString& grammar); + const WebKit::WebString& grammar, + const WebKit::WebSecurityOrigin& origin); virtual void cancelRecognition(int request_id); virtual void stopRecording(int request_id); diff --git a/ipc/ipc_message_utils.h b/ipc/ipc_message_utils.h index b16e05a..e945da0 100644 --- a/ipc/ipc_message_utils.h +++ b/ipc/ipc_message_utils.h @@ -65,6 +65,7 @@ enum IPCMessageStart { DOMStorageMsgStart, IndexedDBMsgStart, PepperFileMsgStart, + SpeechInputMsgStart, }; class DictionaryValue; |