// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef CONTENT_RENDERER_SPEECH_RECOGNITION_DISPATCHER_H_
#define CONTENT_RENDERER_SPEECH_RECOGNITION_DISPATCHER_H_
#pragma once

#include <map>

#include "base/basictypes.h"
#include "content/public/renderer/render_view_observer.h"
#include "third_party/WebKit/Source/WebKit/chromium/public/platform/WebVector.h"
#include "third_party/WebKit/Source/WebKit/chromium/public/WebSpeechRecognitionHandle.h"
#include "third_party/WebKit/Source/WebKit/chromium/public/WebSpeechRecognizer.h"

class RenderViewImpl;

namespace content {
struct SpeechRecognitionError;
struct SpeechRecognitionResult;
}

// SpeechRecognitionDispatcher is a delegate for methods used by WebKit for
// scripted JS speech APIs. It's the complement of
// SpeechRecognitionDispatcherHost (owned by RenderViewHost).
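//
// A rough sketch (not the actual implementation) of how the renderer side
// typically routes these IPCs in the corresponding .cc file; the message
// names below are assumptions and may not match speech_recognition_messages.h
// exactly:
//
//   bool SpeechRecognitionDispatcher::OnMessageReceived(
//       const IPC::Message& message) {
//     bool handled = true;
//     IPC_BEGIN_MESSAGE_MAP(SpeechRecognitionDispatcher, message)
//       IPC_MESSAGE_HANDLER(SpeechRecognitionMsg_Started, OnRecognitionStarted)
//       IPC_MESSAGE_HANDLER(SpeechRecognitionMsg_ErrorOccurred, OnErrorOccurred)
//       IPC_MESSAGE_HANDLER(SpeechRecognitionMsg_Ended, OnRecognitionEnded)
//       IPC_MESSAGE_UNHANDLED(handled = false)
//     IPC_END_MESSAGE_MAP()
//     return handled;
//   }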
class SpeechRecognitionDispatcher : public content::RenderViewObserver,
                                    public WebKit::WebSpeechRecognizer {
 public:
  explicit SpeechRecognitionDispatcher(RenderViewImpl* render_view);
  virtual ~SpeechRecognitionDispatcher();

 private:
  // RenderViewObserver implementation.
  virtual bool OnMessageReceived(const IPC::Message& message) OVERRIDE;

  // WebKit::WebSpeechRecognizer implementation.
  virtual void start(const WebKit::WebSpeechRecognitionHandle&,
                     const WebKit::WebSpeechRecognitionParams&,
                     WebKit::WebSpeechRecognizerClient*) OVERRIDE;
  virtual void stop(const WebKit::WebSpeechRecognitionHandle&,
                    WebKit::WebSpeechRecognizerClient*) OVERRIDE;
  virtual void abort(const WebKit::WebSpeechRecognitionHandle&,
                     WebKit::WebSpeechRecognizerClient*) OVERRIDE;

  // Handlers for recognition events sent back from the browser process.
  void OnRecognitionStarted(int request_id);
  void OnAudioStarted(int request_id);
  void OnSoundStarted(int request_id);
  void OnSoundEnded(int request_id);
  void OnAudioEnded(int request_id);
  void OnErrorOccurred(int request_id,
                       const content::SpeechRecognitionError& error);
  void OnRecognitionEnded(int request_id);
  void OnResultRetrieved(int request_id,
                         const content::SpeechRecognitionResult& result);

  // Bookkeeping that maps WebKit recognition handles to the integer request
  // IDs used in IPC messages.
  int GetOrCreateIDForHandle(const WebKit::WebSpeechRecognitionHandle& handle);
  bool HandleExists(const WebKit::WebSpeechRecognitionHandle& handle);
  const WebKit::WebSpeechRecognitionHandle& GetHandleFromID(int handle_id);

  // The WebKit client class that we use to send events back to the JS world.
  WebKit::WebSpeechRecognizerClient* recognizer_client_;

  typedef std::map<int, WebKit::WebSpeechRecognitionHandle> HandleMap;
  HandleMap handle_map_;
  int next_id_;

  DISALLOW_COPY_AND_ASSIGN(SpeechRecognitionDispatcher);
};

#endif  // CONTENT_RENDERER_SPEECH_RECOGNITION_DISPATCHER_H_