// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "ppapi/proxy/host_dispatcher.h"

#include <map>

#include "base/debug/trace_event.h"
#include "base/logging.h"
#include "ppapi/c/private/ppb_proxy_private.h"
#include "ppapi/c/ppb_var.h"
#include "ppapi/proxy/host_var_serialization_rules.h"
#include "ppapi/proxy/interface_list.h"
#include "ppapi/proxy/ppapi_messages.h"
#include "ppapi/proxy/resource_creation_proxy.h"

namespace ppapi {
namespace proxy {

namespace {

typedef std::map<PP_Instance, HostDispatcher*> InstanceToDispatcherMap;
InstanceToDispatcherMap* g_instance_to_dispatcher = NULL;

typedef std::map<PP_Module, HostDispatcher*> ModuleToDispatcherMap;
ModuleToDispatcherMap* g_module_to_dispatcher = NULL;

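// Callback installed via PPB_Proxy_Private::SetReserveInstanceIDCallback().
// Asks the plugin process that owns |module| whether |instance| is usable.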
PP_Bool ReserveInstanceID(PP_Module module, PP_Instance instance) {
  // Default to returning true (usable) on failure. Otherwise, if there's some
  // kind of communication error or the plugin just crashed, we'll get into an
  // infinite loop generating new instance IDs since we think they're all in
  // use.
  ModuleToDispatcherMap::const_iterator found =
      g_module_to_dispatcher->find(module);
  if (found == g_module_to_dispatcher->end()) {
    NOTREACHED();
    return PP_TRUE;
  }

  bool usable = true;
  if (!found->second->Send(new PpapiMsg_ReserveInstanceId(instance, &usable)))
    return PP_TRUE;
  return PP_FromBool(usable);
}

// Saves the state of the given bool and puts it back when it goes out of
// scope.
class BoolRestorer {
 public:
  explicit BoolRestorer(bool* var) : var_(var), old_value_(*var) {
  }
  ~BoolRestorer() {
    *var_ = old_value_;
  }
 private:
  bool* var_;
  bool old_value_;
};

}  // namespace

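// Registers this dispatcher for |module| and installs ReserveInstanceID() as
// the module's instance-ID reservation callback.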
HostDispatcher::HostDispatcher(base::ProcessHandle remote_process_handle,
                               PP_Module module,
                               GetInterfaceFunc local_get_interface)
    : Dispatcher(remote_process_handle, local_get_interface),
      pp_module_(module),
      ppb_proxy_(NULL),
      allow_plugin_reentrancy_(false) {
  if (!g_module_to_dispatcher)
    g_module_to_dispatcher = new ModuleToDispatcherMap;
  (*g_module_to_dispatcher)[pp_module_] = this;

  SetSerializationRules(new HostVarSerializationRules(module));

  ppb_proxy_ = reinterpret_cast<const PPB_Proxy_Private*>(
      local_get_interface(PPB_PROXY_PRIVATE_INTERFACE));
  DCHECK(ppb_proxy_) << "The proxy interface should always be supported.";

  ppb_proxy_->SetReserveInstanceIDCallback(pp_module_, &ReserveInstanceID);
}

HostDispatcher::~HostDispatcher() {
  g_module_to_dispatcher->erase(pp_module_);
}

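// Completes host-side channel setup and then pushes the current Preferences
// down to the plugin process.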
bool HostDispatcher::InitHostWithChannel(
    Delegate* delegate,
    const IPC::ChannelHandle& channel_handle,
    bool is_client,
    const ppapi::Preferences& preferences) {
  if (!Dispatcher::InitWithChannel(delegate, channel_handle, is_client))
    return false;
  Send(new PpapiMsg_SetPreferences(preferences));
  return true;
}

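// Returns the dispatcher registered for |instance| via SetForInstance(), or
// NULL if there is none.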
// static
HostDispatcher* HostDispatcher::GetForInstance(PP_Instance instance) {
  if (!g_instance_to_dispatcher)
    return NULL;
  InstanceToDispatcherMap::iterator found = g_instance_to_dispatcher->find(
      instance);
  if (found == g_instance_to_dispatcher->end())
    return NULL;
  return found->second;
}

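// Registers |dispatcher| as the one handling |instance|. The mapping is
// removed by RemoveForInstance().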
// static
void HostDispatcher::SetForInstance(PP_Instance instance,
                                    HostDispatcher* dispatcher) {
  if (!g_instance_to_dispatcher)
    g_instance_to_dispatcher = new InstanceToDispatcherMap;
  (*g_instance_to_dispatcher)[instance] = dispatcher;
}

// static
void HostDispatcher::RemoveForInstance(PP_Instance instance) {
  if (!g_instance_to_dispatcher)
    return;
  InstanceToDispatcherMap::iterator found = g_instance_to_dispatcher->find(
      instance);
  if (found != g_instance_to_dispatcher->end())
    g_instance_to_dispatcher->erase(found);
}

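// This dispatcher lives on the host side of the channel, so IsPlugin() is
// always false.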
bool HostDispatcher::IsPlugin() const {
  return false;
}

bool HostDispatcher::Send(IPC::Message* msg) {
  TRACE_EVENT2("ppapi proxy", "HostDispatcher::Send",
               "Class", IPC_MESSAGE_ID_CLASS(msg->type()),
               "Line", IPC_MESSAGE_ID_LINE(msg->type()));

  // Normal sync messages are set to unblock, which would normally cause the
  // plugin to be reentered to process them. We only want to do this when we
  // know the plugin is in a state to accept reentrancy. Since the plugin side
  // never clears this flag on messages it sends, we can't get deadlock, but we
  // may still get reentrancy in the host as a result.
  if (!allow_plugin_reentrancy_)
    msg->set_unblock(false);

  if (msg->is_sync()) {
    // Don't allow sending sync messages during module shutdown. See the "else"
    // block below for why.
    CHECK(!PP_ToBool(ppb_proxy()->IsInModuleDestructor(pp_module())));

    // Prevent the dispatcher from going away during sync calls. Scenarios
    // where this could happen include a sync Send that, while waiting for its
    // reply, dispatches an incoming ExecuteScript call which destroys the
    // plugin module and, in turn, the dispatcher.
    ScopedModuleReference scoped_ref(this);
    return Dispatcher::Send(msg);
  } else {
    // We don't want to have a scoped ref for async message cases since async
    // messages are sent during module destruction. In this case, the module
    // will have a 0 refcount, and addrefing and releasing it would reenter
    // the destructor and crash.
    return Dispatcher::Send(msg);
  }
}

bool HostDispatcher::OnMessageReceived(const IPC::Message& msg) {
  TRACE_EVENT2("ppapi proxy", "HostDispatcher::OnMessageReceived",
               "Class", IPC_MESSAGE_ID_CLASS(msg.type()),
               "Line", IPC_MESSAGE_ID_LINE(msg.type()));
  // We only want to allow reentrancy when the most recent message from the
  // plugin was a scripting message. We save the old state of the flag on the
  // stack in case we (the host) are being reentered ourselves. The flag
  // is set to false here for all messages, and then the scripting API will
  // explicitly set it to true during processing of those messages that can be
  // reentered.
  BoolRestorer restorer(&allow_plugin_reentrancy_);
  allow_plugin_reentrancy_ = false;

  return Dispatcher::OnMessageReceived(msg);
}

void HostDispatcher::OnChannelError() {
  Dispatcher::OnChannelError();  // Stop using the channel.

  // Tell the host about the crash so it can clean up and display a
  // notification.
  ppb_proxy_->PluginCrashed(pp_module());
}

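// Returns the host-side proxy for |iface_name| only if the plugin reports
// that it supports the interface. The plugin's answer is cached in
// plugin_supported_ so each interface is queried at most once.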
const void* HostDispatcher::GetProxiedInterface(const std::string& iface_name) {
  const void* proxied_interface =
      InterfaceList::GetInstance()->GetInterfaceForPPP(iface_name);
  if (!proxied_interface)
    return NULL;  // Don't have a proxy for this interface; don't query further.

  PluginSupportedMap::iterator iter(plugin_supported_.find(iface_name));
  if (iter == plugin_supported_.end()) {
    // Need to query. Cache the result so we only do this once.
    bool supported = false;

    bool previous_reentrancy_value = allow_plugin_reentrancy_;
    allow_plugin_reentrancy_ = true;
    Send(new PpapiMsg_SupportsInterface(iface_name, &supported));
    allow_plugin_reentrancy_ = previous_reentrancy_value;

    std::pair<PluginSupportedMap::iterator, bool> iter_success_pair;
    iter_success_pair = plugin_supported_.insert(
        PluginSupportedMap::value_type(iface_name, supported));
    iter = iter_success_pair.first;
  }
  if (iter->second)
    return proxied_interface;
  return NULL;
}

void HostDispatcher::OnInvalidMessageReceived() {
  // TODO(brettw) bug 95345 kill the plugin when an invalid message is
  // received.
}

// ScopedModuleReference -------------------------------------------------------

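// Takes a reference on the plugin module for the lifetime of this object so
// the module (and its dispatcher) can't be destroyed during a sync call.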
ScopedModuleReference::ScopedModuleReference(Dispatcher* dispatcher) {
  DCHECK(!dispatcher->IsPlugin());
  dispatcher_ = static_cast<HostDispatcher*>(dispatcher);
  dispatcher_->ppb_proxy()->AddRefModule(dispatcher_->pp_module());
}

ScopedModuleReference::~ScopedModuleReference() {
  dispatcher_->ppb_proxy()->ReleaseModule(dispatcher_->pp_module());
}

}  // namespace proxy
}  // namespace ppapi