// Copyright (c) 2010 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "chrome/gpu/gpu_thread.h"

#include "build/build_config.h"

#include "base/command_line.h"
#include "chrome/common/child_process.h"
#include "chrome/common/gpu_messages.h"
#include "chrome/gpu/gpu_config.h"

#if defined(OS_WIN)
#include "chrome/gpu/gpu_view_win.h"
#elif defined(GPU_USE_GLX)
#include "chrome/gpu/gpu_backing_store_glx_context.h"
#include "chrome/gpu/gpu_view_x.h"

#include <X11/Xutil.h>  // Must be last.
#endif

#if defined(OS_LINUX)
#include <gtk/gtk.h>
#endif
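
// On GLX builds the GPU thread opens its own connection to the X display;
// on Linux it also initializes Gtk (see the comment below for why).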
GpuThread::GpuThread() {
#if defined(GPU_USE_GLX)
  display_ = ::XOpenDisplay(NULL);
#endif
#if defined(OS_LINUX)
  {
    // The X11 port of the command buffer code assumes it can access the X
    // display via the macro GDK_DISPLAY(), which implies that Gtk has been
    // initialized. This code was taken from PluginThread. TODO(kbr):
    // rethink whether initializing Gtk is really necessary or whether we
    // should just send the display connection down to the GPUProcessor.
    g_thread_init(NULL);
    const std::vector<std::string>& args =
        CommandLine::ForCurrentProcess()->argv();
    int argc = args.size();
    scoped_array<char *> argv(new char *[argc + 1]);
    for (size_t i = 0; i < args.size(); ++i) {
      // TODO(piman@google.com): can gtk_init modify argv? Just being safe
      // here.
      argv[i] = strdup(args[i].c_str());
    }
    argv[argc] = NULL;
    char **argv_pointer = argv.get();
    gtk_init(&argc, &argv_pointer);
    for (size_t i = 0; i < args.size(); ++i) {
      free(argv[i]);
    }
  }
#endif
}

GpuThread::~GpuThread() {
}

#if defined(GPU_USE_GLX)
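// Lazily creates the GLX context object on first use; the GpuThread owns it.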
GpuBackingStoreGLXContext* GpuThread::GetGLXContext() {
  if (!glx_context_.get())
    glx_context_.reset(new GpuBackingStoreGLXContext(this));
  return glx_context_.get();
}
#endif
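
// Dispatches control messages received on the GPU process's channel to the
// handlers below.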
void GpuThread::OnControlMessageReceived(const IPC::Message& msg) {
  bool msg_is_ok = true;
  IPC_BEGIN_MESSAGE_MAP_EX(GpuThread, msg, msg_is_ok)
    IPC_MESSAGE_HANDLER(GpuMsg_EstablishChannel,
                        OnEstablishChannel)
    IPC_MESSAGE_HANDLER(GpuMsg_Synchronize,
                        OnSynchronize)
    IPC_MESSAGE_HANDLER(GpuMsg_NewRenderWidgetHostView,
                        OnNewRenderWidgetHostView)
  IPC_END_MESSAGE_MAP_EX()
}
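
// Creates (or reuses) the IPC channel for |renderer_id| and reports its
// handle back to the host; an empty handle is sent if Init() fails.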
void GpuThread::OnEstablishChannel(int renderer_id) {
  scoped_refptr<GpuChannel> channel;
  GpuChannelMap::const_iterator iter = gpu_channels_.find(renderer_id);
  if (iter == gpu_channels_.end()) {
    channel = new GpuChannel(renderer_id);
  } else {
    channel = iter->second;
  }

  DCHECK(channel != NULL);

  if (channel->Init()) {
    // TODO(apatrick): figure out when to remove channels from the map. They
    // will never be destroyed otherwise.
    gpu_channels_[renderer_id] = channel;
  } else {
    channel = NULL;
  }

  IPC::ChannelHandle channel_handle;
  if (channel.get()) {
    channel_handle.name = channel->GetChannelName();
#if defined(OS_POSIX)
    // On POSIX, pass the renderer-side FD. Also mark it as auto-close so that
    // it gets closed after it has been sent.
    int renderer_fd = channel->DisownRendererFd();
    channel_handle.socket = base::FileDescriptor(renderer_fd, true);
#endif
  }

  Send(new GpuHostMsg_ChannelEstablished(channel_handle));
}
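
// Replies to the host's synchronization request immediately.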
void GpuThread::OnSynchronize() {
  Send(new GpuHostMsg_SynchronizeReply());
}
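
// Creates the platform-specific GPU-side view for |parent_window|; see the
// lifetime note below.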
void GpuThread::OnNewRenderWidgetHostView(GpuNativeWindowHandle parent_window,
                                          int32 routing_id) {
  // The GPUView class' lifetime is controlled by the host, which will send a
  // message to destroy the GpuRWHView when necessary. So we don't manage the
  // lifetime of this object.
#if defined(OS_WIN)
  new GpuViewWin(this, parent_window, routing_id);
#elif defined(GPU_USE_GLX)
  new GpuViewX(this, parent_window, routing_id);
#else
  NOTIMPLEMENTED();
#endif
}