Diffstat (limited to 'content')
-rw-r--r--  content/browser/gpu/gpu_process_host.cc             |   9
-rw-r--r--  content/browser/gpu/gpu_process_host_ui_shim.cc     |  10
-rw-r--r--  content/common/gpu/gpu_channel.cc                   |  22
-rw-r--r--  content/common/gpu/gpu_channel.h                    |  12
-rw-r--r--  content/common/gpu/gpu_channel_manager.cc           |  57
-rw-r--r--  content/common/gpu/gpu_channel_manager.h            |  25
-rw-r--r--  content/common/gpu/gpu_command_buffer_stub.cc       | 119
-rw-r--r--  content/common/gpu/gpu_command_buffer_stub.h        |  20
-rw-r--r--  content/common/gpu/gpu_messages.h                   |  18
-rw-r--r--  content/common/gpu/image_transport_surface_linux.cc | 448
-rw-r--r--  content/common/gpu/image_transport_surface_linux.h  |  67
-rw-r--r--  content/content_common.gypi                         |  14
12 files changed, 637 insertions(+), 184 deletions(-)
diff --git a/content/browser/gpu/gpu_process_host.cc b/content/browser/gpu/gpu_process_host.cc
index 3b3fc5c..e689b07 100644
--- a/content/browser/gpu/gpu_process_host.cc
+++ b/content/browser/gpu/gpu_process_host.cc
@@ -270,15 +270,6 @@ bool GpuProcessHost::Init() {
if (!CreateChannel())
return false;
-#if defined(TOUCH_UI)
- if (CommandLine::ForCurrentProcess()->GetSwitchValueASCII(
- switches::kUseGL) != gfx::kGLImplementationEGLName) {
- LOG(ERROR) << "GPU process needs EGL_KHR_image_pixmap extension. "
- << "Try --use-gl=egl on the command line.";
- return false;
- }
-#endif
-
if (in_process_) {
CommandLine::ForCurrentProcess()->AppendSwitch(
switches::kDisableGpuWatchdog);
diff --git a/content/browser/gpu/gpu_process_host_ui_shim.cc b/content/browser/gpu/gpu_process_host_ui_shim.cc
index 2bd04f1..365aa24 100644
--- a/content/browser/gpu/gpu_process_host_ui_shim.cc
+++ b/content/browser/gpu/gpu_process_host_ui_shim.cc
@@ -243,8 +243,8 @@ void GpuProcessHostUIShim::OnAcceleratedSurfaceSetIOSurface(
#elif defined(TOUCH_UI)
view->AcceleratedSurfaceSetIOSurface(
params.width, params.height, params.identifier);
- Send(new GpuMsg_AcceleratedSurfaceSetIOSurfaceACK(
- params.renderer_id, params.route_id, params.identifier));
+ Send(new AcceleratedSurfaceMsg_SetSurfaceACK(
+ params.route_id, params.identifier));
#endif
}
@@ -271,8 +271,7 @@ void GpuProcessHostUIShim::OnAcceleratedSurfaceBuffersSwapped(
params.swap_buffers_count);
#elif defined(TOUCH_UI)
view->AcceleratedSurfaceBuffersSwapped(params.surface_id);
- Send(new GpuMsg_AcceleratedSurfaceBuffersSwappedACK(
- params.renderer_id, params.route_id, params.swap_buffers_count));
+ Send(new AcceleratedSurfaceMsg_BuffersSwappedACK(params.route_id));
#endif
}
@@ -290,9 +289,6 @@ void GpuProcessHostUIShim::OnAcceleratedSurfaceRelease(
if (!view)
return;
view->AcceleratedSurfaceRelease(params.identifier);
-
- Send(new GpuMsg_AcceleratedSurfaceReleaseACK(
- params.renderer_id, params.route_id, params.identifier));
}
#endif
diff --git a/content/common/gpu/gpu_channel.cc b/content/common/gpu/gpu_channel.cc
index 5c430c4..4270bb0 100644
--- a/content/common/gpu/gpu_channel.cc
+++ b/content/common/gpu/gpu_channel.cc
@@ -234,25 +234,7 @@ GpuCommandBufferStub* GpuChannel::LookupCommandBuffer(int32 route_id) {
return stubs_.Lookup(route_id);
}
-#if defined(TOUCH_UI)
-void GpuChannel::AcceleratedSurfaceIOSurfaceSet(
- int32 route_id, uint64 surface_id) {
- GpuCommandBufferStub* stub = stubs_.Lookup(route_id);
- if (stub == NULL)
- return;
- stub->AcceleratedSurfaceIOSurfaceSet(surface_id);
-}
-
-void GpuChannel::AcceleratedSurfaceReleased(
- int32 route_id, uint64 surface_id) {
- GpuCommandBufferStub* stub = stubs_.Lookup(route_id);
- if (stub == NULL)
- return;
- stub->AcceleratedSurfaceReleased(surface_id);
-}
-#endif
-
-#if defined(OS_MACOSX) || defined(TOUCH_UI)
+#if defined(OS_MACOSX)
void GpuChannel::AcceleratedSurfaceBuffersSwapped(
int32 route_id, uint64 swap_buffers_count) {
GpuCommandBufferStub* stub = stubs_.Lookup(route_id);
@@ -260,9 +242,7 @@ void GpuChannel::AcceleratedSurfaceBuffersSwapped(
return;
stub->AcceleratedSurfaceBuffersSwapped(swap_buffers_count);
}
-#endif
-#if defined(OS_MACOSX)
void GpuChannel::DestroyCommandBufferByViewId(int32 render_view_id) {
// This responds to a message from the browser process to destroy the command
// buffer when the window with a GpuScheduler is closed (see
diff --git a/content/common/gpu/gpu_channel.h b/content/common/gpu/gpu_channel.h
index 7defd5b..6f7614b 100644
--- a/content/common/gpu/gpu_channel.h
+++ b/content/common/gpu/gpu_channel.h
@@ -101,19 +101,9 @@ class GpuChannel : public IPC::Channel::Listener,
GpuCommandBufferStub* LookupCommandBuffer(int32 route_id);
-#if defined(TOUCH_UI)
- virtual void AcceleratedSurfaceIOSurfaceSet(
- int32 route_id, uint64 surface_id);
- virtual void AcceleratedSurfaceReleased(
- int32 route_id, uint64 surface_id);
-#endif
-
-#if defined(OS_MACOSX) || defined(TOUCH_UI)
+#if defined(OS_MACOSX)
virtual void AcceleratedSurfaceBuffersSwapped(
int32 route_id, uint64 swap_buffers_count);
-#endif
-
-#if defined(OS_MACOSX)
void DestroyCommandBufferByViewId(int32 render_view_id);
#endif
diff --git a/content/common/gpu/gpu_channel_manager.cc b/content/common/gpu/gpu_channel_manager.cc
index c2ef865..d9732fc 100644
--- a/content/common/gpu/gpu_channel_manager.cc
+++ b/content/common/gpu/gpu_channel_manager.cc
@@ -4,19 +4,20 @@
#include "content/common/gpu/gpu_channel_manager.h"
+#include "content/common/child_thread.h"
#include "content/common/gpu/gpu_channel.h"
#include "content/common/gpu/gpu_messages.h"
-GpuChannelManager::GpuChannelManager(IPC::Message::Sender* browser_channel,
+GpuChannelManager::GpuChannelManager(ChildThread* gpu_child_thread,
GpuWatchdog* watchdog,
base::MessageLoopProxy* io_message_loop,
base::WaitableEvent* shutdown_event)
: ALLOW_THIS_IN_INITIALIZER_LIST(method_factory_(this)),
io_message_loop_(io_message_loop),
shutdown_event_(shutdown_event),
- browser_channel_(browser_channel),
+ gpu_child_thread_(gpu_child_thread),
watchdog_(watchdog) {
- DCHECK(browser_channel);
+ DCHECK(gpu_child_thread);
DCHECK(io_message_loop);
DCHECK(shutdown_event);
}
@@ -29,6 +30,20 @@ void GpuChannelManager::RemoveChannel(int renderer_id) {
gpu_channels_.erase(renderer_id);
}
+int GpuChannelManager::GenerateRouteID() {
+ static int last_id = 0;
+ return ++last_id;
+}
+
+void GpuChannelManager::AddRoute(int32 routing_id,
+ IPC::Channel::Listener* listener) {
+ gpu_child_thread_->AddRoute(routing_id, listener);
+}
+
+void GpuChannelManager::RemoveRoute(int32 routing_id) {
+ gpu_child_thread_->RemoveRoute(routing_id);
+}
+
bool GpuChannelManager::OnMessageReceived(const IPC::Message& msg) {
bool msg_is_ok = true;
bool handled = true;
@@ -41,17 +56,9 @@ bool GpuChannelManager::OnMessageReceived(const IPC::Message& msg) {
#if defined(TOOLKIT_USES_GTK) && !defined(TOUCH_UI) || defined(OS_WIN)
IPC_MESSAGE_HANDLER(GpuMsg_ResizeViewACK, OnResizeViewACK);
#endif
-#if defined(TOUCH_UI)
- IPC_MESSAGE_HANDLER(GpuMsg_AcceleratedSurfaceSetIOSurfaceACK,
- OnAcceleratedSurfaceSetIOSurfaceACK)
- IPC_MESSAGE_HANDLER(GpuMsg_AcceleratedSurfaceReleaseACK,
- OnAcceleratedSurfaceReleaseACK)
-#endif
-#if defined(OS_MACOSX) || defined(TOUCH_UI)
+#if defined(OS_MACOSX)
IPC_MESSAGE_HANDLER(GpuMsg_AcceleratedSurfaceBuffersSwappedACK,
OnAcceleratedSurfaceBuffersSwappedACK)
-#endif
-#if defined(OS_MACOSX)
IPC_MESSAGE_HANDLER(GpuMsg_DestroyCommandBuffer,
OnDestroyCommandBuffer)
#endif
@@ -61,7 +68,7 @@ bool GpuChannelManager::OnMessageReceived(const IPC::Message& msg) {
}
bool GpuChannelManager::Send(IPC::Message* msg) {
- return browser_channel_->Send(msg);
+ return gpu_child_thread_->Send(msg);
}
void GpuChannelManager::OnEstablishChannel(int renderer_id) {
@@ -138,27 +145,7 @@ void GpuChannelManager::OnResizeViewACK(int32 renderer_id,
channel->ViewResized(command_buffer_route_id);
}
-#if defined(TOUCH_UI)
-void GpuChannelManager::OnAcceleratedSurfaceSetIOSurfaceACK(
- int renderer_id, int32 route_id, uint64 surface_id) {
- GpuChannelMap::const_iterator iter = gpu_channels_.find(renderer_id);
- if (iter == gpu_channels_.end())
- return;
- scoped_refptr<GpuChannel> channel = iter->second;
- channel->AcceleratedSurfaceIOSurfaceSet(route_id, surface_id);
-}
-
-void GpuChannelManager::OnAcceleratedSurfaceReleaseACK(
- int renderer_id, int32 route_id, uint64 surface_id) {
- GpuChannelMap::const_iterator iter = gpu_channels_.find(renderer_id);
- if (iter == gpu_channels_.end())
- return;
- scoped_refptr<GpuChannel> channel = iter->second;
- channel->AcceleratedSurfaceReleased(route_id, surface_id);
-}
-#endif
-
-#if defined(OS_MACOSX) || defined(TOUCH_UI)
+#if defined(OS_MACOSX)
void GpuChannelManager::OnAcceleratedSurfaceBuffersSwappedACK(
int renderer_id, int32 route_id, uint64 swap_buffers_count) {
GpuChannelMap::const_iterator iter = gpu_channels_.find(renderer_id);
@@ -167,9 +154,7 @@ void GpuChannelManager::OnAcceleratedSurfaceBuffersSwappedACK(
scoped_refptr<GpuChannel> channel = iter->second;
channel->AcceleratedSurfaceBuffersSwapped(route_id, swap_buffers_count);
}
-#endif
-#if defined(OS_MACOSX)
void GpuChannelManager::OnDestroyCommandBuffer(
int renderer_id, int32 renderer_view_id) {
GpuChannelMap::const_iterator iter = gpu_channels_.find(renderer_id);
diff --git a/content/common/gpu/gpu_channel_manager.h b/content/common/gpu/gpu_channel_manager.h
index 683c9cd..dd176aa 100644
--- a/content/common/gpu/gpu_channel_manager.h
+++ b/content/common/gpu/gpu_channel_manager.h
@@ -22,6 +22,7 @@ namespace IPC {
struct ChannelHandle;
}
+class ChildThread;
class GpuChannel;
class GpuWatchdog;
struct GPUCreateCommandBufferConfig;
@@ -39,7 +40,7 @@ struct GPUCreateCommandBufferConfig;
class GpuChannelManager : public IPC::Channel::Listener,
public IPC::Message::Sender {
public:
- GpuChannelManager(IPC::Message::Sender* browser_channel,
+ GpuChannelManager(ChildThread* gpu_child_thread,
GpuWatchdog* watchdog,
base::MessageLoopProxy* io_message_loop,
base::WaitableEvent* shutdown_event);
@@ -58,6 +59,10 @@ class GpuChannelManager : public IPC::Channel::Listener,
ScopedRunnableMethodFactory<GpuChannelManager> method_factory_;
+ int GenerateRouteID();
+ void AddRoute(int32 routing_id, IPC::Channel::Listener* listener);
+ void RemoveRoute(int32 routing_id);
+
private:
// Message handlers.
void OnEstablishChannel(int renderer_id);
@@ -71,19 +76,9 @@ class GpuChannelManager : public IPC::Channel::Listener,
const GPUCreateCommandBufferConfig& init_params);
void OnResizeViewACK(int32 renderer_id, int32 command_buffer_route_id);
-#if defined(TOUCH_UI)
- void OnAcceleratedSurfaceSetIOSurfaceACK(
- int renderer_id, int32 route_id, uint64 surface_id);
- void OnAcceleratedSurfaceReleaseACK(
- int renderer_id, int32 route_id, uint64 surface_id);
-#endif
-
-#if defined(OS_MACOSX) || defined(TOUCH_UI)
+#if defined(OS_MACOSX)
void OnAcceleratedSurfaceBuffersSwappedACK(
int renderer_id, int32 route_id, uint64 swap_buffers_count);
-#endif
-
-#if defined(OS_MACOSX)
void OnDestroyCommandBuffer(int renderer_id, int32 renderer_view_id);
#endif
@@ -92,10 +87,8 @@ class GpuChannelManager : public IPC::Channel::Listener,
scoped_refptr<base::MessageLoopProxy> io_message_loop_;
base::WaitableEvent* shutdown_event_;
- // Either an IPC channel to the browser or, if the GpuChannelManager is
- // running in the browser process, a Sender implementation that will post
- // IPC messages to the UI thread.
- IPC::Message::Sender* browser_channel_;
+ // Used to send and receive IPC messages from the browser process.
+ ChildThread* gpu_child_thread_;
// These objects manage channels to individual renderer processes. There is
// one channel for each renderer process that has connected to this GPU
diff --git a/content/common/gpu/gpu_command_buffer_stub.cc b/content/common/gpu/gpu_command_buffer_stub.cc
index 3890e81..b86750a 100644
--- a/content/common/gpu/gpu_command_buffer_stub.cc
+++ b/content/common/gpu/gpu_command_buffer_stub.cc
@@ -22,6 +22,8 @@
#if defined(OS_WIN)
#include "base/win/wrapped_window_proc.h"
+#elif defined(TOUCH_UI)
+#include "content/common/gpu/image_transport_surface_linux.h"
#endif
using gpu::Buffer;
@@ -115,14 +117,19 @@ bool GpuCommandBufferStub::IsScheduled() {
return !scheduler_.get() || scheduler_->IsScheduled();
}
+void GpuCommandBufferStub::OnInitializeFailed(IPC::Message* reply_message) {
+ scheduler_.reset();
+ command_buffer_.reset();
+ GpuCommandBufferMsg_Initialize::WriteReplyParams(reply_message, false);
+ Send(reply_message);
+}
+
void GpuCommandBufferStub::OnInitialize(
base::SharedMemoryHandle ring_buffer,
int32 size,
IPC::Message* reply_message) {
DCHECK(!command_buffer_.get());
- bool result = false;
-
command_buffer_.reset(new gpu::CommandBufferService);
#if defined(OS_WIN)
@@ -142,6 +149,41 @@ void GpuCommandBufferStub::OnInitialize(
scheduler_.reset(gpu::GpuScheduler::Create(command_buffer_.get(),
channel_,
NULL));
+#if defined(TOUCH_UI)
+ if (software_) {
+ OnInitializeFailed(reply_message);
+ return;
+ }
+
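+  // On TOUCH_UI, onscreen contexts render through an ImageTransportSurface,
+  // which shares its backing X surfaces with the browser process and
+  // notifies it over IPC when a frame is ready.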
+ scoped_refptr<gfx::GLSurface> surface;
+ if (handle_)
+ surface = ImageTransportSurface::CreateSurface(this);
+ else
+ surface = gfx::GLSurface::CreateOffscreenGLSurface(software_,
+ gfx::Size(1, 1));
+ if (!surface.get()) {
+ LOG(ERROR) << "GpuCommandBufferStub: failed to create surface.";
+ OnInitializeFailed(reply_message);
+ return;
+ }
+
+ scoped_refptr<gfx::GLContext> context(
+ gfx::GLContext::CreateGLContext(channel_->share_group(),
+ surface.get()));
+ if (!context.get()) {
+ LOG(ERROR) << "GpuCommandBufferStub: failed to create context.";
+ OnInitializeFailed(reply_message);
+ return;
+ }
+
+ if (scheduler_->InitializeCommon(
+ surface,
+ context,
+ initial_size_,
+ disallowed_extensions_,
+ allowed_extensions_.c_str(),
+ requested_attribs_)) {
+#else
if (scheduler_->Initialize(
handle_,
initial_size_,
@@ -150,6 +192,7 @@ void GpuCommandBufferStub::OnInitialize(
allowed_extensions_.c_str(),
requested_attribs_,
channel_->share_group())) {
+#endif
command_buffer_->SetPutOffsetChangeCallback(
NewCallback(scheduler_.get(),
&gpu::GpuScheduler::PutChanged));
@@ -165,7 +208,7 @@ void GpuCommandBufferStub::OnInitialize(
scheduler_->SetCommandProcessedCallback(
NewCallback(this, &GpuCommandBufferStub::OnCommandProcessed));
-#if defined(OS_MACOSX) || defined(TOUCH_UI)
+#if defined(OS_MACOSX)
if (handle_) {
// This context conceptually puts its output directly on the
// screen, rendered by the accelerated plugin layer in
@@ -175,12 +218,19 @@ void GpuCommandBufferStub::OnInitialize(
NewCallback(this,
&GpuCommandBufferStub::SwapBuffersCallback));
}
-#endif // defined(OS_MACOSX) || defined(TOUCH_UI)
+#endif // defined(OS_MACOSX)
// Set up a pathway for resizing the output window or framebuffer at the
// right time relative to other GL commands.
+#if defined(TOUCH_UI)
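+  // Onscreen transport surfaces install their own resize callback, so only
+  // hook the stub's callback for offscreen command buffers.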
+ if (handle_ == gfx::kNullPluginWindow) {
+ scheduler_->SetResizeCallback(
+ NewCallback(this, &GpuCommandBufferStub::ResizeCallback));
+ }
+#else
scheduler_->SetResizeCallback(
NewCallback(this, &GpuCommandBufferStub::ResizeCallback));
+#endif
if (parent_stub_for_initialization_) {
scheduler_->SetParent(parent_stub_for_initialization_->scheduler_.get(),
@@ -189,14 +239,13 @@ void GpuCommandBufferStub::OnInitialize(
parent_texture_for_initialization_ = 0;
}
- result = true;
} else {
- scheduler_.reset();
- command_buffer_.reset();
+ OnInitializeFailed(reply_message);
+ return;
}
}
- GpuCommandBufferMsg_Initialize::WriteReplyParams(reply_message, result);
+ GpuCommandBufferMsg_Initialize::WriteReplyParams(reply_message, true);
Send(reply_message);
}
@@ -438,34 +487,7 @@ void GpuCommandBufferStub::SwapBuffersCallback() {
scheduler_->SetScheduled(false);
}
-#endif // defined(OS_MACOSX)
-#if defined(TOUCH_UI)
-void GpuCommandBufferStub::SwapBuffersCallback() {
- TRACE_EVENT0("gpu", "GpuCommandBufferStub::SwapBuffersCallback");
- GpuChannelManager* gpu_channel_manager = channel_->gpu_channel_manager();
- GpuHostMsg_AcceleratedSurfaceBuffersSwapped_Params params;
- params.renderer_id = renderer_id_;
- params.render_view_id = render_view_id_;
- params.surface_id = scheduler_->GetFrontSurfaceId();
- params.route_id = route_id();
- params.swap_buffers_count = scheduler_->swap_buffers_count();
- gpu_channel_manager->Send(
- new GpuHostMsg_AcceleratedSurfaceBuffersSwapped(params));
-
- scheduler_->SetScheduled(false);
-}
-
-void GpuCommandBufferStub::AcceleratedSurfaceIOSurfaceSet(uint64 surface_id) {
- scheduler_->SetScheduled(true);
-}
-
-void GpuCommandBufferStub::AcceleratedSurfaceReleased(uint64 surface_id) {
- scheduler_->ReleaseSurface(surface_id);
-}
-#endif // defined(TOUCH_UI)
-
-#if defined(OS_MACOSX) || defined(TOUCH_UI)
void GpuCommandBufferStub::AcceleratedSurfaceBuffersSwapped(
uint64 swap_buffers_count) {
TRACE_EVENT1("gpu",
@@ -485,7 +507,7 @@ void GpuCommandBufferStub::AcceleratedSurfaceBuffersSwapped(
scheduler_->SetScheduled(true);
}
}
-#endif // defined(OS_MACOSX) || defined(TOUCH_UI)
+#endif // defined(OS_MACOSX)
void GpuCommandBufferStub::AddSetTokenCallback(
const base::Callback<void(int32)>& callback) {
@@ -511,31 +533,6 @@ void GpuCommandBufferStub::ResizeCallback(gfx::Size size) {
size));
scheduler_->SetScheduled(false);
-#elif defined(TOUCH_UI)
- if (scheduler_->GetBackSurfaceId()) {
- GpuHostMsg_AcceleratedSurfaceRelease_Params params;
- params.renderer_id = renderer_id_;
- params.render_view_id = render_view_id_;
- params.identifier = scheduler_->GetBackSurfaceId();
- params.route_id = route_id();
-
- GpuChannelManager* gpu_channel_manager = channel_->gpu_channel_manager();
- gpu_channel_manager->Send(
- new GpuHostMsg_AcceleratedSurfaceRelease(params));
- }
- scheduler_->CreateBackSurface(size);
- GpuHostMsg_AcceleratedSurfaceSetIOSurface_Params params;
- params.renderer_id = renderer_id_;
- params.render_view_id = render_view_id_;
- params.width = size.width();
- params.height = size.height();
- params.identifier = scheduler_->GetBackSurfaceId();
- params.route_id = route_id();
-
- GpuChannelManager* gpu_channel_manager = channel_->gpu_channel_manager();
- gpu_channel_manager->Send(
- new GpuHostMsg_AcceleratedSurfaceSetIOSurface(params));
- scheduler_->SetScheduled(false);
#endif
}
}
diff --git a/content/common/gpu/gpu_command_buffer_stub.h b/content/common/gpu/gpu_command_buffer_stub.h
index b97234d4..26dd64b 100644
--- a/content/common/gpu/gpu_command_buffer_stub.h
+++ b/content/common/gpu/gpu_command_buffer_stub.h
@@ -58,6 +58,9 @@ class GpuCommandBufferStub
// Get the GLContext associated with this object.
gpu::GpuScheduler* scheduler() const { return scheduler_.get(); }
+ // Get the GpuChannel associated with this object.
+ GpuChannel* channel() const { return channel_; }
+
// Identifies the renderer process.
int32 renderer_id() const { return renderer_id_; }
@@ -81,15 +84,10 @@ class GpuCommandBufferStub
void ViewResized();
-#if defined(TOUCH_UI)
- void AcceleratedSurfaceIOSurfaceSet(uint64 surface_id);
- void AcceleratedSurfaceReleased(uint64 surface_id);
-#endif // defined(TOUCH_UI)
-
-#if defined(OS_MACOSX) || defined(TOUCH_UI)
+#if defined(OS_MACOSX)
// Called only by the GpuChannel.
void AcceleratedSurfaceBuffersSwapped(uint64 swap_buffers_count);
-#endif // defined(OS_MACOSX) || defined(TOUCH_UI)
+#endif // defined(OS_MACOSX)
// Register a callback to be Run() whenever the underlying scheduler receives
// a set_token() call. The callback will be Run() with the just-set token as
@@ -97,6 +95,9 @@ class GpuCommandBufferStub
void AddSetTokenCallback(const base::Callback<void(int32)>& callback);
private:
+ // Cleans up and sends reply if OnInitialize failed.
+ void OnInitializeFailed(IPC::Message* reply_message);
+
// Message handlers:
void OnInitialize(base::SharedMemoryHandle ring_buffer,
int32 size,
@@ -131,11 +132,8 @@ class GpuCommandBufferStub
#if defined(OS_MACOSX)
void OnSetWindowSize(const gfx::Size& size);
-#endif // defined(OS_MACOSX)
-
-#if defined(OS_MACOSX) || defined(TOUCH_UI)
void SwapBuffersCallback();
-#endif // defined(TOUCH_UI)
+#endif // defined(OS_MACOSX)
void ResizeCallback(gfx::Size size);
void ReportState();
diff --git a/content/common/gpu/gpu_messages.h b/content/common/gpu/gpu_messages.h
index b4fc5ff..a852b45 100644
--- a/content/common/gpu/gpu_messages.h
+++ b/content/common/gpu/gpu_messages.h
@@ -159,19 +159,15 @@ IPC_MESSAGE_CONTROL2(GpuMsg_ResizeViewACK,
#if defined(TOUCH_UI)
// Tells the GPU process that it's safe to start rendering to the surface.
-IPC_MESSAGE_CONTROL3(GpuMsg_AcceleratedSurfaceSetIOSurfaceACK,
- int /* renderer_id */,
- int32 /* route_id */,
- uint64 /* surface_id */)
+IPC_MESSAGE_ROUTED1(AcceleratedSurfaceMsg_SetSurfaceACK,
+ uint64 /* surface_id */)
-// Tells the GPU process the surface has been released.
-IPC_MESSAGE_CONTROL3(GpuMsg_AcceleratedSurfaceReleaseACK,
- int /* renderer_id */,
- int32 /* route_id */,
- uint64 /* surface_id */)
+// Tells the GPU process that the browser process handled the swap
+// buffers request.
+IPC_MESSAGE_ROUTED0(AcceleratedSurfaceMsg_BuffersSwappedACK)
#endif
-#if defined(OS_MACOSX) || defined(TOUCH_UI)
+#if defined(OS_MACOSX)
// Tells the GPU process that the browser process handled the swap
// buffers request with the given number. Note that it is possible
// for the browser process to coalesce frames; it is not guaranteed
@@ -181,9 +177,7 @@ IPC_MESSAGE_CONTROL3(GpuMsg_AcceleratedSurfaceBuffersSwappedACK,
int /* renderer_id */,
int32 /* route_id */,
uint64 /* swap_buffers_count */)
-#endif
-#if defined(OS_MACOSX)
// Requests the GPU process to destroy the command buffer and remove the
// associated route. Further messages to this command buffer will result in a
// channel error.
diff --git a/content/common/gpu/image_transport_surface_linux.cc b/content/common/gpu/image_transport_surface_linux.cc
new file mode 100644
index 0000000..93a067a
--- /dev/null
+++ b/content/common/gpu/image_transport_surface_linux.cc
@@ -0,0 +1,448 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#if defined(ENABLE_GPU)
+
+#include "content/common/gpu/image_transport_surface_linux.h"
+
+// gpu_messages.h conflicts with the defines in Xlib.h and must come first.
+#include "content/common/gpu/gpu_messages.h"
+
+#include <map>
+#include <X11/Xlib.h>
+#include <X11/extensions/Xcomposite.h>
+
+#include "base/callback.h"
+#include "content/common/gpu/gpu_channel.h"
+#include "content/common/gpu/gpu_channel_manager.h"
+#include "content/common/gpu/gpu_command_buffer_stub.h"
+#include "gpu/command_buffer/service/gpu_scheduler.h"
+#include "third_party/angle/include/EGL/egl.h"
+#include "third_party/angle/include/EGL/eglext.h"
+#include "ui/gfx/gl/gl_bindings.h"
+#include "ui/gfx/gl/gl_implementation.h"
+#include "ui/gfx/gl/gl_surface_egl.h"
+#include "ui/gfx/gl/gl_surface_glx.h"
+#include "ui/gfx/surface/accelerated_surface_linux.h"
+
+namespace {
+
+// We are backed by a Pbuffer offscreen surface for the purposes of creating a
+// context, but use FBOs to render to X Pixmap backed EGLImages.
+class EGLImageTransportSurface : public ImageTransportSurface,
+ public gfx::PbufferGLSurfaceEGL {
+ public:
+ explicit EGLImageTransportSurface(GpuCommandBufferStub* stub);
+
+ // GLSurface implementation
+ virtual bool Initialize() OVERRIDE;
+ virtual void Destroy() OVERRIDE;
+ virtual bool IsOffscreen() OVERRIDE;
+ virtual bool SwapBuffers() OVERRIDE;
+ virtual gfx::Size GetSize() OVERRIDE;
+ virtual void OnMakeCurrent() OVERRIDE;
+ virtual unsigned int GetBackingFrameBufferObject() OVERRIDE;
+
+ protected:
+ // ImageTransportSurface implementation
+ virtual void OnSetSurfaceACK(uint64 surface_id) OVERRIDE;
+ virtual void OnBuffersSwappedACK() OVERRIDE;
+ virtual void Resize(gfx::Size size) OVERRIDE;
+
+ private:
+ virtual ~EGLImageTransportSurface() OVERRIDE;
+ void ReleaseSurface(scoped_refptr<AcceleratedSurface>& surface);
+
+ uint32 fbo_id_;
+ uint32 depth_id_;
+ gfx::Size depth_buffer_size_;
+
+ scoped_refptr<AcceleratedSurface> back_surface_;
+ scoped_refptr<AcceleratedSurface> front_surface_;
+
+ DISALLOW_COPY_AND_ASSIGN(EGLImageTransportSurface);
+};
+
+// We render into an off-screen, composite-redirected X window; the browser
+// process accesses its contents through the window's XID.
+class GLXImageTransportSurface : public ImageTransportSurface,
+ public gfx::NativeViewGLSurfaceGLX {
+ public:
+ explicit GLXImageTransportSurface(GpuCommandBufferStub* stub);
+
+ // gfx::GLSurface implementation:
+ virtual bool Initialize() OVERRIDE;
+ virtual void Destroy() OVERRIDE;
+ virtual bool SwapBuffers() OVERRIDE;
+ virtual gfx::Size GetSize() OVERRIDE;
+
+ protected:
+ // ImageTransportSurface implementation:
+ void OnSetSurfaceACK(uint64 surface_id) OVERRIDE;
+ void OnBuffersSwappedACK() OVERRIDE;
+ void Resize(gfx::Size size) OVERRIDE;
+
+ private:
+ virtual ~GLXImageTransportSurface();
+
+ // Tell the browser to release the surface.
+ void ReleaseSurface();
+
+ XID dummy_parent_;
+ gfx::Size size_;
+
+ // Whether or not the image has been bound on the browser side.
+ bool bound_;
+
+ DISALLOW_COPY_AND_ASSIGN(GLXImageTransportSurface);
+};
+
+EGLImageTransportSurface::EGLImageTransportSurface(GpuCommandBufferStub* stub) :
+ ImageTransportSurface(stub),
+ gfx::PbufferGLSurfaceEGL(false, gfx::Size(1,1)),
+ fbo_id_(0),
+ depth_id_(0) {
+}
+
+EGLImageTransportSurface::~EGLImageTransportSurface() {
+}
+
+bool EGLImageTransportSurface::Initialize() {
+ if (!ImageTransportSurface::Initialize())
+ return false;
+ return PbufferGLSurfaceEGL::Initialize();
+}
+
+void EGLImageTransportSurface::Destroy() {
+ if (depth_id_) {
+ glDeleteRenderbuffersEXT(1, &depth_id_);
+ depth_id_ = 0;
+ }
+
+ if (back_surface_.get())
+ ReleaseSurface(back_surface_);
+ if (front_surface_.get())
+ ReleaseSurface(front_surface_);
+
+ ImageTransportSurface::Destroy();
+ PbufferGLSurfaceEGL::Destroy();
+}
+
+bool EGLImageTransportSurface::IsOffscreen() {
+ return false;
+}
+
+void EGLImageTransportSurface::OnMakeCurrent() {
+ if (fbo_id_)
+ return;
+
+ glGenFramebuffersEXT(1, &fbo_id_);
+ glBindFramebufferEXT(GL_FRAMEBUFFER, fbo_id_);
+ Resize(gfx::Size(1,1));
+
+ GLenum status = glCheckFramebufferStatusEXT(GL_FRAMEBUFFER);
+ if (status != GL_FRAMEBUFFER_COMPLETE) {
+ LOG(ERROR) << "Framebuffer incomplete.";
+ }
+}
+
+unsigned int EGLImageTransportSurface::GetBackingFrameBufferObject() {
+ return fbo_id_;
+}
+
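+// Tells the browser to release the given surface's pixmap and drops our
+// reference to it.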
+void EGLImageTransportSurface::ReleaseSurface(
+ scoped_refptr<AcceleratedSurface>& surface) {
+ if (surface.get()) {
+ GpuHostMsg_AcceleratedSurfaceRelease_Params params;
+ params.renderer_id = stub()->renderer_id();
+ params.render_view_id = stub()->render_view_id();
+ params.identifier = surface->pixmap();
+ params.route_id = route_id();
+ Send(new GpuHostMsg_AcceleratedSurfaceRelease(params));
+ surface = NULL;
+ }
+}
+
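+// Reallocates the pixmap-backed back buffer (and the depth buffer if needed)
+// at the new size, attaches it to the FBO, and announces it to the browser.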
+void EGLImageTransportSurface::Resize(gfx::Size size) {
+ if (back_surface_.get())
+ ReleaseSurface(back_surface_);
+
+ if (depth_id_ && depth_buffer_size_ != size) {
+ glDeleteRenderbuffersEXT(1, &depth_id_);
+ depth_id_ = 0;
+ }
+
+ if (!depth_id_) {
+ glGenRenderbuffersEXT(1, &depth_id_);
+ glBindRenderbufferEXT(GL_RENDERBUFFER, depth_id_);
+ glRenderbufferStorageEXT(GL_RENDERBUFFER,
+ GL_DEPTH24_STENCIL8,
+ size.width(),
+ size.height());
+ glBindRenderbufferEXT(GL_RENDERBUFFER, 0);
+ glFramebufferRenderbufferEXT(GL_FRAMEBUFFER,
+ GL_DEPTH_ATTACHMENT,
+ GL_RENDERBUFFER,
+ depth_id_);
+ depth_buffer_size_ = size;
+ }
+
+ back_surface_ = new AcceleratedSurface(size);
+ glFramebufferTexture2DEXT(GL_FRAMEBUFFER,
+ GL_COLOR_ATTACHMENT0,
+ GL_TEXTURE_2D,
+ back_surface_->texture(),
+ 0);
+ glFlush();
+
+ GpuHostMsg_AcceleratedSurfaceSetIOSurface_Params params;
+ params.renderer_id = stub()->renderer_id();
+ params.render_view_id = stub()->render_view_id();
+ params.width = size.width();
+ params.height = size.height();
+ params.identifier = back_surface_->pixmap();
+ params.route_id = route_id();
+ Send(new GpuHostMsg_AcceleratedSurfaceSetIOSurface(params));
+
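+ // Stop processing commands until the browser ACKs the new surface
+ // (OnSetSurfaceACK reschedules the command buffer).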
+ scheduler()->SetScheduled(false);
+}
+
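+// Promotes the back surface to front, tells the browser to display it, and
+// deschedules the command buffer until the swap is acknowledged.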
+bool EGLImageTransportSurface::SwapBuffers() {
+ front_surface_.swap(back_surface_);
+ DCHECK_NE(front_surface_.get(), static_cast<AcceleratedSurface*>(NULL));
+ glFlush();
+
+ GpuHostMsg_AcceleratedSurfaceBuffersSwapped_Params params;
+ params.renderer_id = stub()->renderer_id();
+ params.render_view_id = stub()->render_view_id();
+ params.surface_id = front_surface_->pixmap();
+ params.route_id = route_id();
+ Send(new GpuHostMsg_AcceleratedSurfaceBuffersSwapped(params));
+
+ gfx::Size expected_size = front_surface_->size();
+ if (!back_surface_.get() || back_surface_->size() != expected_size) {
+ Resize(expected_size);
+ } else {
+ glFramebufferTexture2DEXT(GL_FRAMEBUFFER,
+ GL_COLOR_ATTACHMENT0,
+ GL_TEXTURE_2D,
+ back_surface_->texture(),
+ 0);
+ }
+ scheduler()->SetScheduled(false);
+ return true;
+}
+
+gfx::Size EGLImageTransportSurface::GetSize() {
+ return back_surface_->size();
+}
+
+void EGLImageTransportSurface::OnSetSurfaceACK(
+ uint64 surface_id) {
+ DCHECK_EQ(back_surface_->pixmap(), surface_id);
+ scheduler()->SetScheduled(true);
+}
+
+void EGLImageTransportSurface::OnBuffersSwappedACK() {
+ scheduler()->SetScheduled(true);
+}
+
+GLXImageTransportSurface::GLXImageTransportSurface(GpuCommandBufferStub* stub) :
+ ImageTransportSurface(stub),
+ gfx::NativeViewGLSurfaceGLX(),
+ dummy_parent_(0),
+ size_(1, 1),
+ bound_(false) {
+}
+
+GLXImageTransportSurface::~GLXImageTransportSurface() {
+}
+
+bool GLXImageTransportSurface::Initialize() {
+ // Create a dummy window to host the real window.
+ Display* dpy = gfx::GLSurfaceGLX::GetDisplay();
+ XSetWindowAttributes swa;
+ swa.event_mask = StructureNotifyMask;
+ swa.override_redirect = True;
+ dummy_parent_ = XCreateWindow(
+ dpy,
+ RootWindow(dpy, DefaultScreen(dpy)), // parent
+ -100, -100, 1, 1,
+ 0, // border width
+ CopyFromParent, // depth
+ InputOutput,
+ CopyFromParent, // visual
+ CWEventMask | CWOverrideRedirect, &swa);
+ XMapWindow(dpy, dummy_parent_);
+
+ swa.event_mask = StructureNotifyMask;
+ swa.override_redirect = false;
+ window_ = XCreateWindow(dpy,
+ dummy_parent_,
+ 0, 0, size_.width(), size_.height(),
+ 0, // border width
+ CopyFromParent, // depth
+ InputOutput,
+ CopyFromParent, // visual
+ CWEventMask, &swa);
+ XMapWindow(dpy, window_);
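+ // Wait until the window is mapped before redirecting it.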
+ while (true) {
+ XEvent event;
+ XNextEvent(dpy, &event);
+ if (event.type == MapNotify && event.xmap.window == window_)
+ break;
+ }
+ // Redirect manually so the X server does not paint the window itself.
+ XCompositeRedirectWindow(dpy, window_, CompositeRedirectManual);
+ Resize(size_);
+
+ if (!ImageTransportSurface::Initialize())
+ return false;
+ return gfx::NativeViewGLSurfaceGLX::Initialize();
+}
+
+void GLXImageTransportSurface::Destroy() {
+ if (bound_)
+ ReleaseSurface();
+
+ if (window_) {
+ Display* dpy = gfx::GLSurfaceGLX::GetDisplay();
+ XDestroyWindow(dpy, window_);
+ XDestroyWindow(dpy, dummy_parent_);
+ }
+
+ ImageTransportSurface::Destroy();
+ gfx::NativeViewGLSurfaceGLX::Destroy();
+}
+
+void GLXImageTransportSurface::ReleaseSurface() {
+ DCHECK(bound_);
+ GpuHostMsg_AcceleratedSurfaceRelease_Params params;
+ params.renderer_id = stub()->renderer_id();
+ params.render_view_id = stub()->render_view_id();
+ params.identifier = window_;
+ params.route_id = route_id();
+ Send(new GpuHostMsg_AcceleratedSurfaceRelease(params));
+}
+
+void GLXImageTransportSurface::Resize(gfx::Size size) {
+ size_ = size;
+ if (bound_) {
+ ReleaseSurface();
+ bound_ = false;
+ }
+
+ Display* dpy = gfx::GLSurfaceGLX::GetDisplay();
+ XResizeWindow(dpy, window_, size_.width(), size_.height());
+ XFlush(dpy);
+
+ GpuHostMsg_AcceleratedSurfaceSetIOSurface_Params params;
+ params.renderer_id = stub()->renderer_id();
+ params.render_view_id = stub()->render_view_id();
+ params.width = size_.width();
+ params.height = size_.height();
+ params.identifier = window_;
+ params.route_id = route_id();
+ Send(new GpuHostMsg_AcceleratedSurfaceSetIOSurface(params));
+
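+ // Stop processing commands until the browser binds the resized window and
+ // ACKs it (OnSetSurfaceACK).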
+ scheduler()->SetScheduled(false);
+}
+
+bool GLXImageTransportSurface::SwapBuffers() {
+ gfx::NativeViewGLSurfaceGLX::SwapBuffers();
+ glFlush();
+
+ GpuHostMsg_AcceleratedSurfaceBuffersSwapped_Params params;
+ params.renderer_id = stub()->renderer_id();
+ params.render_view_id = stub()->render_view_id();
+ params.surface_id = window_;
+ params.route_id = route_id();
+ Send(new GpuHostMsg_AcceleratedSurfaceBuffersSwapped(params));
+
+ return true;
+}
+
+gfx::Size GLXImageTransportSurface::GetSize() {
+ return size_;
+}
+
+void GLXImageTransportSurface::OnSetSurfaceACK(
+ uint64 surface_id) {
+ DCHECK(!bound_);
+ bound_ = true;
+ scheduler()->SetScheduled(true);
+}
+
+void GLXImageTransportSurface::OnBuffersSwappedACK() {
+}
+
+} // namespace
+
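+// The surface registers its own route with the GpuChannelManager so that the
+// browser's AcceleratedSurfaceMsg_* ACKs are dispatched directly to it.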
+ImageTransportSurface::ImageTransportSurface(GpuCommandBufferStub* stub) :
+ stub_(stub) {
+ GpuChannelManager* gpu_channel_manager
+ = stub_->channel()->gpu_channel_manager();
+ route_id_ = gpu_channel_manager->GenerateRouteID();
+ gpu_channel_manager->AddRoute(route_id_, this);
+}
+
+ImageTransportSurface::~ImageTransportSurface() {
+ GpuChannelManager* gpu_channel_manager
+ = stub_->channel()->gpu_channel_manager();
+ gpu_channel_manager->RemoveRoute(route_id_);
+}
+
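+// Hooks the scheduler's resize callback so this surface can reallocate its
+// backing storage when the view size changes.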
+bool ImageTransportSurface::Initialize() {
+ scheduler()->SetResizeCallback(
+ NewCallback(this, &ImageTransportSurface::Resize));
+ return true;
+}
+
+void ImageTransportSurface::Destroy() {
+ scheduler()->SetResizeCallback(NULL);
+}
+
+bool ImageTransportSurface::OnMessageReceived(const IPC::Message& message) {
+ bool handled = true;
+ IPC_BEGIN_MESSAGE_MAP(ImageTransportSurface, message)
+ IPC_MESSAGE_HANDLER(AcceleratedSurfaceMsg_SetSurfaceACK,
+ OnSetSurfaceACK)
+ IPC_MESSAGE_HANDLER(AcceleratedSurfaceMsg_BuffersSwappedACK,
+ OnBuffersSwappedACK)
+ IPC_MESSAGE_UNHANDLED(handled = false)
+ IPC_END_MESSAGE_MAP()
+ return handled;
+}
+
+bool ImageTransportSurface::Send(IPC::Message* message) {
+ GpuChannelManager* gpu_channel_manager =
+ stub_->channel()->gpu_channel_manager();
+ return gpu_channel_manager->Send(message);
+}
+
+gpu::GpuScheduler* ImageTransportSurface::scheduler() {
+ return stub_->scheduler();
+}
+
+// static
+scoped_refptr<gfx::GLSurface> ImageTransportSurface::CreateSurface(
+ GpuCommandBufferStub* stub) {
+ scoped_refptr<gfx::GLSurface> surface;
+ switch (gfx::GetGLImplementation()) {
+ case gfx::kGLImplementationDesktopGL:
+ surface = new GLXImageTransportSurface(stub);
+ break;
+ case gfx::kGLImplementationEGLGLES2:
+ surface = new EGLImageTransportSurface(stub);
+ break;
+ default:
+ NOTREACHED();
+ return NULL;
+ }
+ if (surface->Initialize())
+ return surface;
+ else
+ return NULL;
+}
+
+#endif // defined(ENABLE_GPU)
diff --git a/content/common/gpu/image_transport_surface_linux.h b/content/common/gpu/image_transport_surface_linux.h
new file mode 100644
index 0000000..6ad1d0b
--- /dev/null
+++ b/content/common/gpu/image_transport_surface_linux.h
@@ -0,0 +1,67 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CONTENT_COMMON_GPU_IMAGE_TRANSPORT_SURFACE_LINUX_H_
+#define CONTENT_COMMON_GPU_IMAGE_TRANSPORT_SURFACE_LINUX_H_
+#pragma once
+
+#if defined(ENABLE_GPU)
+
+#include "base/memory/ref_counted.h"
+#include "ipc/ipc_channel.h"
+#include "ipc/ipc_message.h"
+#include "ui/gfx/size.h"
+
+class GpuCommandBufferStub;
+
+namespace gfx {
+class GLSurface;
+}
+
+namespace gpu {
+class GpuScheduler;
+}
+
+class ImageTransportSurface : public IPC::Channel::Listener,
+ public IPC::Message::Sender {
+ public:
+ // Creates the appropriate surface depending on the GL implementation
+ static scoped_refptr<gfx::GLSurface>
+ CreateSurface(GpuCommandBufferStub* stub);
+
+ bool Initialize();
+ void Destroy();
+
+ // IPC::Channel::Listener implementation:
+ virtual bool OnMessageReceived(const IPC::Message& message) OVERRIDE;
+
+ // IPC::Message::Sender implementation:
+ virtual bool Send(IPC::Message* msg) OVERRIDE;
+
+ protected:
+ explicit ImageTransportSurface(GpuCommandBufferStub* stub);
+ ~ImageTransportSurface();
+
+ // IPC::Message handlers
+ virtual void OnSetSurfaceACK(uint64 surface_id) = 0;
+ virtual void OnBuffersSwappedACK() = 0;
+
+ // Resize the backbuffer
+ virtual void Resize(gfx::Size size) = 0;
+
+ GpuCommandBufferStub* stub() { return stub_; }
+ gpu::GpuScheduler* scheduler();
+ int32 route_id() { return route_id_; }
+
+ private:
+ // Weak pointer. The stub outlives this surface.
+ GpuCommandBufferStub* stub_;
+ int32 route_id_;
+
+ DISALLOW_COPY_AND_ASSIGN(ImageTransportSurface);
+};
+
+#endif // defined(ENABLE_GPU)
+
+#endif // CONTENT_COMMON_GPU_IMAGE_TRANSPORT_SURFACE_LINUX_H_
diff --git a/content/content_common.gypi b/content/content_common.gypi
index 42cfd32..9747f69 100644
--- a/content/content_common.gypi
+++ b/content/content_common.gypi
@@ -280,6 +280,20 @@
'common/native_web_keyboard_event_views.cc',
],
}],
+ ['touchui==1', {
+ 'sources': [
+ 'common/gpu/image_transport_surface_linux.h',
+ 'common/gpu/image_transport_surface_linux.cc',
+ ],
+ 'include_dirs': [
+ '<(DEPTH)/third_party/angle/include',
+ ],
+ 'link_settings': {
+ 'libraries': [
+ '-lXcomposite',
+ ],
+ },
+ }],
['enable_gpu==1', {
'dependencies': [
'../gpu/gpu.gyp:command_buffer_service',