author     tfarina@chromium.org <tfarina@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>  2012-05-03 22:45:41 +0000
committer  tfarina@chromium.org <tfarina@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>  2012-05-03 22:45:41 +0000
commit     d353541f29a517a93d1b0fec023f5d12a05eade2 (patch)
tree       ed89daef5725a582ca4957f9b237c64113529501 /ui/surface
parent     724b3891cefbc629f9db168f9b39943dabc1cfe2 (diff)
ui: Move surface/ directory out of gfx/, up to ui/.
BUG=104040
R=ben@chromium.org
TBR=brettw@chromium.org

Review URL: https://chromiumcodereview.appspot.com/10351002

git-svn-id: svn://svn.chromium.org/chrome/trunk/src@135232 0039d316-1c4b-4281-b951-d872f2087c98
Diffstat (limited to 'ui/surface')
-rw-r--r--  ui/surface/OWNERS                      |   5
-rw-r--r--  ui/surface/accelerated_surface_mac.cc  | 363
-rw-r--r--  ui/surface/accelerated_surface_mac.h   | 181
-rw-r--r--  ui/surface/accelerated_surface_win.cc  | 683
-rw-r--r--  ui/surface/accelerated_surface_win.h   | 124
-rw-r--r--  ui/surface/io_surface_support_mac.cc   | 270
-rw-r--r--  ui/surface/io_surface_support_mac.h    |  70
-rw-r--r--  ui/surface/surface.gyp                 |  57
-rw-r--r--  ui/surface/surface_export.h            |  26
-rw-r--r--  ui/surface/transport_dib.h             | 217
-rw-r--r--  ui/surface/transport_dib_android.cc    | 101
-rw-r--r--  ui/surface/transport_dib_linux.cc      | 141
-rw-r--r--  ui/surface/transport_dib_mac.cc        |  98
-rw-r--r--  ui/surface/transport_dib_win.cc        | 110
14 files changed, 2446 insertions, 0 deletions
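Note (not part of the change itself): since this appears to be a straight move of surface/ from ui/gfx/ up to ui/, the practical effect on dependents is an include-path and gyp-dependency update. A minimal sketch, with a hypothetical consuming file:

// Hypothetical consumer -- illustrates the path change only, not part of this CL.
// Before this CL the headers lived under ui/gfx/surface/:
//   #include "ui/gfx/surface/accelerated_surface_mac.h"
// After this CL they live under ui/surface/:
#include "ui/surface/accelerated_surface_mac.h"
#include "ui/surface/transport_dib.h"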
diff --git a/ui/surface/OWNERS b/ui/surface/OWNERS
new file mode 100644
index 0000000..fa8215e
--- /dev/null
+++ b/ui/surface/OWNERS
@@ -0,0 +1,5 @@
+kbr@chromium.org
+pinkerton@chromium.org
+stuartmorgan@chromium.org
+backer@chromium.org
+apatrick@chromium.org
diff --git a/ui/surface/accelerated_surface_mac.cc b/ui/surface/accelerated_surface_mac.cc
new file mode 100644
index 0000000..5f2f12f
--- /dev/null
+++ b/ui/surface/accelerated_surface_mac.cc
@@ -0,0 +1,363 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "ui/surface/accelerated_surface_mac.h"
+
+#include "base/logging.h"
+#include "base/mac/scoped_cftyperef.h"
+#include "ui/gfx/gl/gl_bindings.h"
+#include "ui/gfx/gl/gl_context.h"
+#include "ui/gfx/gl/gl_implementation.h"
+#include "ui/gfx/gl/gl_surface.h"
+#include "ui/gfx/gl/scoped_make_current.h"
+#include "ui/gfx/rect.h"
+#include "ui/surface/io_surface_support_mac.h"
+
+AcceleratedSurface::AcceleratedSurface()
+ : io_surface_id_(0),
+ allocate_fbo_(false),
+ texture_(0),
+ fbo_(0) {
+}
+
+AcceleratedSurface::~AcceleratedSurface() {}
+
+bool AcceleratedSurface::Initialize(
+ gfx::GLContext* share_context,
+ bool allocate_fbo,
+ gfx::GpuPreference gpu_preference) {
+ allocate_fbo_ = allocate_fbo;
+
+ // Ensure GL is initialized before trying to create an offscreen GL context.
+ if (!gfx::GLSurface::InitializeOneOff())
+ return false;
+
+ // Drawing to IOSurfaces via OpenGL only works with Apple's GL and
+ // not with the OSMesa software renderer.
+ if (gfx::GetGLImplementation() != gfx::kGLImplementationDesktopGL &&
+ gfx::GetGLImplementation() != gfx::kGLImplementationAppleGL)
+ return false;
+
+ gl_surface_ = gfx::GLSurface::CreateOffscreenGLSurface(
+ false, gfx::Size(1, 1));
+ if (!gl_surface_.get()) {
+ Destroy();
+ return false;
+ }
+
+ gfx::GLShareGroup* share_group =
+ share_context ? share_context->share_group() : NULL;
+
+ gl_context_ = gfx::GLContext::CreateGLContext(
+ share_group,
+ gl_surface_.get(),
+ gpu_preference);
+ if (!gl_context_.get()) {
+ Destroy();
+ return false;
+ }
+
+ // Now we're ready to handle SetSurfaceSize calls, which will
+ // allocate and/or reallocate the IOSurface and associated offscreen
+ // OpenGL structures for rendering.
+ return true;
+}
+
+void AcceleratedSurface::Destroy() {
+ // The FBO and texture objects will be destroyed when the OpenGL context,
+ // and any other contexts sharing resources with it, is. We don't want to
+ // make the context current one last time here just in order to delete
+ // these objects.
+
+ // Release the old TransportDIB in the browser.
+ if (!dib_free_callback_.is_null() && transport_dib_.get()) {
+ dib_free_callback_.Run(transport_dib_->id());
+ }
+ transport_dib_.reset();
+
+ gl_context_ = NULL;
+ gl_surface_ = NULL;
+}
+
+// Call after making changes to the surface which require a visual update.
+// Makes the rendering show up in other processes.
+void AcceleratedSurface::SwapBuffers() {
+ if (io_surface_.get() != NULL) {
+ if (allocate_fbo_) {
+ // Bind and unbind the framebuffer to make changes to the
+ // IOSurface show up in the other process.
+ glBindFramebufferEXT(GL_FRAMEBUFFER_EXT, 0);
+ glBindFramebufferEXT(GL_FRAMEBUFFER_EXT, fbo_);
+ glFlush();
+ } else {
+ // Copy the current framebuffer's contents into our "live" texture.
+ // Note that the current GL context might not be ours at this point!
+ // This is deliberate, so that surrounding code using GL can produce
+ // rendering results consumed by the AcceleratedSurface.
+ // Need to save and restore OpenGL state around this call.
+ GLint current_texture = 0;
+ GLenum target_binding = GL_TEXTURE_BINDING_RECTANGLE_ARB;
+ GLenum target = GL_TEXTURE_RECTANGLE_ARB;
+ glGetIntegerv(target_binding, &current_texture);
+ glBindTexture(target, texture_);
+ glCopyTexSubImage2D(target, 0,
+ 0, 0,
+ 0, 0,
+ real_surface_size_.width(),
+ real_surface_size_.height());
+ glBindTexture(target, current_texture);
+ // This flush is absolutely essential -- it guarantees that the
+ // rendering results are seen by the other process.
+ glFlush();
+ }
+ } else if (transport_dib_.get() != NULL) {
+ // Pre-Mac OS X 10.6, fetch the rendered image from the current frame
+ // buffer and copy it into the TransportDIB.
+ // TODO(dspringer): There are a couple of options that can speed this up.
+ // First is to use async reads into a PBO, second is to use SPI that
+ // allows many tasks to access the same CGSSurface.
+ void* pixel_memory = transport_dib_->memory();
+ if (pixel_memory) {
+ // Note that glReadPixels does an implicit glFlush().
+ glReadPixels(0,
+ 0,
+ real_surface_size_.width(),
+ real_surface_size_.height(),
+ GL_BGRA, // This pixel format should have no conversion.
+ GL_UNSIGNED_INT_8_8_8_8_REV,
+ pixel_memory);
+ }
+ }
+}
+
+static void AddBooleanValue(CFMutableDictionaryRef dictionary,
+ const CFStringRef key,
+ bool value) {
+ CFDictionaryAddValue(dictionary, key,
+ (value ? kCFBooleanTrue : kCFBooleanFalse));
+}
+
+static void AddIntegerValue(CFMutableDictionaryRef dictionary,
+ const CFStringRef key,
+ int32 value) {
+ base::mac::ScopedCFTypeRef<CFNumberRef> number(
+ CFNumberCreate(NULL, kCFNumberSInt32Type, &value));
+ CFDictionaryAddValue(dictionary, key, number.get());
+}
+
+// Creates a new OpenGL texture object bound to the given texture target.
+// Caller owns the returned texture.
+static GLuint CreateTexture(GLenum target) {
+ GLuint texture = 0;
+ glGenTextures(1, &texture);
+ glBindTexture(target, texture);
+ glTexParameteri(target, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
+ glTexParameteri(target, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
+ glTexParameteri(target, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
+ glTexParameteri(target, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
+ return texture;
+}
+
+void AcceleratedSurface::AllocateRenderBuffers(GLenum target,
+ const gfx::Size& size) {
+ if (!texture_) {
+ // Generate the texture object.
+ texture_ = CreateTexture(target);
+ // Generate and bind the framebuffer object.
+ glGenFramebuffersEXT(1, &fbo_);
+ glBindFramebufferEXT(GL_FRAMEBUFFER_EXT, fbo_);
+ }
+
+ // Make sure that subsequent set-up code affects the render texture.
+ glBindTexture(target, texture_);
+}
+
+bool AcceleratedSurface::SetupFrameBufferObject(GLenum target) {
+ glBindFramebufferEXT(GL_FRAMEBUFFER_EXT, fbo_);
+ GLenum fbo_status;
+ glFramebufferTexture2DEXT(GL_FRAMEBUFFER_EXT,
+ GL_COLOR_ATTACHMENT0_EXT,
+ target,
+ texture_,
+ 0);
+ fbo_status = glCheckFramebufferStatusEXT(GL_FRAMEBUFFER_EXT);
+ return fbo_status == GL_FRAMEBUFFER_COMPLETE_EXT;
+}
+
+gfx::Size AcceleratedSurface::ClampToValidDimensions(const gfx::Size& size) {
+ return gfx::Size(std::max(size.width(), 1), std::max(size.height(), 1));
+}
+
+bool AcceleratedSurface::MakeCurrent() {
+ if (!gl_context_.get())
+ return false;
+ return gl_context_->MakeCurrent(gl_surface_.get());
+}
+
+void AcceleratedSurface::Clear(const gfx::Rect& rect) {
+ DCHECK(gl_context_->IsCurrent(gl_surface_.get()));
+ glClearColor(0, 0, 0, 0);
+ glViewport(0, 0, rect.width(), rect.height());
+ glMatrixMode(GL_PROJECTION);
+ glLoadIdentity();
+ glOrtho(0, rect.width(), 0, rect.height(), -1, 1);
+ glClear(GL_COLOR_BUFFER_BIT);
+}
+
+uint32 AcceleratedSurface::SetSurfaceSize(const gfx::Size& size) {
+ if (surface_size_ == size) {
+ // Return 0 to indicate to the caller that no new backing store
+ // allocation occurred.
+ return 0;
+ }
+
+ // Only support IO surfaces if the GL implementation is the native desktop GL.
+ // IO surfaces will not work with, for example, OSMesa software renderer
+ // GL contexts.
+ if (gfx::GetGLImplementation() != gfx::kGLImplementationDesktopGL)
+ return 0;
+
+ IOSurfaceSupport* io_surface_support = IOSurfaceSupport::Initialize();
+ if (!io_surface_support)
+ return 0; // Caller can try using SetWindowSizeForTransportDIB().
+
+ gfx::ScopedMakeCurrent make_current(gl_context_.get(), gl_surface_.get());
+ if (!make_current.Succeeded())
+ return 0;
+
+ gfx::Size clamped_size = ClampToValidDimensions(size);
+
+ // GL_TEXTURE_RECTANGLE_ARB is the best supported render target on
+ // Mac OS X and is required for IOSurface interoperability.
+ GLenum target = GL_TEXTURE_RECTANGLE_ARB;
+ if (allocate_fbo_) {
+ AllocateRenderBuffers(target, clamped_size);
+ } else if (!texture_) {
+ // Generate the texture object.
+ texture_ = CreateTexture(target);
+ }
+
+ // Allocate a new IOSurface, which is the GPU resource that can be
+ // shared across processes.
+ base::mac::ScopedCFTypeRef<CFMutableDictionaryRef> properties;
+ properties.reset(CFDictionaryCreateMutable(kCFAllocatorDefault,
+ 0,
+ &kCFTypeDictionaryKeyCallBacks,
+ &kCFTypeDictionaryValueCallBacks));
+ AddIntegerValue(properties,
+ io_surface_support->GetKIOSurfaceWidth(),
+ clamped_size.width());
+ AddIntegerValue(properties,
+ io_surface_support->GetKIOSurfaceHeight(),
+ clamped_size.height());
+ AddIntegerValue(properties,
+ io_surface_support->GetKIOSurfaceBytesPerElement(), 4);
+ AddBooleanValue(properties,
+ io_surface_support->GetKIOSurfaceIsGlobal(), true);
+ // I believe we should be able to unreference the IOSurfaces without
+ // synchronizing with the browser process because they are
+ // ultimately reference counted by the operating system.
+ io_surface_.reset(io_surface_support->IOSurfaceCreate(properties));
+
+ // Don't think we need to identify a plane.
+ GLuint plane = 0;
+ CGLError error = io_surface_support->CGLTexImageIOSurface2D(
+ static_cast<CGLContextObj>(gl_context_->GetHandle()),
+ target,
+ GL_RGBA,
+ clamped_size.width(),
+ clamped_size.height(),
+ GL_BGRA,
+ GL_UNSIGNED_INT_8_8_8_8_REV,
+ io_surface_.get(),
+ plane);
+ if (error != kCGLNoError) {
+ DLOG(ERROR) << "CGL error " << error << " during CGLTexImageIOSurface2D";
+ }
+ if (allocate_fbo_) {
+ // Set up the frame buffer object.
+ if (!SetupFrameBufferObject(target)) {
+ DLOG(ERROR) << "Failed to set up frame buffer object";
+ }
+ }
+ surface_size_ = size;
+ real_surface_size_ = clamped_size;
+
+ // Now send back an identifier for the IOSurface. We originally
+ // intended to send back a mach port from IOSurfaceCreateMachPort
+ // but it looks like Chrome IPC would need to be modified to
+ // properly send mach ports between processes. For the time being we
+ // make our IOSurfaces global and send back their identifiers. On
+ // the browser process side the identifier is reconstituted into an
+ // IOSurface for on-screen rendering.
+ io_surface_id_ = io_surface_support->IOSurfaceGetID(io_surface_);
+ return io_surface_id_;
+}
+
+uint32 AcceleratedSurface::GetSurfaceId() {
+ return io_surface_id_;
+}
+
+TransportDIB::Handle AcceleratedSurface::SetTransportDIBSize(
+ const gfx::Size& size) {
+ if (surface_size_ == size) {
+ // Return an invalid handle to indicate to the caller that no new backing
+ // store allocation occurred.
+ return TransportDIB::DefaultHandleValue();
+ }
+ surface_size_ = size;
+ gfx::Size clamped_size = ClampToValidDimensions(size);
+ real_surface_size_ = clamped_size;
+
+ // Release the old TransportDIB in the browser.
+ if (!dib_free_callback_.is_null() && transport_dib_.get()) {
+ dib_free_callback_.Run(transport_dib_->id());
+ }
+ transport_dib_.reset();
+
+ // Ask the renderer to create a TransportDIB.
+ size_t dib_size =
+ clamped_size.width() * 4 * clamped_size.height(); // 4 bytes per pixel.
+ TransportDIB::Handle dib_handle;
+ if (!dib_alloc_callback_.is_null()) {
+ dib_alloc_callback_.Run(dib_size, &dib_handle);
+ }
+ if (!TransportDIB::is_valid_handle(dib_handle)) {
+ // If the allocator fails, it means the DIB was not created in the browser,
+ // so there is no need to run the deallocator here.
+ return TransportDIB::DefaultHandleValue();
+ }
+ transport_dib_.reset(TransportDIB::Map(dib_handle));
+ if (transport_dib_.get() == NULL) {
+ // TODO(dspringer): if the Map() fails, should the deallocator be run so
+ // that the DIB is deallocated in the browser?
+ return TransportDIB::DefaultHandleValue();
+ }
+
+ if (allocate_fbo_) {
+ DCHECK(gl_context_->IsCurrent(gl_surface_.get()));
+ // Set up the render buffers and reserve enough space on the card for the
+ // framebuffer texture.
+ GLenum target = GL_TEXTURE_RECTANGLE_ARB;
+ AllocateRenderBuffers(target, clamped_size);
+ glTexImage2D(target,
+ 0, // mipmap level 0
+ GL_RGBA8, // internal pixel format
+ clamped_size.width(),
+ clamped_size.height(),
+ 0, // 0 border
+ GL_BGRA, // Used for consistency
+ GL_UNSIGNED_INT_8_8_8_8_REV,
+ NULL); // No data, just reserve room on the card.
+ SetupFrameBufferObject(target);
+ }
+ return transport_dib_->handle();
+}
+
+void AcceleratedSurface::SetTransportDIBAllocAndFree(
+ const base::Callback<void(size_t, TransportDIB::Handle*)>& allocator,
+ const base::Callback<void(TransportDIB::Id)>& deallocator) {
+ dib_alloc_callback_ = allocator;
+ dib_free_callback_ = deallocator;
+}
diff --git a/ui/surface/accelerated_surface_mac.h b/ui/surface/accelerated_surface_mac.h
new file mode 100644
index 0000000..fc37640
--- /dev/null
+++ b/ui/surface/accelerated_surface_mac.h
@@ -0,0 +1,181 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef UI_SURFACE_ACCELERATED_SURFACE_MAC_H_
+#define UI_SURFACE_ACCELERATED_SURFACE_MAC_H_
+#pragma once
+
+#include <CoreFoundation/CoreFoundation.h>
+
+#include "base/callback.h"
+#include "base/mac/scoped_cftyperef.h"
+#include "base/memory/scoped_ptr.h"
+#include "ui/gfx/gl/gl_context.h"
+#include "ui/gfx/gl/gl_surface.h"
+#include "ui/gfx/gl/gpu_preference.h"
+#include "ui/gfx/rect.h"
+#include "ui/gfx/size.h"
+#include "ui/surface/surface_export.h"
+#include "ui/surface/transport_dib.h"
+
+// Should not include GL headers in a header file. Forward declare these types
+// instead.
+typedef struct _CGLContextObject* CGLContextObj;
+typedef unsigned int GLenum;
+typedef unsigned int GLuint;
+
+namespace gfx {
+class Rect;
+}
+
+// Encapsulates an accelerated GL surface that can be shared across processes
+// on systems that support it (10.6 and above). For systems that do not, it
+// uses a regular dib. There will either be an IOSurface or a TransportDIB,
+// never both.
+
+class SURFACE_EXPORT AcceleratedSurface {
+ public:
+ AcceleratedSurface();
+ virtual ~AcceleratedSurface();
+
+ // Set up internal buffers. |share_context|, if non-NULL, is a context
+ // with which the internally created OpenGL context shares textures and
+ // other resources. |allocate_fbo| indicates whether or not this surface
+ // should allocate an offscreen frame buffer object (FBO) internally. If
+ // not, then the user is expected to allocate one. NOTE that allocating
+ // an FBO internally does NOT work properly with client code which uses
+ // OpenGL (i.e., via GLES2 command buffers), because the GLES2
+ // implementation does not know to bind the accelerated surface's
+ // internal FBO when the default FBO is bound. |gpu_preference| indicates
+ // the GPU preference for the internally allocated GLContext. If
+ // |share_context| is non-NULL, then on platforms supporting dual GPUs,
+ // its GPU preference must match the passed one. Returns false upon
+ // failure.
+ bool Initialize(gfx::GLContext* share_context,
+ bool allocate_fbo,
+ gfx::GpuPreference gpu_preference);
+ // Tear down. Must be called before destructor to prevent leaks.
+ void Destroy();
+
+ // These methods are used only once the accelerated surface is initialized.
+
+ // Sets the accelerated surface to the given size, creating a new one if
+ // the height or width changes. Returns a unique id of the IOSurface to
+ // which the surface is bound, or 0 if no changes were made or an error
+ // occurred. MakeCurrent() will have been called on the new surface.
+ uint32 SetSurfaceSize(const gfx::Size& size);
+
+ // Returns the id of this surface's IOSurface, or 0 for
+ // transport DIB surfaces.
+ uint32 GetSurfaceId();
+
+ // Sets the GL context to be the current one for drawing. Returns true if
+ // it succeeded.
+ bool MakeCurrent();
+ // Clear the surface to be transparent. Assumes the caller has already called
+ // MakeCurrent().
+ void Clear(const gfx::Rect& rect);
+ // Call after making changes to the surface which require a visual update.
+ // Makes the rendering show up in other processes. Assumes the caller has
+ // already called MakeCurrent().
+ //
+ // If this AcceleratedSurface is configured with its own FBO, then
+ // this call causes the color buffer to be transmitted. Otherwise,
+ // it causes the frame buffer of the current GL context to be copied
+ // either into an internal texture via glCopyTexSubImage2D or into a
+ // TransportDIB via glReadPixels.
+ //
+ // The size of the rectangle copied is the size last specified via
+ // SetSurfaceSize. If another GL context than the one this
+ // AcceleratedSurface contains is responsible for the production of
+ // the pixels, then when this entry point is called, the color
+ // buffer must be in a state where a glCopyTexSubImage2D or
+ // glReadPixels is legal. (For example, if using multisampled FBOs,
+ // the FBO must have been resolved into a non-multisampled color
+ // texture.) Additionally, in this situation, the contexts must
+ // share server-side GL objects, so that this AcceleratedSurface's
+ // texture is a legal name in the namespace of the current context.
+ void SwapBuffers();
+
+ CGLContextObj context() {
+ return static_cast<CGLContextObj>(gl_context_->GetHandle());
+ }
+
+ // These methods are only used when there is a transport DIB.
+
+ // Sets the transport DIB to the given size, creating a new one if the
+ // height or width changes. Returns a handle to the new DIB, or a default
+ // handle if no changes were made. Assumes the caller has already called
+ // MakeCurrent().
+ TransportDIB::Handle SetTransportDIBSize(const gfx::Size& size);
+ // Sets the methods to use for allocating and freeing memory for the
+ // transport DIB.
+ void SetTransportDIBAllocAndFree(
+ const base::Callback<void(size_t, TransportDIB::Handle*)>& allocator,
+ const base::Callback<void(TransportDIB::Id)>& deallocator);
+
+ // Get the accelerated surface size.
+ gfx::Size GetSize() const { return surface_size_; }
+
+ private:
+ // Helper function to generate names for the backing texture and FBO. On
+ // return, the resulting names can be attached to |fbo_|. |target| is
+ // the target type for the color buffer.
+ void AllocateRenderBuffers(GLenum target, const gfx::Size& size);
+
+ // Helper function to attach the buffers previously allocated by a call to
+ // AllocateRenderBuffers(). On return, |fbo_| can be used for
+ // rendering. |target| must be the same value as used in the call to
+ // AllocateRenderBuffers(). Returns |true| if the resulting framebuffer
+ // object is valid.
+ bool SetupFrameBufferObject(GLenum target);
+
+ gfx::Size ClampToValidDimensions(const gfx::Size& size);
+
+ // The OpenGL context, and pbuffer drawable, used to transfer data
+ // to the shared region (IOSurface or TransportDIB). Strictly
+ // speaking, we do not need to allocate a GL context all of the
+ // time. We only need one if (a) we are using the IOSurface code
+ // path, or (b) if we are allocating an FBO internally.
+ scoped_refptr<gfx::GLSurface> gl_surface_;
+ scoped_refptr<gfx::GLContext> gl_context_;
+ // Either |io_surface_| or |transport_dib_| is a valid pointer, but not both.
+ // |io_surface_| is non-NULL if the IOSurface APIs are supported (Mac OS X
+ // 10.6 and later).
+ // TODO(dspringer,kbr): Should the GPU backing store be encapsulated in its
+ // own class so all this implementation detail is hidden?
+ base::mac::ScopedCFTypeRef<CFTypeRef> io_surface_;
+
+ // The id of |io_surface_| or 0 if that's NULL.
+ uint32 io_surface_id_;
+
+ // TODO(dspringer): If we end up keeping this TransportDIB mechanism, this
+ // should really be a scoped_ptr_malloc<>, with a deallocate functor that
+ // runs |dib_free_callback_|. I was not able to figure out how to
+ // make this work (or even compile).
+ scoped_ptr<TransportDIB> transport_dib_;
+ gfx::Size surface_size_;
+ // It's important to avoid allocating zero-width or zero-height
+ // IOSurfaces and textures on the Mac, so we clamp each to a minimum
+ // of 1. This is the real size of the surface; surface_size_ is what
+ // the user requested.
+ gfx::Size real_surface_size_;
+ // TODO(kbr): the FBO management should not be in this class at all.
+ // However, if it is factored out, care needs to be taken to not
+ // introduce another copy of the color data on the GPU; the direct
+ // binding of the internal texture to the IOSurface saves a copy.
+ bool allocate_fbo_;
+ // If the IOSurface code path is being used, then this texture
+ // object is always allocated. Otherwise, it is only allocated if
+ // the user requests an FBO be allocated.
+ GLuint texture_;
+ // The FBO and renderbuffer are only allocated if allocate_fbo_ is
+ // true.
+ GLuint fbo_;
+ // Allocate a TransportDIB in the renderer.
+ base::Callback<void(size_t, TransportDIB::Handle*)> dib_alloc_callback_;
+ base::Callback<void(TransportDIB::Id)> dib_free_callback_;
+};
+
+#endif // UI_SURFACE_ACCELERATED_SURFACE_MAC_H_
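A rough usage sketch for the Mac surface, based only on the header comments above; |share_context|, the GPU preference, and the sizes are illustrative assumptions, not part of this change:

// Sketch only: assumes |share_context| already exists and that the IOSurface
// path is available (SetSurfaceSize() returns a non-zero id on 10.6+).
AcceleratedSurface surface;
if (!surface.Initialize(share_context, false /* allocate_fbo */,
                        gfx::PreferIntegratedGpu))
  return;

// Allocates the IOSurface backing store; returns 0 if nothing changed or on
// error. Send |io_surface_id| to the browser process over IPC.
uint32 io_surface_id = surface.SetSurfaceSize(gfx::Size(800, 600));

surface.MakeCurrent();
surface.Clear(gfx::Rect(800, 600));
// ... issue GL rendering into the surface's backing store here ...
surface.SwapBuffers();  // Makes the pixels visible to the other process.

surface.Destroy();  // Must be called before the destructor.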
diff --git a/ui/surface/accelerated_surface_win.cc b/ui/surface/accelerated_surface_win.cc
new file mode 100644
index 0000000..ee1b6c6
--- /dev/null
+++ b/ui/surface/accelerated_surface_win.cc
@@ -0,0 +1,683 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "ui/surface/accelerated_surface_win.h"
+
+#include <windows.h>
+#include <algorithm>
+
+#include "base/bind.h"
+#include "base/bind_helpers.h"
+#include "base/callback.h"
+#include "base/command_line.h"
+#include "base/debug/trace_event.h"
+#include "base/file_path.h"
+#include "base/lazy_instance.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/scoped_native_library.h"
+#include "base/string_number_conversions.h"
+#include "base/stringprintf.h"
+#include "base/synchronization/waitable_event.h"
+#include "base/threading/thread.h"
+#include "base/threading/thread_restrictions.h"
+#include "base/time.h"
+#include "base/tracked_objects.h"
+#include "base/win/wrapped_window_proc.h"
+#include "ui/base/win/hwnd_util.h"
+#include "ui/gfx/gl/gl_switches.h"
+
+namespace {
+
+typedef HRESULT (WINAPI *Direct3DCreate9ExFunc)(UINT sdk_version,
+ IDirect3D9Ex **d3d);
+
+const wchar_t kD3D9ModuleName[] = L"d3d9.dll";
+const char kCreate3D9DeviceExName[] = "Direct3DCreate9Ex";
+
+UINT GetPresentationInterval() {
+ if (CommandLine::ForCurrentProcess()->HasSwitch(switches::kDisableGpuVsync))
+ return D3DPRESENT_INTERVAL_IMMEDIATE;
+ else
+ return D3DPRESENT_INTERVAL_ONE;
+}
+
+// Calculate the number of downsampling steps necessary to transform
+// |source_size| into |dest_size|, where each step downsamples the image by a
+// factor of no more than 2.
+int GetResampleCount(const gfx::Size& source_size, const gfx::Size& dest_size) {
+ int width_count = 0;
+ int width = source_size.width();
+ while (width > dest_size.width()) {
+ ++width_count;
+ width >>= 1;
+ }
+ int height_count = 0;
+ int height = source_size.height();
+ while (height > dest_size.height()) {
+ ++height_count;
+ height >>= 1;
+ }
+ return std::max(width_count, height_count);
+}
+
+// Returns half the size of |size| no smaller than |min_size|.
+gfx::Size GetHalfSizeNoLessThan(const gfx::Size& size,
+ const gfx::Size& min_size) {
+ return gfx::Size(std::max(min_size.width(), size.width() / 2),
+ std::max(min_size.height(), size.height() / 2));
+}
+
+bool CreateTemporarySurface(IDirect3DDevice9* device,
+ const gfx::Size& size,
+ IDirect3DSurface9** surface) {
+ HRESULT hr = device->CreateRenderTarget(
+ size.width(),
+ size.height(),
+ D3DFMT_A8R8G8B8,
+ D3DMULTISAMPLE_NONE,
+ 0,
+ TRUE,
+ surface,
+ NULL);
+ return SUCCEEDED(hr);
+}
+
+} // namespace anonymous
+
+// A PresentThread is a thread that is dedicated to presenting surfaces to a
+// window. It owns a Direct3D device and a Direct3D query for this purpose.
+class PresentThread : public base::Thread,
+ public base::RefCountedThreadSafe<PresentThread> {
+ public:
+ explicit PresentThread(const char* name);
+
+ IDirect3DDevice9Ex* device() { return device_.get(); }
+ IDirect3DQuery9* query() { return query_.get(); }
+
+ void InitDevice();
+ void ResetDevice();
+
+ protected:
+ virtual void CleanUp();
+
+ private:
+ friend class base::RefCountedThreadSafe<PresentThread>;
+
+ ~PresentThread();
+
+ base::ScopedNativeLibrary d3d_module_;
+ base::win::ScopedComPtr<IDirect3DDevice9Ex> device_;
+
+ // This query is used to wait until a certain amount of progress has been
+ // made by the GPU and it is safe for the producer to modify its shared
+ // texture again.
+ base::win::ScopedComPtr<IDirect3DQuery9> query_;
+
+ DISALLOW_COPY_AND_ASSIGN(PresentThread);
+};
+
+// There is a fixed sized pool of PresentThreads and therefore the maximum
+// number of Direct3D devices owned by those threads is bounded.
+class PresentThreadPool {
+ public:
+ static const int kNumPresentThreads = 4;
+
+ PresentThreadPool();
+ PresentThread* NextThread();
+
+ private:
+ int next_thread_;
+ scoped_refptr<PresentThread> present_threads_[kNumPresentThreads];
+
+ DISALLOW_COPY_AND_ASSIGN(PresentThreadPool);
+};
+
+// A thread safe map of presenters by surface ID that returns presenters via
+// a scoped_refptr to keep them alive while they are referenced.
+class AcceleratedPresenterMap {
+ public:
+ AcceleratedPresenterMap();
+ scoped_refptr<AcceleratedPresenter> CreatePresenter(gfx::NativeWindow window);
+ void RemovePresenter(const scoped_refptr<AcceleratedPresenter>& presenter);
+ scoped_refptr<AcceleratedPresenter> GetPresenter(gfx::NativeWindow window);
+ private:
+ base::Lock lock_;
+ typedef std::map<gfx::NativeWindow, AcceleratedPresenter*> PresenterMap;
+ PresenterMap presenters_;
+ DISALLOW_COPY_AND_ASSIGN(AcceleratedPresenterMap);
+};
+
+base::LazyInstance<PresentThreadPool>
+ g_present_thread_pool = LAZY_INSTANCE_INITIALIZER;
+
+base::LazyInstance<AcceleratedPresenterMap>
+ g_accelerated_presenter_map = LAZY_INSTANCE_INITIALIZER;
+
+PresentThread::PresentThread(const char* name) : base::Thread(name) {
+}
+
+void PresentThread::InitDevice() {
+ if (device_)
+ return;
+
+ TRACE_EVENT0("surface", "PresentThread::Init");
+ d3d_module_.Reset(base::LoadNativeLibrary(FilePath(kD3D9ModuleName), NULL));
+ ResetDevice();
+}
+
+void PresentThread::ResetDevice() {
+ TRACE_EVENT0("surface", "PresentThread::ResetDevice");
+
+ // This will crash some Intel drivers but we can't render anything without
+  // resetting the device, which would be disappointing.
+ query_ = NULL;
+ device_ = NULL;
+
+ Direct3DCreate9ExFunc create_func = reinterpret_cast<Direct3DCreate9ExFunc>(
+ d3d_module_.GetFunctionPointer(kCreate3D9DeviceExName));
+ if (!create_func)
+ return;
+
+ base::win::ScopedComPtr<IDirect3D9Ex> d3d;
+ HRESULT hr = create_func(D3D_SDK_VERSION, d3d.Receive());
+ if (FAILED(hr))
+ return;
+
+ // Any old window will do to create the device. In practice the window to
+ // present to is an argument to IDirect3DDevice9::Present.
+ HWND window = GetShellWindow();
+
+ D3DPRESENT_PARAMETERS parameters = { 0 };
+ parameters.BackBufferWidth = 1;
+ parameters.BackBufferHeight = 1;
+ parameters.BackBufferCount = 1;
+ parameters.BackBufferFormat = D3DFMT_A8R8G8B8;
+ parameters.hDeviceWindow = window;
+ parameters.Windowed = TRUE;
+ parameters.Flags = 0;
+ parameters.PresentationInterval = GetPresentationInterval();
+ parameters.SwapEffect = D3DSWAPEFFECT_COPY;
+
+ hr = d3d->CreateDeviceEx(
+ D3DADAPTER_DEFAULT,
+ D3DDEVTYPE_HAL,
+ window,
+ D3DCREATE_FPU_PRESERVE | D3DCREATE_SOFTWARE_VERTEXPROCESSING |
+ D3DCREATE_DISABLE_PSGP_THREADING | D3DCREATE_MULTITHREADED,
+ &parameters,
+ NULL,
+ device_.Receive());
+ if (FAILED(hr))
+ return;
+
+ hr = device_->CreateQuery(D3DQUERYTYPE_EVENT, query_.Receive());
+ if (FAILED(hr))
+ device_ = NULL;
+}
+
+void PresentThread::CleanUp() {
+ // The D3D device and query are leaked because destroying the associated D3D
+ // query crashes some Intel drivers.
+ device_.Detach();
+ query_.Detach();
+}
+
+PresentThread::~PresentThread() {
+ Stop();
+}
+
+PresentThreadPool::PresentThreadPool() : next_thread_(0) {
+ // Do this in the constructor so present_threads_ is initialized before any
+ // other thread sees it. See LazyInstance documentation.
+ for (int i = 0; i < kNumPresentThreads; ++i) {
+ present_threads_[i] = new PresentThread(
+ base::StringPrintf("PresentThread #%d", i).c_str());
+ present_threads_[i]->Start();
+ }
+}
+
+PresentThread* PresentThreadPool::NextThread() {
+ next_thread_ = (next_thread_ + 1) % kNumPresentThreads;
+ return present_threads_[next_thread_].get();
+}
+
+AcceleratedPresenterMap::AcceleratedPresenterMap() {
+}
+
+scoped_refptr<AcceleratedPresenter> AcceleratedPresenterMap::CreatePresenter(
+ gfx::NativeWindow window) {
+ scoped_refptr<AcceleratedPresenter> presenter(
+ new AcceleratedPresenter(window));
+
+ base::AutoLock locked(lock_);
+ DCHECK(presenters_.find(window) == presenters_.end());
+ presenters_[window] = presenter.get();
+
+ return presenter;
+}
+
+void AcceleratedPresenterMap::RemovePresenter(
+ const scoped_refptr<AcceleratedPresenter>& presenter) {
+ base::AutoLock locked(lock_);
+ for (PresenterMap::iterator it = presenters_.begin();
+ it != presenters_.end();
+ ++it) {
+ if (it->second == presenter.get()) {
+ presenters_.erase(it);
+ return;
+ }
+ }
+
+ NOTREACHED();
+}
+
+scoped_refptr<AcceleratedPresenter> AcceleratedPresenterMap::GetPresenter(
+ gfx::NativeWindow window) {
+ base::AutoLock locked(lock_);
+ PresenterMap::iterator it = presenters_.find(window);
+ if (it == presenters_.end())
+ return scoped_refptr<AcceleratedPresenter>();
+
+ return it->second;
+}
+
+AcceleratedPresenter::AcceleratedPresenter(gfx::NativeWindow window)
+ : present_thread_(g_present_thread_pool.Pointer()->NextThread()),
+ window_(window),
+ event_(false, false) {
+}
+
+scoped_refptr<AcceleratedPresenter> AcceleratedPresenter::GetForWindow(
+ gfx::NativeWindow window) {
+ return g_accelerated_presenter_map.Pointer()->GetPresenter(window);
+}
+
+void AcceleratedPresenter::AsyncPresentAndAcknowledge(
+ const gfx::Size& size,
+ int64 surface_handle,
+ const base::Callback<void(bool)>& completion_task) {
+ if (!surface_handle) {
+ completion_task.Run(true);
+ return;
+ }
+
+ present_thread_->message_loop()->PostTask(
+ FROM_HERE,
+ base::Bind(&AcceleratedPresenter::DoPresentAndAcknowledge,
+ this,
+ size,
+ surface_handle,
+ completion_task));
+}
+
+bool AcceleratedPresenter::Present() {
+ TRACE_EVENT0("surface", "Present");
+
+ bool result;
+
+ present_thread_->message_loop()->PostTask(
+ FROM_HERE,
+ base::Bind(&AcceleratedPresenter::DoPresent,
+ this,
+ &result));
+ // http://crbug.com/125391
+ base::ThreadRestrictions::ScopedAllowWait allow_wait;
+ event_.Wait();
+ return result;
+}
+
+void AcceleratedPresenter::DoPresent(bool* result)
+{
+ *result = DoRealPresent();
+ event_.Signal();
+}
+
+bool AcceleratedPresenter::DoRealPresent()
+{
+ TRACE_EVENT0("surface", "DoRealPresent");
+ HRESULT hr;
+
+ base::AutoLock locked(lock_);
+
+ // Signal the caller to recomposite if the presenter has been suspended or no
+ // surface has ever been presented.
+ if (!swap_chain_)
+ return false;
+
+ // If invalidated, do nothing. The window is gone.
+ if (!window_)
+ return true;
+
+ RECT rect = {
+ 0, 0,
+ size_.width(), size_.height()
+ };
+
+ {
+ TRACE_EVENT0("surface", "PresentEx");
+ hr = swap_chain_->Present(&rect,
+ &rect,
+ window_,
+ NULL,
+ D3DPRESENT_INTERVAL_IMMEDIATE);
+ if (FAILED(hr))
+ return false;
+ }
+
+ return true;
+}
+
+bool AcceleratedPresenter::CopyTo(const gfx::Size& size, void* buf) {
+ base::AutoLock locked(lock_);
+
+ if (!swap_chain_)
+ return false;
+
+ base::win::ScopedComPtr<IDirect3DSurface9> back_buffer;
+ HRESULT hr = swap_chain_->GetBackBuffer(0,
+ D3DBACKBUFFER_TYPE_MONO,
+ back_buffer.Receive());
+ if (FAILED(hr))
+ return false;
+
+ D3DSURFACE_DESC desc;
+ hr = back_buffer->GetDesc(&desc);
+ if (FAILED(hr))
+ return false;
+
+ const gfx::Size back_buffer_size(desc.Width, desc.Height);
+ if (back_buffer_size.IsEmpty())
+ return false;
+
+ // Set up intermediate buffers needed for downsampling.
+ const int resample_count =
+ GetResampleCount(gfx::Size(desc.Width, desc.Height), size);
+ base::win::ScopedComPtr<IDirect3DSurface9> final_surface;
+ base::win::ScopedComPtr<IDirect3DSurface9> temp_buffer[2];
+ if (resample_count == 0)
+ final_surface = back_buffer;
+ if (resample_count > 0) {
+ if (!CreateTemporarySurface(present_thread_->device(),
+ size,
+ final_surface.Receive()))
+ return false;
+ }
+ const gfx::Size half_size = GetHalfSizeNoLessThan(back_buffer_size, size);
+ if (resample_count > 1) {
+ if (!CreateTemporarySurface(present_thread_->device(),
+ half_size,
+ temp_buffer[0].Receive()))
+ return false;
+ }
+ if (resample_count > 2) {
+ const gfx::Size quarter_size = GetHalfSizeNoLessThan(half_size, size);
+ if (!CreateTemporarySurface(present_thread_->device(),
+ quarter_size,
+ temp_buffer[1].Receive()))
+ return false;
+ }
+
+ // Repeat downsampling the surface until its size becomes identical to
+ // |size|. We keep the factor of each downsampling no more than two because
+ // using a factor more than two can introduce aliasing.
+ gfx::Size read_size = back_buffer_size;
+ gfx::Size write_size = half_size;
+ int read_buffer_index = 1;
+ int write_buffer_index = 0;
+ for (int i = 0; i < resample_count; ++i) {
+ base::win::ScopedComPtr<IDirect3DSurface9> read_buffer =
+ (i == 0) ? back_buffer : temp_buffer[read_buffer_index];
+ base::win::ScopedComPtr<IDirect3DSurface9> write_buffer =
+ (i == resample_count - 1) ? final_surface :
+ temp_buffer[write_buffer_index];
+ RECT read_rect = {0, 0, read_size.width(), read_size.height()};
+ RECT write_rect = {0, 0, write_size.width(), write_size.height()};
+ hr = present_thread_->device()->StretchRect(read_buffer,
+ &read_rect,
+ write_buffer,
+ &write_rect,
+ D3DTEXF_LINEAR);
+ if (FAILED(hr))
+ return false;
+ read_size = write_size;
+ write_size = GetHalfSizeNoLessThan(write_size, size);
+ std::swap(read_buffer_index, write_buffer_index);
+ }
+
+ DCHECK(size == read_size);
+
+ base::win::ScopedComPtr<IDirect3DSurface9> temp_surface;
+ HANDLE handle = reinterpret_cast<HANDLE>(buf);
+ hr = present_thread_->device()->CreateOffscreenPlainSurface(
+ size.width(),
+ size.height(),
+ D3DFMT_A8R8G8B8,
+ D3DPOOL_SYSTEMMEM,
+ temp_surface.Receive(),
+ &handle);
+ if (FAILED(hr))
+ return false;
+
+ // Copy the data in the temporary buffer to the surface backed by |buf|.
+ hr = present_thread_->device()->GetRenderTargetData(final_surface,
+ temp_surface);
+ if (FAILED(hr))
+ return false;
+
+ return true;
+}
+
+void AcceleratedPresenter::Suspend() {
+ present_thread_->message_loop()->PostTask(
+ FROM_HERE,
+ base::Bind(&AcceleratedPresenter::DoSuspend,
+ this));
+}
+
+void AcceleratedPresenter::ReleaseSurface() {
+ present_thread_->message_loop()->PostTask(
+ FROM_HERE,
+ base::Bind(&AcceleratedPresenter::DoReleaseSurface,
+ this));
+}
+
+void AcceleratedPresenter::Invalidate() {
+ // Make any pending or future presentation tasks do nothing. Once the last
+  // pending task has been ignored, the reference count on the presenter
+ // will go to zero and the presenter, and potentially also the present thread
+ // it has a reference count on, will be destroyed.
+ base::AutoLock locked(lock_);
+ window_ = NULL;
+}
+
+AcceleratedPresenter::~AcceleratedPresenter() {
+}
+
+static base::TimeDelta GetSwapDelay() {
+ CommandLine* cmd_line = CommandLine::ForCurrentProcess();
+ int delay = 0;
+ if (cmd_line->HasSwitch(switches::kGpuSwapDelay)) {
+ base::StringToInt(cmd_line->GetSwitchValueNative(
+ switches::kGpuSwapDelay).c_str(), &delay);
+ }
+ return base::TimeDelta::FromMilliseconds(delay);
+}
+
+void AcceleratedPresenter::DoPresentAndAcknowledge(
+ const gfx::Size& size,
+ int64 surface_handle,
+ const base::Callback<void(bool)>& completion_task) {
+ TRACE_EVENT1(
+ "surface", "DoPresentAndAcknowledge", "surface_handle", surface_handle);
+
+ HRESULT hr;
+
+ base::AutoLock locked(lock_);
+
+ // Initialize the device lazily since calling Direct3D can crash bots.
+ present_thread_->InitDevice();
+
+ if (!present_thread_->device()) {
+ if (!completion_task.is_null())
+ completion_task.Run(false);
+ return;
+ }
+
+  // Ensure the task always runs, and that it runs while the lock is held.
+ base::ScopedClosureRunner scoped_completion_runner(base::Bind(completion_task,
+ true));
+
+ // If invalidated, do nothing, the window is gone.
+ if (!window_)
+ return;
+
+ // Round up size so the swap chain is not continuously resized with the
+ // surface, which could lead to memory fragmentation.
+ const int kRound = 64;
+ gfx::Size quantized_size(
+ std::max(1, (size.width() + kRound - 1) / kRound * kRound),
+ std::max(1, (size.height() + kRound - 1) / kRound * kRound));
+
+ // Ensure the swap chain exists and is the same size (rounded up) as the
+ // surface to be presented.
+ if (!swap_chain_ || size_ != quantized_size) {
+ TRACE_EVENT0("surface", "CreateAdditionalSwapChain");
+ size_ = quantized_size;
+
+ D3DPRESENT_PARAMETERS parameters = { 0 };
+ parameters.BackBufferWidth = quantized_size.width();
+ parameters.BackBufferHeight = quantized_size.height();
+ parameters.BackBufferCount = 1;
+ parameters.BackBufferFormat = D3DFMT_A8R8G8B8;
+ parameters.hDeviceWindow = GetShellWindow();
+ parameters.Windowed = TRUE;
+ parameters.Flags = 0;
+ parameters.PresentationInterval = GetPresentationInterval();
+ parameters.SwapEffect = D3DSWAPEFFECT_COPY;
+
+ swap_chain_ = NULL;
+ HRESULT hr = present_thread_->device()->CreateAdditionalSwapChain(
+ &parameters,
+ swap_chain_.Receive());
+ if (FAILED(hr))
+ return;
+ }
+
+ if (!source_texture_.get()) {
+ TRACE_EVENT0("surface", "CreateTexture");
+ HANDLE handle = reinterpret_cast<HANDLE>(surface_handle);
+ hr = present_thread_->device()->CreateTexture(size.width(),
+ size.height(),
+ 1,
+ D3DUSAGE_RENDERTARGET,
+ D3DFMT_A8R8G8B8,
+ D3DPOOL_DEFAULT,
+ source_texture_.Receive(),
+ &handle);
+ if (FAILED(hr))
+ return;
+ }
+
+ base::win::ScopedComPtr<IDirect3DSurface9> source_surface;
+ hr = source_texture_->GetSurfaceLevel(0, source_surface.Receive());
+ if (FAILED(hr))
+ return;
+
+ base::win::ScopedComPtr<IDirect3DSurface9> dest_surface;
+ hr = swap_chain_->GetBackBuffer(0,
+ D3DBACKBUFFER_TYPE_MONO,
+ dest_surface.Receive());
+ if (FAILED(hr))
+ return;
+
+ RECT rect = {
+ 0, 0,
+ size.width(), size.height()
+ };
+
+ {
+ TRACE_EVENT0("surface", "StretchRect");
+ hr = present_thread_->device()->StretchRect(source_surface,
+ &rect,
+ dest_surface,
+ &rect,
+ D3DTEXF_NONE);
+ if (FAILED(hr))
+ return;
+ }
+
+ hr = present_thread_->query()->Issue(D3DISSUE_END);
+ if (FAILED(hr))
+ return;
+
+ // Flush so the StretchRect can be processed by the GPU while the window is
+ // being resized.
+ present_thread_->query()->GetData(NULL, 0, D3DGETDATA_FLUSH);
+
+ ::SetWindowPos(
+ window_,
+ NULL,
+ 0, 0,
+ size.width(), size.height(),
+      SWP_NOACTIVATE | SWP_NOCOPYBITS | SWP_NOMOVE | SWP_NOOWNERZORDER |
+      SWP_NOREDRAW | SWP_NOSENDCHANGING |
+ SWP_ASYNCWINDOWPOS | SWP_NOZORDER);
+
+ // Wait for the StretchRect to complete before notifying the GPU process
+ // that it is safe to write to its backing store again.
+ {
+ TRACE_EVENT0("surface", "spin");
+ do {
+ hr = present_thread_->query()->GetData(NULL, 0, D3DGETDATA_FLUSH);
+
+ if (hr == S_FALSE)
+ Sleep(1);
+ } while (hr == S_FALSE);
+ }
+
+ static const base::TimeDelta swap_delay = GetSwapDelay();
+ if (swap_delay.ToInternalValue())
+ base::PlatformThread::Sleep(swap_delay);
+
+ {
+ TRACE_EVENT0("surface", "Present");
+ hr = swap_chain_->Present(&rect, &rect, window_, NULL, 0);
+ if (FAILED(hr) &&
+ FAILED(present_thread_->device()->CheckDeviceState(window_))) {
+ present_thread_->ResetDevice();
+ }
+ }
+}
+
+void AcceleratedPresenter::DoSuspend() {
+ base::AutoLock locked(lock_);
+ swap_chain_ = NULL;
+}
+
+void AcceleratedPresenter::DoReleaseSurface() {
+ base::AutoLock locked(lock_);
+ source_texture_.Release();
+}
+
+AcceleratedSurface::AcceleratedSurface(gfx::NativeWindow window)
+ : presenter_(g_accelerated_presenter_map.Pointer()->CreatePresenter(
+ window)) {
+}
+
+AcceleratedSurface::~AcceleratedSurface() {
+ g_accelerated_presenter_map.Pointer()->RemovePresenter(presenter_);
+ presenter_->Invalidate();
+}
+
+bool AcceleratedSurface::Present() {
+ return presenter_->Present();
+}
+
+bool AcceleratedSurface::CopyTo(const gfx::Size& size, void* buf) {
+ return presenter_->CopyTo(size, buf);
+}
+
+void AcceleratedSurface::Suspend() {
+ presenter_->Suspend();
+}
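For reference, the producer-side handshake that DoPresentAndAcknowledge() serves would be driven roughly as follows; the function names, window, surface handle, and size below are hypothetical stand-ins, not part of this change:

// Sketch only -- illustrative names, not part of this CL.
void OnPresentAcknowledged(bool presented) {
  // It is now safe to write to the shared texture again.
}

void PresentFrame(gfx::NativeWindow window, int64 surface_handle) {
  scoped_refptr<AcceleratedPresenter> presenter =
      AcceleratedPresenter::GetForWindow(window);
  if (!presenter)
    return;
  presenter->AsyncPresentAndAcknowledge(
      gfx::Size(800, 600),
      surface_handle,  // Shared D3D9 texture handle, passed as an int64.
      base::Bind(&OnPresentAcknowledged));
}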
diff --git a/ui/surface/accelerated_surface_win.h b/ui/surface/accelerated_surface_win.h
new file mode 100644
index 0000000..967b258
--- /dev/null
+++ b/ui/surface/accelerated_surface_win.h
@@ -0,0 +1,124 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef UI_SURFACE_ACCELERATED_SURFACE_WIN_H_
+#define UI_SURFACE_ACCELERATED_SURFACE_WIN_H_
+#pragma once
+
+#include <d3d9.h>
+
+#include "base/callback_forward.h"
+#include "base/memory/ref_counted.h"
+#include "base/synchronization/lock.h"
+#include "base/synchronization/waitable_event.h"
+#include "base/win/scoped_comptr.h"
+#include "ui/gfx/native_widget_types.h"
+#include "ui/gfx/size.h"
+#include "ui/surface/surface_export.h"
+
+class PresentThread;
+
+class SURFACE_EXPORT AcceleratedPresenter
+ : public base::RefCountedThreadSafe<AcceleratedPresenter> {
+ public:
+  typedef base::Callback<void(bool)> CompletionTask;
+
+ explicit AcceleratedPresenter(gfx::NativeWindow window);
+
+ // Returns a thread safe reference to the presenter for the given window or
+  // null if no such presenter exists. The thread safe refptr ensures the
+ // presenter will not be destroyed. This can be called on any thread.
+ static scoped_refptr<AcceleratedPresenter> GetForWindow(
+ gfx::NativeWindow window);
+
+ // Schedule a frame to be presented. The completion callback will be invoked
+ // when it is safe to write to the surface on another thread. The lock for
+ // this surface will be held while the completion callback runs. This can be
+ // called on any thread.
+ void AsyncPresentAndAcknowledge(
+ const gfx::Size& size,
+ int64 surface_handle,
+ const base::Callback<void(bool)>& completion_task);
+
+ // Schedule the presenter to free all its resources. This can be called on any
+ // thread.
+ void Suspend();
+
+ // Schedule the presenter to release its reference to the shared surface.
+ void ReleaseSurface();
+
+ // The public member functions are called on the main thread.
+ bool Present();
+ bool CopyTo(const gfx::Size& size, void* buf);
+ void Invalidate();
+
+ private:
+ friend class base::RefCountedThreadSafe<AcceleratedPresenter>;
+
+ ~AcceleratedPresenter();
+
+ // These member functions are called on the PresentThread with which the
+ // presenter has affinity.
+ void DoPresentAndAcknowledge(
+ const gfx::Size& size,
+ int64 surface_handle,
+ const base::Callback<void(bool)>& completion_task);
+ void DoSuspend();
+ void DoPresent(bool* presented);
+ bool DoRealPresent();
+ void DoReleaseSurface();
+
+ // The thread with which this presenter has affinity.
+ PresentThread* const present_thread_;
+
+ // The window that is presented to.
+ gfx::NativeWindow window_;
+
+ // The lock is taken while any thread is calling the object, except those that
+ // simply post from the main thread to the present thread via the immutable
+ // present_thread_ member.
+ base::Lock lock_;
+
+ // UI thread can wait on this event to ensure a present is finished.
+ base::WaitableEvent event_;
+
+ // The current size of the swap chain. This is only accessed on the thread
+ // with which the surface has affinity.
+ gfx::Size size_;
+
+ // This is a shared texture that is being presented from.
+ base::win::ScopedComPtr<IDirect3DTexture9> source_texture_;
+
+  // The swap chain is presented to the child window. Copy semantics are used
+  // so it is possible to re-present it to quickly validate the window.
+ base::win::ScopedComPtr<IDirect3DSwapChain9> swap_chain_;
+
+ DISALLOW_COPY_AND_ASSIGN(AcceleratedPresenter);
+};
+
+class SURFACE_EXPORT AcceleratedSurface {
+ public:
+  explicit AcceleratedSurface(gfx::NativeWindow window);
+ ~AcceleratedSurface();
+
+ // Synchronously present a frame with no acknowledgement.
+ bool Present();
+
+ // Copies the surface data to |buf|. The image data is transformed so that it
+ // fits in |size|.
+  // Caller must ensure that |buf| is allocated with a size of no less than
+ // |4 * size.width() * size.height()| bytes.
+ bool CopyTo(const gfx::Size& size, void* buf);
+
+ // Temporarily release resources until a new surface is asynchronously
+  // presented. Present will not be able to re-present the last surface after
+ // calling this and will return false.
+ void Suspend();
+
+ private:
+ const scoped_refptr<AcceleratedPresenter> presenter_;
+ DISALLOW_COPY_AND_ASSIGN(AcceleratedSurface);
+};
+
+#endif // UI_SURFACE_ACCELERATED_SURFACE_WIN_H_
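A corresponding consumer-side sketch for the browser process, again with hypothetical names (|hwnd|, the thumbnail size), based only on the class comments above:

// Sketch only.
AcceleratedSurface surface(hwnd);  // |hwnd| is the child window to present to.

// Synchronously re-present the last frame, e.g. on WM_PAINT; returns false if
// nothing has been presented yet or the presenter was suspended.
surface.Present();

// Read back a downscaled copy of the last frame (e.g. for a thumbnail).
// |buf| must hold at least 4 * width * height bytes.
gfx::Size thumb_size(320, 240);
std::vector<char> buf(4 * thumb_size.width() * thumb_size.height());
surface.CopyTo(thumb_size, &buf[0]);

surface.Suspend();  // Drop resources until the next asynchronous present.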
diff --git a/ui/surface/io_surface_support_mac.cc b/ui/surface/io_surface_support_mac.cc
new file mode 100644
index 0000000..1b2c324
--- /dev/null
+++ b/ui/surface/io_surface_support_mac.cc
@@ -0,0 +1,270 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <dlfcn.h>
+
+#include "base/memory/singleton.h"
+#include "ui/surface/io_surface_support_mac.h"
+
+typedef CFTypeRef (*IOSurfaceCreateProcPtr)(CFDictionaryRef properties);
+typedef uint32 (*IOSurfaceGetIDProcPtr)(CFTypeRef io_surface);
+typedef CFTypeRef (*IOSurfaceLookupProcPtr)(uint32 io_surface_id);
+typedef mach_port_t (*IOSurfaceCreateMachPortProcPtr)(CFTypeRef io_surface);
+typedef CFTypeRef (*IOSurfaceLookupFromMachPortProcPtr)(mach_port_t port);
+typedef size_t (*IOSurfaceGetWidthPtr)(CFTypeRef io_surface);
+typedef size_t (*IOSurfaceGetHeightPtr)(CFTypeRef io_surface);
+typedef CGLError (*CGLTexImageIOSurface2DProcPtr)(CGLContextObj ctx,
+ GLenum target,
+ GLenum internal_format,
+ GLsizei width,
+ GLsizei height,
+ GLenum format,
+ GLenum type,
+ CFTypeRef io_surface,
+ GLuint plane);
+
+class IOSurfaceSupportImpl : public IOSurfaceSupport {
+ public:
+ static IOSurfaceSupportImpl* GetInstance();
+
+ bool InitializedSuccessfully() {
+ return initialized_successfully_;
+ }
+
+ virtual CFStringRef GetKIOSurfaceWidth();
+ virtual CFStringRef GetKIOSurfaceHeight();
+ virtual CFStringRef GetKIOSurfaceBytesPerElement();
+ virtual CFStringRef GetKIOSurfaceIsGlobal();
+
+ virtual CFTypeRef IOSurfaceCreate(CFDictionaryRef properties);
+ virtual uint32 IOSurfaceGetID(CFTypeRef io_surface);
+ virtual CFTypeRef IOSurfaceLookup(uint32 io_surface_id);
+ virtual mach_port_t IOSurfaceCreateMachPort(CFTypeRef io_surface);
+ virtual CFTypeRef IOSurfaceLookupFromMachPort(mach_port_t port);
+
+ virtual size_t IOSurfaceGetWidth(CFTypeRef io_surface);
+ virtual size_t IOSurfaceGetHeight(CFTypeRef io_surface);
+
+ virtual CGLError CGLTexImageIOSurface2D(CGLContextObj ctx,
+ GLenum target,
+ GLenum internal_format,
+ GLsizei width,
+ GLsizei height,
+ GLenum format,
+ GLenum type,
+ CFTypeRef io_surface,
+ GLuint plane);
+
+ private:
+ IOSurfaceSupportImpl();
+ ~IOSurfaceSupportImpl();
+
+ void* iosurface_handle_;
+ void* opengl_handle_;
+ CFStringRef k_io_surface_width_;
+ CFStringRef k_io_surface_height_;
+ CFStringRef k_io_surface_bytes_per_element_;
+ CFStringRef k_io_surface_is_global_;
+ IOSurfaceCreateProcPtr io_surface_create_;
+ IOSurfaceGetIDProcPtr io_surface_get_id_;
+ IOSurfaceLookupProcPtr io_surface_lookup_;
+ IOSurfaceCreateMachPortProcPtr io_surface_create_mach_port_;
+ IOSurfaceLookupFromMachPortProcPtr io_surface_lookup_from_mach_port_;
+ IOSurfaceGetWidthPtr io_surface_get_width_;
+ IOSurfaceGetHeightPtr io_surface_get_height_;
+ CGLTexImageIOSurface2DProcPtr cgl_tex_image_io_surface_2d_;
+ bool initialized_successfully_;
+
+ friend struct DefaultSingletonTraits<IOSurfaceSupportImpl>;
+ DISALLOW_COPY_AND_ASSIGN(IOSurfaceSupportImpl);
+};
+
+IOSurfaceSupportImpl* IOSurfaceSupportImpl::GetInstance() {
+ IOSurfaceSupportImpl* impl = Singleton<IOSurfaceSupportImpl>::get();
+ if (impl->InitializedSuccessfully())
+ return impl;
+ return NULL;
+}
+
+CFStringRef IOSurfaceSupportImpl::GetKIOSurfaceWidth() {
+ return k_io_surface_width_;
+}
+
+CFStringRef IOSurfaceSupportImpl::GetKIOSurfaceHeight() {
+ return k_io_surface_height_;
+}
+
+CFStringRef IOSurfaceSupportImpl::GetKIOSurfaceBytesPerElement() {
+ return k_io_surface_bytes_per_element_;
+}
+
+CFStringRef IOSurfaceSupportImpl::GetKIOSurfaceIsGlobal() {
+ return k_io_surface_is_global_;
+}
+
+CFTypeRef IOSurfaceSupportImpl::IOSurfaceCreate(CFDictionaryRef properties) {
+ return io_surface_create_(properties);
+}
+
+uint32 IOSurfaceSupportImpl::IOSurfaceGetID(
+ CFTypeRef io_surface) {
+ return io_surface_get_id_(io_surface);
+}
+
+CFTypeRef IOSurfaceSupportImpl::IOSurfaceLookup(uint32 io_surface_id) {
+ return io_surface_lookup_(io_surface_id);
+}
+
+mach_port_t IOSurfaceSupportImpl::IOSurfaceCreateMachPort(
+ CFTypeRef io_surface) {
+ return io_surface_create_mach_port_(io_surface);
+}
+
+CFTypeRef IOSurfaceSupportImpl::IOSurfaceLookupFromMachPort(mach_port_t port) {
+ return io_surface_lookup_from_mach_port_(port);
+}
+
+size_t IOSurfaceSupportImpl::IOSurfaceGetWidth(CFTypeRef io_surface) {
+ return io_surface_get_width_(io_surface);
+}
+
+size_t IOSurfaceSupportImpl::IOSurfaceGetHeight(CFTypeRef io_surface) {
+ return io_surface_get_height_(io_surface);
+}
+
+
+CGLError IOSurfaceSupportImpl::CGLTexImageIOSurface2D(CGLContextObj ctx,
+ GLenum target,
+ GLenum internal_format,
+ GLsizei width,
+ GLsizei height,
+ GLenum format,
+ GLenum type,
+ CFTypeRef io_surface,
+ GLuint plane) {
+ return cgl_tex_image_io_surface_2d_(ctx,
+ target,
+ internal_format,
+ width,
+ height,
+ format,
+ type,
+ io_surface,
+ plane);
+}
+
+IOSurfaceSupportImpl::IOSurfaceSupportImpl()
+ : iosurface_handle_(NULL),
+ opengl_handle_(NULL),
+ k_io_surface_width_(NULL),
+ k_io_surface_height_(NULL),
+ k_io_surface_bytes_per_element_(NULL),
+ k_io_surface_is_global_(NULL),
+ io_surface_create_(NULL),
+ io_surface_get_id_(NULL),
+ io_surface_lookup_(NULL),
+ io_surface_create_mach_port_(NULL),
+ io_surface_lookup_from_mach_port_(NULL),
+ io_surface_get_width_(NULL),
+ io_surface_get_height_(NULL),
+ cgl_tex_image_io_surface_2d_(NULL),
+ initialized_successfully_(false) {
+ iosurface_handle_ = dlopen(
+ "/System/Library/Frameworks/IOSurface.framework/IOSurface",
+ RTLD_LAZY | RTLD_LOCAL);
+ if (!iosurface_handle_)
+ return;
+ opengl_handle_ = dlopen(
+ "/System/Library/Frameworks/OpenGL.framework/OpenGL",
+ RTLD_LAZY | RTLD_LOCAL);
+ if (!opengl_handle_) {
+ dlclose(iosurface_handle_);
+ iosurface_handle_ = NULL;
+ return;
+ }
+
+ void* surface_width_ptr = dlsym(iosurface_handle_, "kIOSurfaceWidth");
+ void* surface_height_ptr = dlsym(iosurface_handle_, "kIOSurfaceHeight");
+ void* surface_bytes_per_element_ptr =
+ dlsym(iosurface_handle_, "kIOSurfaceBytesPerElement");
+ void* surface_is_global_ptr =
+ dlsym(iosurface_handle_, "kIOSurfaceIsGlobal");
+ void* surface_create_ptr = dlsym(iosurface_handle_, "IOSurfaceCreate");
+ void* surface_get_id_ptr = dlsym(iosurface_handle_, "IOSurfaceGetID");
+ void* surface_lookup_ptr = dlsym(iosurface_handle_, "IOSurfaceLookup");
+ void* surface_create_mach_port_ptr =
+ dlsym(iosurface_handle_, "IOSurfaceCreateMachPort");
+ void* surface_lookup_from_mach_port_ptr =
+ dlsym(iosurface_handle_, "IOSurfaceLookupFromMachPort");
+ void* io_surface_get_width_ptr =
+ dlsym(iosurface_handle_, "IOSurfaceGetWidth");
+ void* io_surface_get_height_ptr =
+ dlsym(iosurface_handle_, "IOSurfaceGetHeight");
+ void* tex_image_io_surface_2d_ptr =
+ dlsym(opengl_handle_, "CGLTexImageIOSurface2D");
+ if (!surface_width_ptr ||
+ !surface_height_ptr ||
+ !surface_bytes_per_element_ptr ||
+ !surface_is_global_ptr ||
+ !surface_create_ptr ||
+ !surface_get_id_ptr ||
+ !surface_lookup_ptr ||
+ !surface_create_mach_port_ptr ||
+ !surface_lookup_from_mach_port_ptr ||
+ !io_surface_get_width_ptr ||
+ !io_surface_get_height_ptr ||
+ !tex_image_io_surface_2d_ptr) {
+ dlclose(iosurface_handle_);
+ iosurface_handle_ = NULL;
+ dlclose(opengl_handle_);
+ opengl_handle_ = NULL;
+ return;
+ }
+
+ k_io_surface_width_ = *static_cast<CFStringRef*>(surface_width_ptr);
+ k_io_surface_height_ = *static_cast<CFStringRef*>(surface_height_ptr);
+ k_io_surface_bytes_per_element_ =
+ *static_cast<CFStringRef*>(surface_bytes_per_element_ptr);
+ k_io_surface_is_global_ = *static_cast<CFStringRef*>(surface_is_global_ptr);
+ io_surface_create_ = reinterpret_cast<IOSurfaceCreateProcPtr>(
+ surface_create_ptr);
+ io_surface_get_id_ =
+ reinterpret_cast<IOSurfaceGetIDProcPtr>(surface_get_id_ptr);
+ io_surface_lookup_ =
+ reinterpret_cast<IOSurfaceLookupProcPtr>(surface_lookup_ptr);
+ io_surface_create_mach_port_ =
+ reinterpret_cast<IOSurfaceCreateMachPortProcPtr>(
+ surface_create_mach_port_ptr);
+ io_surface_lookup_from_mach_port_ =
+ reinterpret_cast<IOSurfaceLookupFromMachPortProcPtr>(
+ surface_lookup_from_mach_port_ptr);
+ io_surface_get_width_ =
+ reinterpret_cast<IOSurfaceGetWidthPtr>(
+ io_surface_get_width_ptr);
+ io_surface_get_height_ =
+ reinterpret_cast<IOSurfaceGetHeightPtr>(
+ io_surface_get_height_ptr);
+ cgl_tex_image_io_surface_2d_ =
+ reinterpret_cast<CGLTexImageIOSurface2DProcPtr>(
+ tex_image_io_surface_2d_ptr);
+ initialized_successfully_ = true;
+}
+
+IOSurfaceSupportImpl::~IOSurfaceSupportImpl() {
+ if (iosurface_handle_)
+ dlclose(iosurface_handle_);
+ if (opengl_handle_)
+ dlclose(opengl_handle_);
+}
+
+IOSurfaceSupport* IOSurfaceSupport::Initialize() {
+ return IOSurfaceSupportImpl::GetInstance();
+}
+
+IOSurfaceSupport::IOSurfaceSupport() {
+}
+
+IOSurfaceSupport::~IOSurfaceSupport() {
+}
+
diff --git a/ui/surface/io_surface_support_mac.h b/ui/surface/io_surface_support_mac.h
new file mode 100644
index 0000000..e1d78ef
--- /dev/null
+++ b/ui/surface/io_surface_support_mac.h
@@ -0,0 +1,70 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef UI_SURFACE_IO_SURFACE_SUPPORT_MAC_H_
+#define UI_SURFACE_IO_SURFACE_SUPPORT_MAC_H_
+#pragma once
+
+#include <CoreFoundation/CoreFoundation.h>
+#include <mach/mach.h>
+#include <OpenGL/OpenGL.h>
+
+#include "base/basictypes.h"
+#include "ui/surface/surface_export.h"
+
+// This Mac OS X-specific class provides dynamically-linked access to
+// IOSurface.framework, which is only available on 10.6 and later.
+// Since Chromium is built on 10.5 we must dynamically look up all of
+// the entry points we need in this framework.
+
+// See IOSurface/IOSurfaceAPI.h and OpenGL/CGLIOSurface.h on 10.6 for
+// documentation of the fields and methods of this class.
+
+class SURFACE_EXPORT IOSurfaceSupport {
+ public:
+ // Returns an instance of the IOSurfaceSupport class if the
+ // operating system supports it, NULL otherwise. It is safe to call
+ // this multiple times.
+ static IOSurfaceSupport* Initialize();
+
+ virtual CFStringRef GetKIOSurfaceWidth() = 0;
+ virtual CFStringRef GetKIOSurfaceHeight() = 0;
+ virtual CFStringRef GetKIOSurfaceBytesPerElement() = 0;
+ virtual CFStringRef GetKIOSurfaceIsGlobal() = 0;
+
+ virtual CFTypeRef IOSurfaceCreate(CFDictionaryRef properties) = 0;
+
+  // The following two APIs assume the IOSurface was created with the
+  // kIOSurfaceIsGlobal key set to true.
+ virtual uint32 IOSurfaceGetID(CFTypeRef io_surface) = 0;
+ virtual CFTypeRef IOSurfaceLookup(uint32 io_surface_id) = 0;
+
+  // The following two APIs are more robust and secure, but
+  // unfortunately correctly transmitting a mach port from process to
+  // process looks like a lot of work (possibly requiring a side channel
+  // for, or an extension of, the Chrome IPC mechanism).
+ virtual mach_port_t IOSurfaceCreateMachPort(CFTypeRef io_surface) = 0;
+ virtual CFTypeRef IOSurfaceLookupFromMachPort(mach_port_t port) = 0;
+
+ virtual size_t IOSurfaceGetWidth(CFTypeRef io_surface) = 0;
+ virtual size_t IOSurfaceGetHeight(CFTypeRef io_surface) = 0;
+
+ virtual CGLError CGLTexImageIOSurface2D(CGLContextObj ctx,
+ GLenum target,
+ GLenum internal_format,
+ GLsizei width,
+ GLsizei height,
+ GLenum format,
+ GLenum type,
+ CFTypeRef io_surface,
+ GLuint plane) = 0;
+
+ protected:
+ IOSurfaceSupport();
+ virtual ~IOSurfaceSupport();
+
+ DISALLOW_COPY_AND_ASSIGN(IOSurfaceSupport);
+};
+
+#endif // UI_SURFACE_IO_SURFACE_SUPPORT_MAC_H_
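The interface above is the dynamic mirror of the 10.6 IOSurface C API. A hedged sketch of how a caller might drive it to allocate a surface that another process can look up by ID; CreateSharedSurface and the 32-bit BGRA layout are illustrative assumptions, not code from this patch:

// Illustrative only: shows the wrapper's CF-dictionary calling convention.
#include <CoreFoundation/CoreFoundation.h>

CFTypeRef CreateSharedSurface(IOSurfaceSupport* io, int width, int height) {
  if (!io)
    return NULL;  // IOSurfaceSupport::Initialize() returns NULL on 10.5.

  CFMutableDictionaryRef properties = CFDictionaryCreateMutable(
      kCFAllocatorDefault, 0,
      &kCFTypeDictionaryKeyCallBacks, &kCFTypeDictionaryValueCallBacks);

  int bytes_per_element = 4;  // Assumes a 32-bit BGRA layout.
  CFNumberRef cf_width = CFNumberCreate(NULL, kCFNumberIntType, &width);
  CFNumberRef cf_height = CFNumberCreate(NULL, kCFNumberIntType, &height);
  CFNumberRef cf_bpe =
      CFNumberCreate(NULL, kCFNumberIntType, &bytes_per_element);
  CFDictionarySetValue(properties, io->GetKIOSurfaceWidth(), cf_width);
  CFDictionarySetValue(properties, io->GetKIOSurfaceHeight(), cf_height);
  CFDictionarySetValue(properties, io->GetKIOSurfaceBytesPerElement(), cf_bpe);
  // A global surface can be looked up by IOSurfaceGetID() from another
  // process, which is the less secure of the two sharing schemes above.
  CFDictionarySetValue(properties, io->GetKIOSurfaceIsGlobal(),
                       kCFBooleanTrue);

  CFTypeRef surface = io->IOSurfaceCreate(properties);
  CFRelease(cf_width);
  CFRelease(cf_height);
  CFRelease(cf_bpe);
  CFRelease(properties);
  return surface;  // io->IOSurfaceGetID(surface) can now be sent over IPC.
}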
diff --git a/ui/surface/surface.gyp b/ui/surface/surface.gyp
new file mode 100644
index 0000000..af52fef
--- /dev/null
+++ b/ui/surface/surface.gyp
@@ -0,0 +1,57 @@
+# Copyright (c) 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'variables': {
+ 'chromium_code': 1,
+ },
+
+ 'target_defaults': {
+ 'conditions': [
+ ['use_x11 == 1', {
+ 'include_dirs': [
+ '<(DEPTH)/third_party/angle/include',
+ ],
+ }],
+ ],
+ },
+ 'targets': [
+ {
+ 'target_name': 'surface',
+ 'type': '<(component)',
+ 'dependencies': [
+ '<(DEPTH)/base/base.gyp:base',
+ '<(DEPTH)/base/third_party/dynamic_annotations/dynamic_annotations.gyp:dynamic_annotations',
+ '<(DEPTH)/skia/skia.gyp:skia',
+ '<(DEPTH)/ui/gfx/gl/gl.gyp:gl',
+ '<(DEPTH)/ui/ui.gyp:ui',
+ ],
+ 'sources': [
+ 'accelerated_surface_mac.cc',
+ 'accelerated_surface_mac.h',
+ 'accelerated_surface_win.cc',
+ 'accelerated_surface_win.h',
+ 'io_surface_support_mac.cc',
+ 'io_surface_support_mac.h',
+ 'surface_export.h',
+ 'transport_dib.h',
+ 'transport_dib_android.cc',
+ 'transport_dib_linux.cc',
+ 'transport_dib_mac.cc',
+ 'transport_dib_win.cc',
+ ],
+ 'defines': [
+ 'SURFACE_IMPLEMENTATION',
+ ],
+ 'conditions': [
+ ['use_aura==1', {
+ 'sources/': [
+ ['exclude', 'accelerated_surface_win.cc'],
+ ['exclude', 'accelerated_surface_win.h'],
+ ],
+ }],
+ ],
+ },
+ ],
+}
diff --git a/ui/surface/surface_export.h b/ui/surface/surface_export.h
new file mode 100644
index 0000000..1b2164e
--- /dev/null
+++ b/ui/surface/surface_export.h
@@ -0,0 +1,26 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef UI_SURFACE_SURFACE_EXPORT_H_
+#define UI_SURFACE_SURFACE_EXPORT_H_
+#pragma once
+
+#if defined(COMPONENT_BUILD)
+#if defined(WIN32)
+
+#if defined(SURFACE_IMPLEMENTATION)
+#define SURFACE_EXPORT __declspec(dllexport)
+#else
+#define SURFACE_EXPORT __declspec(dllimport)
+#endif // defined(SURFACE_IMPLEMENTATION)
+
+#else // defined(WIN32)
+#define SURFACE_EXPORT __attribute__((visibility("default")))
+#endif
+
+#else // defined(COMPONENT_BUILD)
+#define SURFACE_EXPORT
+#endif
+
+#endif // UI_SURFACE_SURFACE_EXPORT_H_
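surface_export.h is the usual Chromium component-build export shim: surface.gyp defines SURFACE_IMPLEMENTATION while building this target, so SURFACE_EXPORT expands to dllexport (or default ELF visibility), whereas client targets see dllimport or nothing. A small sketch of how it is applied, with a hypothetical class name:

// Illustrative only: "SomeSurfaceClass" is hypothetical; the macro is real.
#include "ui/surface/surface_export.h"

// Inside the surface component (SURFACE_IMPLEMENTATION defined), this
// expands to __declspec(dllexport) on Windows or default visibility on
// other platforms in a component build.
class SURFACE_EXPORT SomeSurfaceClass {
 public:
  void DoSomething();
};

// A client target includes the same header without SURFACE_IMPLEMENTATION,
// so the class is imported in a component build; in a static build the
// macro is empty and the annotation disappears.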
diff --git a/ui/surface/transport_dib.h b/ui/surface/transport_dib.h
new file mode 100644
index 0000000..f323f1f
--- /dev/null
+++ b/ui/surface/transport_dib.h
@@ -0,0 +1,217 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef UI_SURFACE_TRANSPORT_DIB_H_
+#define UI_SURFACE_TRANSPORT_DIB_H_
+#pragma once
+
+#include "base/basictypes.h"
+#include "ui/surface/surface_export.h"
+
+#if defined(OS_WIN) || defined(OS_MACOSX) || defined(OS_ANDROID)
+#include "base/shared_memory.h"
+#endif
+
+#if defined(OS_WIN)
+#include <windows.h>
+#elif defined(USE_X11)
+#include "ui/base/x/x11_util.h"
+#endif
+
+namespace skia {
+class PlatformCanvas;
+}
+
+// -----------------------------------------------------------------------------
+// A TransportDIB is a block of memory that is used to transport pixels
+// between processes: from the renderer process to the browser, and
+// between renderer and plugin processes.
+// -----------------------------------------------------------------------------
+class SURFACE_EXPORT TransportDIB {
+ public:
+ ~TransportDIB();
+
+  // Two typedefs are defined. A Handle is the type that can be sent over
+  // the wire so that the remote side can map the transport DIB. The Id
+  // typedef is sufficient to identify the transport DIB when you know that
+  // the remote side may already have it mapped.
+#if defined(OS_WIN)
+ typedef HANDLE Handle;
+ // On Windows, the Id type includes a sequence number (epoch) to solve an ABA
+ // issue:
+ // 1) Process A creates a transport DIB with HANDLE=1 and sends to B.
+ // 2) Process B maps the transport DIB and caches 1 -> DIB.
+ // 3) Process A closes the transport DIB and creates a new one. The new DIB
+ // is also assigned HANDLE=1.
+ // 4) Process A sends the Handle to B, but B incorrectly believes that it
+ // already has it cached.
+ struct HandleAndSequenceNum {
+ HandleAndSequenceNum()
+ : handle(NULL),
+ sequence_num(0) {
+ }
+
+ HandleAndSequenceNum(HANDLE h, uint32 seq_num)
+ : handle(h),
+ sequence_num(seq_num) {
+ }
+
+ bool operator<(const HandleAndSequenceNum& other) const {
+ // Use the lexicographic order on the tuple <handle, sequence_num>.
+ if (other.handle != handle)
+ return other.handle < handle;
+ return other.sequence_num < sequence_num;
+ }
+
+ HANDLE handle;
+ uint32 sequence_num;
+ };
+ typedef HandleAndSequenceNum Id;
+
+  // Returns a default, invalid handle that is meant to indicate a missing
+  // transport DIB.
+ static Handle DefaultHandleValue() { return NULL; }
+
+ // Returns a value that is ONLY USEFUL FOR TESTS WHERE IT WON'T BE
+ // ACTUALLY USED AS A REAL HANDLE.
+ static Handle GetFakeHandleForTest() {
+ static int fake_handle = 10;
+ return reinterpret_cast<Handle>(fake_handle++);
+ }
+#elif defined(OS_MACOSX)
+ typedef base::SharedMemoryHandle Handle;
+ // On Mac, the inode number of the backing file is used as an id.
+ typedef base::SharedMemoryId Id;
+
+  // Returns a default, invalid handle that is meant to indicate a missing
+  // transport DIB.
+ static Handle DefaultHandleValue() { return Handle(); }
+
+ // Returns a value that is ONLY USEFUL FOR TESTS WHERE IT WON'T BE
+ // ACTUALLY USED AS A REAL HANDLE.
+ static Handle GetFakeHandleForTest() {
+ static int fake_handle = 10;
+ return Handle(fake_handle++, false);
+ }
+#elif defined(USE_X11)
+  typedef int Handle;  // Handle and Id::shmkey are SysV IPC shared memory keys
+ struct Id {
+ // Ensure that default initialized Ids are invalid.
+ Id() : shmkey(-1) {
+ }
+
+ bool operator<(const Id& other) const {
+ return shmkey < other.shmkey;
+ }
+
+ int shmkey;
+ };
+
+  // Returns a default, invalid handle that is meant to indicate a missing
+  // transport DIB.
+ static Handle DefaultHandleValue() { return -1; }
+
+ // Returns a value that is ONLY USEFUL FOR TESTS WHERE IT WON'T BE
+ // ACTUALLY USED AS A REAL HANDLE.
+ static Handle GetFakeHandleForTest() {
+ static int fake_handle = 10;
+ return fake_handle++;
+ }
+#elif defined(OS_ANDROID)
+ typedef base::SharedMemoryHandle Handle;
+ typedef base::SharedMemoryHandle Id;
+
+  // Returns a default, invalid handle that is meant to indicate a missing
+  // transport DIB.
+ static Handle DefaultHandleValue() { return Handle(); }
+
+ // Returns a value that is ONLY USEFUL FOR TESTS WHERE IT WON'T BE
+ // ACTUALLY USED AS A REAL HANDLE.
+ static Handle GetFakeHandleForTest() {
+ static int fake_handle = 10;
+ return Handle(fake_handle++, false);
+ }
+#endif
+
+ // Create a new TransportDIB, returning NULL on failure.
+ //
+ // The size is the minimum size in bytes of the memory backing the transport
+ // DIB (we may actually allocate more than that to give us better reuse when
+ // cached).
+ //
+ // The sequence number is used to uniquely identify the transport DIB. It
+ // should be unique for all transport DIBs ever created in the same
+ // renderer.
+ static TransportDIB* Create(size_t size, uint32 sequence_num);
+
+ // Map the referenced transport DIB. The caller owns the returned object.
+ // Returns NULL on failure.
+ static TransportDIB* Map(Handle transport_dib);
+
+ // Create a new |TransportDIB| with a handle to the shared memory. This
+ // always returns a valid pointer. The DIB is not mapped.
+ static TransportDIB* CreateWithHandle(Handle handle);
+
+ // Returns true if the handle is valid.
+ static bool is_valid_handle(Handle dib);
+
+ // Returns true if the ID refers to a valid dib.
+ static bool is_valid_id(Id id);
+
+ // Returns a canvas using the memory of this TransportDIB. The returned
+ // pointer will be owned by the caller. The bitmap will be of the given size,
+ // which should fit inside this memory.
+ //
+  // On POSIX, this |TransportDIB| will be mapped if it is not already. On
+  // Windows, this |TransportDIB| will NOT be mapped and must not already be
+  // mapped, because PlatformCanvas will map the file internally.
+ //
+ // Will return NULL on allocation failure. This could be because the image
+ // is too large to map into the current process' address space.
+ skia::PlatformCanvas* GetPlatformCanvas(int w, int h);
+
+  // Map the DIB into the current process if it is not already mapped. This
+  // is used to map a DIB that has already been created. Returns true if the
+  // DIB is mapped.
+ bool Map();
+
+ // Return a pointer to the shared memory.
+ void* memory() const;
+
+  // Return the maximum size of the shared memory. This is not the amount of
+  // data that is valid; you have to know that via other means. It is simply
+  // the maximum amount that /could/ be valid.
+ size_t size() const { return size_; }
+
+ // Return the identifier which can be used to refer to this shared memory
+ // on the wire.
+ Id id() const;
+
+ // Return a handle to the underlying shared memory. This can be sent over the
+ // wire to give this transport DIB to another process.
+ Handle handle() const;
+
+#if defined(USE_X11)
+ // Map the shared memory into the X server and return an id for the shared
+ // segment.
+ XID MapToX(Display* connection);
+#endif
+
+ private:
+ TransportDIB();
+#if defined(OS_WIN) || defined(OS_MACOSX) || defined(OS_ANDROID)
+ explicit TransportDIB(base::SharedMemoryHandle dib);
+ base::SharedMemory shared_memory_;
+ uint32 sequence_num_;
+#elif defined(USE_X11)
+ Id key_; // SysV shared memory id
+ void* address_; // mapped address
+ XSharedMemoryId x_shm_; // X id for the shared segment
+ Display* display_; // connection to the X server
+#endif
+ size_t size_; // length, in bytes
+
+ DISALLOW_COPY_AND_ASSIGN(TransportDIB);
+};
+
+#endif // UI_SURFACE_TRANSPORT_DIB_H_
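Taken together, the declarations above describe a simple protocol: one process calls Create() and ships handle() over IPC, the other maps the received handle and draws through GetPlatformCanvas(). A hedged sketch of that round trip; ShareWithBrowser, MapAndDraw, and the elided IPC send are illustrative names, not part of the patch:

// Illustrative only: exercises the public TransportDIB API declared above.
#include "base/memory/scoped_ptr.h"
#include "skia/ext/platform_canvas.h"
#include "ui/surface/transport_dib.h"

void ShareWithBrowser(size_t bytes, uint32 sequence_num) {
  // Renderer side: allocate the backing shared memory. On Windows the
  // sequence number becomes part of the Id, defeating the ABA handle-reuse
  // problem described in transport_dib.h.
  TransportDIB* dib = TransportDIB::Create(bytes, sequence_num);
  if (!dib)
    return;
  TransportDIB::Handle handle = dib->handle();
  // SendHandleToBrowser(handle) would be real IPC plumbing; elided here.
  (void)handle;
}

TransportDIB* MapAndDraw(TransportDIB::Handle handle, int w, int h) {
  // POSIX-style flow: map first, then wrap the memory in a canvas. (On
  // Windows the DIB must NOT be pre-mapped, since GetPlatformCanvas maps it
  // itself; CreateWithHandle() would be used there instead of Map().)
  if (!TransportDIB::is_valid_handle(handle))
    return NULL;
  TransportDIB* dib = TransportDIB::Map(handle);  // NULL on failure.
  if (!dib)
    return NULL;
  // The canvas aliases the shared memory, so |dib| must outlive it.
  scoped_ptr<skia::PlatformCanvas> canvas(dib->GetPlatformCanvas(w, h));
  if (canvas.get())
    canvas->drawColor(SK_ColorWHITE);  // Pixels become visible to the peer.
  return dib;  // Caller takes ownership of the mapped DIB.
}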
diff --git a/ui/surface/transport_dib_android.cc b/ui/surface/transport_dib_android.cc
new file mode 100644
index 0000000..54f8dc3
--- /dev/null
+++ b/ui/surface/transport_dib_android.cc
@@ -0,0 +1,101 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "ui/surface/transport_dib.h"
+
+#include <sys/stat.h>
+#include <unistd.h>
+
+#include "base/eintr_wrapper.h"
+#include "base/logging.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/shared_memory.h"
+#include "skia/ext/platform_canvas.h"
+
+TransportDIB::TransportDIB()
+ : size_(0) {
+}
+
+TransportDIB::TransportDIB(TransportDIB::Handle dib)
+ : shared_memory_(dib, false /* read write */),
+ size_(0) {
+}
+
+TransportDIB::~TransportDIB() {
+}
+
+// static
+TransportDIB* TransportDIB::Create(size_t size, uint32 sequence_num) {
+ TransportDIB* dib = new TransportDIB;
+ // We will use ashmem_get_size_region() to figure out the size in Map(size).
+ if (!dib->shared_memory_.CreateAndMapAnonymous(size)) {
+ delete dib;
+ return NULL;
+ }
+
+ dib->size_ = size;
+ return dib;
+}
+
+// static
+TransportDIB* TransportDIB::Map(Handle handle) {
+ scoped_ptr<TransportDIB> dib(CreateWithHandle(handle));
+ if (!dib->Map())
+ return NULL;
+ return dib.release();
+}
+
+// static
+TransportDIB* TransportDIB::CreateWithHandle(Handle handle) {
+ return new TransportDIB(handle);
+}
+
+// static
+bool TransportDIB::is_valid_handle(Handle dib) {
+ return dib.fd >= 0;
+}
+
+// static
+bool TransportDIB::is_valid_id(Id id) {
+ // Same as is_valid_handle().
+ return id.fd >= 0;
+}
+
+skia::PlatformCanvas* TransportDIB::GetPlatformCanvas(int w, int h) {
+ if (!memory() && !Map())
+ return NULL;
+ scoped_ptr<skia::PlatformCanvas> canvas(new skia::PlatformCanvas);
+ if (!canvas->initialize(w, h, true, reinterpret_cast<uint8_t*>(memory()))) {
+ // TODO(husky): Remove when http://b/issue?id=4233182 is definitely fixed.
+ LOG(ERROR) << "Failed to initialize canvas of size " << w << "x" << h;
+ return NULL;
+ }
+ return canvas.release();
+}
+
+bool TransportDIB::Map() {
+ if (!is_valid_handle(handle()))
+ return false;
+ // We will use ashmem_get_size_region() to figure out the size in Map(size).
+ if (!shared_memory_.Map(0))
+ return false;
+
+ // TODO: Note that using created_size() below is a hack. See the comment in
+ // SharedMemory::Map().
+ size_ = shared_memory_.created_size();
+ return true;
+}
+
+void* TransportDIB::memory() const {
+ return shared_memory_.memory();
+}
+
+TransportDIB::Id TransportDIB::id() const {
+ // Use FileDescriptor as id.
+ return shared_memory_.handle();
+}
+
+TransportDIB::Handle TransportDIB::handle() const {
+ return shared_memory_.handle();
+}
diff --git a/ui/surface/transport_dib_linux.cc b/ui/surface/transport_dib_linux.cc
new file mode 100644
index 0000000..ae407f9
--- /dev/null
+++ b/ui/surface/transport_dib_linux.cc
@@ -0,0 +1,141 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "ui/surface/transport_dib.h"
+
+#include <errno.h>
+#include <stdlib.h>
+#include <sys/ipc.h>
+#include <sys/shm.h>
+
+#include "base/logging.h"
+#include "base/memory/scoped_ptr.h"
+#include "skia/ext/platform_canvas.h"
+#include "ui/base/x/x11_util.h"
+#include "ui/gfx/size.h"
+
+// The shmat system call uses this as its invalid return address.
+static void *const kInvalidAddress = (void*) -1;
+
+TransportDIB::TransportDIB()
+ : address_(kInvalidAddress),
+ x_shm_(0),
+ display_(NULL),
+ size_(0) {
+}
+
+TransportDIB::~TransportDIB() {
+ if (address_ != kInvalidAddress) {
+ shmdt(address_);
+ address_ = kInvalidAddress;
+ }
+
+ if (x_shm_) {
+ DCHECK(display_);
+ ui::DetachSharedMemory(display_, x_shm_);
+ }
+}
+
+// static
+TransportDIB* TransportDIB::Create(size_t size, uint32 sequence_num) {
+  // We use a mode of 0666 because the X server won't attach to memory that is
+  // 0600: as a root process, it cannot know whether it is being asked to map
+  // someone else's private shared memory region.
+ const int shmkey = shmget(IPC_PRIVATE, size, 0666);
+ if (shmkey == -1) {
+ DLOG(ERROR) << "Failed to create SysV shared memory region"
+ << " errno:" << errno;
+ return NULL;
+ }
+
+ void* address = shmat(shmkey, NULL /* desired address */, 0 /* flags */);
+ // Here we mark the shared memory for deletion. Since we attached it in the
+ // line above, it doesn't actually get deleted but, if we crash, this means
+ // that the kernel will automatically clean it up for us.
+ shmctl(shmkey, IPC_RMID, 0);
+ if (address == kInvalidAddress)
+ return NULL;
+
+ TransportDIB* dib = new TransportDIB;
+
+ dib->key_.shmkey = shmkey;
+ dib->address_ = address;
+ dib->size_ = size;
+ return dib;
+}
+
+// static
+TransportDIB* TransportDIB::Map(Handle handle) {
+ scoped_ptr<TransportDIB> dib(CreateWithHandle(handle));
+ if (!dib->Map())
+ return NULL;
+ return dib.release();
+}
+
+// static
+TransportDIB* TransportDIB::CreateWithHandle(Handle shmkey) {
+ TransportDIB* dib = new TransportDIB;
+ dib->key_.shmkey = shmkey;
+ return dib;
+}
+
+// static
+bool TransportDIB::is_valid_handle(Handle dib) {
+ return dib >= 0;
+}
+
+// static
+bool TransportDIB::is_valid_id(Id id) {
+ return id.shmkey != -1;
+}
+
+skia::PlatformCanvas* TransportDIB::GetPlatformCanvas(int w, int h) {
+ if (address_ == kInvalidAddress && !Map())
+ return NULL;
+ scoped_ptr<skia::PlatformCanvas> canvas(new skia::PlatformCanvas);
+ if (!canvas->initialize(w, h, true, reinterpret_cast<uint8_t*>(memory())))
+ return NULL;
+ return canvas.release();
+}
+
+bool TransportDIB::Map() {
+ if (!is_valid_id(key_))
+ return false;
+ if (address_ != kInvalidAddress)
+ return true;
+
+ struct shmid_ds shmst;
+ if (shmctl(key_.shmkey, IPC_STAT, &shmst) == -1)
+ return false;
+
+ void* address = shmat(key_.shmkey, NULL /* desired address */, 0 /* flags */);
+ if (address == kInvalidAddress)
+ return false;
+
+ address_ = address;
+ size_ = shmst.shm_segsz;
+ return true;
+}
+
+void* TransportDIB::memory() const {
+ DCHECK_NE(address_, kInvalidAddress);
+ return address_;
+}
+
+TransportDIB::Id TransportDIB::id() const {
+ return key_;
+}
+
+TransportDIB::Handle TransportDIB::handle() const {
+ return key_.shmkey;
+}
+
+XID TransportDIB::MapToX(Display* display) {
+ if (!x_shm_) {
+ x_shm_ = ui::AttachSharedMemory(display, key_.shmkey);
+ display_ = display;
+ }
+
+ return x_shm_;
+}
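Create() above relies on a classic SysV shared-memory idiom: attach the segment, then immediately mark it IPC_RMID so the kernel reclaims it once the last process detaches, even if the creator crashes. A standalone sketch of that idiom; CreateSelfCleaningSegment is a hypothetical name:

// Illustrative only: mirrors the shmget/shmat/shmctl sequence in Create().
#include <stddef.h>
#include <sys/ipc.h>
#include <sys/shm.h>

void* CreateSelfCleaningSegment(size_t size, int* out_shmkey) {
  const int shmkey = shmget(IPC_PRIVATE, size, 0666);
  if (shmkey == -1)
    return NULL;
  void* address = shmat(shmkey, NULL, 0);
  // The segment is still attached, so IPC_RMID only schedules removal; the
  // kernel frees it after the last process (including the X server) detaches.
  shmctl(shmkey, IPC_RMID, 0);
  if (address == reinterpret_cast<void*>(-1))
    return NULL;
  *out_shmkey = shmkey;
  return address;
}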
diff --git a/ui/surface/transport_dib_mac.cc b/ui/surface/transport_dib_mac.cc
new file mode 100644
index 0000000..37ce877
--- /dev/null
+++ b/ui/surface/transport_dib_mac.cc
@@ -0,0 +1,98 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "ui/surface/transport_dib.h"
+
+#include <sys/stat.h>
+#include <unistd.h>
+
+#include "base/eintr_wrapper.h"
+#include "base/logging.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/shared_memory.h"
+#include "skia/ext/platform_canvas.h"
+
+TransportDIB::TransportDIB()
+ : size_(0) {
+}
+
+TransportDIB::TransportDIB(TransportDIB::Handle dib)
+ : shared_memory_(dib, false /* read write */),
+ size_(0) {
+}
+
+TransportDIB::~TransportDIB() {
+}
+
+// static
+TransportDIB* TransportDIB::Create(size_t size, uint32 sequence_num) {
+ TransportDIB* dib = new TransportDIB;
+ if (!dib->shared_memory_.CreateAndMapAnonymous(size)) {
+ delete dib;
+ return NULL;
+ }
+
+ dib->size_ = size;
+ return dib;
+}
+
+// static
+TransportDIB* TransportDIB::Map(Handle handle) {
+ scoped_ptr<TransportDIB> dib(CreateWithHandle(handle));
+ if (!dib->Map())
+ return NULL;
+ return dib.release();
+}
+
+// static
+TransportDIB* TransportDIB::CreateWithHandle(Handle handle) {
+ return new TransportDIB(handle);
+}
+
+// static
+bool TransportDIB::is_valid_handle(Handle dib) {
+ return dib.fd >= 0;
+}
+
+// static
+bool TransportDIB::is_valid_id(Id id) {
+ return id != 0;
+}
+
+skia::PlatformCanvas* TransportDIB::GetPlatformCanvas(int w, int h) {
+ if (!memory() && !Map())
+ return NULL;
+ scoped_ptr<skia::PlatformCanvas> canvas(new skia::PlatformCanvas);
+ if (!canvas->initialize(w, h, true, reinterpret_cast<uint8_t*>(memory())))
+ return NULL;
+ return canvas.release();
+}
+
+bool TransportDIB::Map() {
+ if (!is_valid_handle(handle()))
+ return false;
+ if (memory())
+ return true;
+
+ struct stat st;
+ if ((fstat(shared_memory_.handle().fd, &st) != 0) ||
+ (!shared_memory_.Map(st.st_size))) {
+ return false;
+ }
+
+ size_ = st.st_size;
+ return true;
+}
+
+void* TransportDIB::memory() const {
+ return shared_memory_.memory();
+}
+
+TransportDIB::Id TransportDIB::id() const {
+ return shared_memory_.id();
+}
+
+TransportDIB::Handle TransportDIB::handle() const {
+ return shared_memory_.handle();
+}
diff --git a/ui/surface/transport_dib_win.cc b/ui/surface/transport_dib_win.cc
new file mode 100644
index 0000000..c30e7c4c
--- /dev/null
+++ b/ui/surface/transport_dib_win.cc
@@ -0,0 +1,110 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "ui/surface/transport_dib.h"
+
+#include <windows.h>
+
+#include <limits>
+
+#include "base/logging.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/sys_info.h"
+#include "skia/ext/platform_canvas.h"
+
+TransportDIB::TransportDIB() {
+}
+
+TransportDIB::~TransportDIB() {
+}
+
+TransportDIB::TransportDIB(HANDLE handle)
+ : shared_memory_(handle, false /* read write */) {
+}
+
+// static
+TransportDIB* TransportDIB::Create(size_t size, uint32 sequence_num) {
+ size_t allocation_granularity = base::SysInfo::VMAllocationGranularity();
+ size = size / allocation_granularity + 1;
+ size = size * allocation_granularity;
+
+ TransportDIB* dib = new TransportDIB;
+
+ if (!dib->shared_memory_.CreateAnonymous(size)) {
+ delete dib;
+ return NULL;
+ }
+
+ dib->size_ = size;
+ dib->sequence_num_ = sequence_num;
+
+ return dib;
+}
+
+// static
+TransportDIB* TransportDIB::Map(Handle handle) {
+ scoped_ptr<TransportDIB> dib(CreateWithHandle(handle));
+ if (!dib->Map())
+ return NULL;
+ return dib.release();
+}
+
+// static
+TransportDIB* TransportDIB::CreateWithHandle(Handle handle) {
+ return new TransportDIB(handle);
+}
+
+// static
+bool TransportDIB::is_valid_handle(Handle dib) {
+ return dib != NULL;
+}
+
+// static
+bool TransportDIB::is_valid_id(TransportDIB::Id id) {
+ return is_valid_handle(id.handle);
+}
+
+skia::PlatformCanvas* TransportDIB::GetPlatformCanvas(int w, int h) {
+  // If this DIB has already mapped the file into this process, PlatformCanvas
+  // would map it again, so the DIB must not be mapped yet.
+ DCHECK(!memory()) << "Mapped file twice in the same process.";
+
+ scoped_ptr<skia::PlatformCanvas> canvas(new skia::PlatformCanvas);
+ if (!canvas->initialize(w, h, true, handle()))
+ return NULL;
+ return canvas.release();
+}
+
+bool TransportDIB::Map() {
+ if (!is_valid_handle(handle()))
+ return false;
+ if (memory())
+ return true;
+
+ if (!shared_memory_.Map(0 /* map whole shared memory segment */)) {
+ LOG(ERROR) << "Failed to map transport DIB"
+ << " handle:" << shared_memory_.handle()
+ << " error:" << ::GetLastError();
+ return false;
+ }
+
+ // There doesn't seem to be any way to find the size of the shared memory
+ // region! GetFileSize indicates that the handle is invalid. Thus, we
+ // conservatively set the size to the maximum and hope that the renderer
+ // isn't about to ask us to read off the end of the array.
+ size_ = std::numeric_limits<size_t>::max();
+ return true;
+}
+
+void* TransportDIB::memory() const {
+ return shared_memory_.memory();
+}
+
+TransportDIB::Handle TransportDIB::handle() const {
+ return shared_memory_.handle();
+}
+
+TransportDIB::Id TransportDIB::id() const {
+ return Id(handle(), sequence_num_);
+}
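Create() above rounds the requested size up by computing size / granularity + 1 granules, which means a size that is already an exact multiple still gains one extra granule. A small illustrative helper with worked numbers (a 64 KB granularity is typical on Windows but not guaranteed):

// Illustrative only: RoundUpToGranularity mirrors the arithmetic in Create().
#include <stddef.h>

size_t RoundUpToGranularity(size_t size, size_t granularity) {
  // 100000 / 65536 + 1 = 2  ->  2 * 65536 = 131072 bytes reserved.
  // An exact multiple still gains a granule: 65536 -> 131072.
  return (size / granularity + 1) * granularity;
}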