author    | jbauman@chromium.org <jbauman@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98> | 2012-06-09 02:16:09 +0000
committer | jbauman@chromium.org <jbauman@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98> | 2012-06-09 02:16:09 +0000
commit    | 96a28eadb8e54939a2c03dbd12e89786c449beb9 (patch)
tree      | 8e0699e86bc7b606670ddd92c8fab0b7973fc120 /ui/gl/gl_context_cgl.cc
parent    | eeda49cbdfe963c7cac6ca7f6cef8f4020dd4d3b (diff)
Don't destroy channels when switching GPUs
OS X appears to migrate existing contexts onto the discrete GPU automatically whenever the discrete GPU is in use. Therefore, instead of destroying the entire channel and all of its contexts in order to switch GPUs, we can create a pixel format that requires the discrete GPU before creating the new context, which forces every context onto the discrete GPU for as long as that pixel format is held.
BUG=129258
TEST=
Review URL: https://chromiumcodereview.appspot.com/10545077
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@141347 0039d316-1c4b-4281-b951-d872f2087c98
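The trick the commit message describes can be sketched outside of Chromium's GLContextCGL class. The snippet below is a minimal, illustrative example only (the helper names `HoldDiscreteGpu` and `ReleaseDiscreteGpu` are invented, not part of the commit): on a dual-GPU Mac, a CGL pixel format chosen without `kCGLPFAAllowOfflineRenderers` matches only online renderers, so merely keeping that pixel format object alive keeps the machine on the discrete GPU until it is released.

```cpp
// Illustrative sketch only -- not Chromium code. Helper names are invented.
#include <OpenGL/OpenGL.h>
#include <stddef.h>  // for NULL

// Choose a pixel format whose attribute list omits
// kCGLPFAAllowOfflineRenderers. Such a format only matches online renderers,
// so holding the returned object pins a dual-GPU machine to the discrete GPU.
static CGLPixelFormatObj HoldDiscreteGpu() {
  CGLPixelFormatAttribute attribs[] = { (CGLPixelFormatAttribute)0 };
  CGLPixelFormatObj format = NULL;
  GLint num_formats = 0;
  if (CGLChoosePixelFormat(attribs, &format, &num_formats) != kCGLNoError)
    return NULL;
  return format;  // Keep alive for as long as the discrete GPU is needed.
}

// Releasing the pixel format lets the OS fall back to the integrated GPU
// once nothing else requires the discrete one.
static void ReleaseDiscreteGpu(CGLPixelFormatObj format) {
  if (format)
    CGLReleasePixelFormat(format);
}
```

This is essentially the role of the new `discrete_pixelformat_` member in the diff below: it is created in `Initialize()` when the discrete GPU is wanted and released in `Destroy()`.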
Diffstat (limited to 'ui/gl/gl_context_cgl.cc')
-rw-r--r-- | ui/gl/gl_context_cgl.cc | 36
1 file changed, 24 insertions, 12 deletions
diff --git a/ui/gl/gl_context_cgl.cc b/ui/gl/gl_context_cgl.cc
index 790baad..f10595f 100644
--- a/ui/gl/gl_context_cgl.cc
+++ b/ui/gl/gl_context_cgl.cc
@@ -18,7 +18,8 @@ namespace gfx {
 GLContextCGL::GLContextCGL(GLShareGroup* share_group)
   : GLContext(share_group),
     context_(NULL),
-    gpu_preference_(PreferIntegratedGpu) {
+    gpu_preference_(PreferIntegratedGpu),
+    discrete_pixelformat_(NULL) {
 }
 
 bool GLContextCGL::Initialize(GLSurface* compatible_surface,
@@ -27,19 +28,11 @@ bool GLContextCGL::Initialize(GLSurface* compatible_surface,
   GLContextCGL* share_context = share_group() ?
       static_cast<GLContextCGL*>(share_group()->GetContext()) : NULL;
-  if (SupportsDualGpus()) {
-    // Ensure the GPU preference is compatible with contexts already in the
-    // share group.
-    if (share_context && gpu_preference != share_context->GetGpuPreference())
-      return false;
-  }
 
   std::vector<CGLPixelFormatAttribute> attribs;
-  bool using_offline_renderer =
-      SupportsDualGpus() && gpu_preference == PreferIntegratedGpu;
-  if (using_offline_renderer) {
-    attribs.push_back(kCGLPFAAllowOfflineRenderers);
-  }
+  // Allow offline renderers for every context, so that they can all be in the
+  // same share group.
+  attribs.push_back(kCGLPFAAllowOfflineRenderers);
 
   if (GetGLImplementation() == kGLImplementationAppleGL) {
     attribs.push_back(kCGLPFARendererID);
     attribs.push_back((CGLPixelFormatAttribute) kCGLRendererGenericFloatID);
@@ -60,6 +53,21 @@ bool GLContextCGL::Initialize(GLSurface* compatible_surface,
   }
   DCHECK_NE(num_pixel_formats, 0);
 
+  // If using the discrete gpu, create a pixel format requiring it before we
+  // create the context.
+  if (!SupportsDualGpus() || gpu_preference == PreferDiscreteGpu) {
+    std::vector<CGLPixelFormatAttribute> discrete_attribs;
+    discrete_attribs.push_back((CGLPixelFormatAttribute) 0);
+    GLint num_pixel_formats;
+    if (CGLChoosePixelFormat(&discrete_attribs.front(),
+                             &discrete_pixelformat_,
+                             &num_pixel_formats) != kCGLNoError) {
+      LOG(ERROR) << "Error choosing pixel format.";
+      return false;
+    }
+  }
+
+
   CGLError res = CGLCreateContext(
       format,
       share_context ?
@@ -77,6 +85,10 @@ bool GLContextCGL::Initialize(GLSurface* compatible_surface,
 }
 
 void GLContextCGL::Destroy() {
+  if (discrete_pixelformat_) {
+    CGLReleasePixelFormat(discrete_pixelformat_);
+    discrete_pixelformat_ = NULL;
+  }
   if (context_) {
     CGLDestroyContext(static_cast<CGLContextObj>(context_));
     context_ = NULL;
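The other half of the change is that `kCGLPFAAllowOfflineRenderers` is now pushed unconditionally, so every context is created from a pixel format that allows offline renderers and the old GPU-preference compatibility check for share groups could be deleted. The sketch below illustrates the underlying idea with raw CGL rather than Chromium's GLShareGroup machinery; `CreateSharedPair` is an invented name and the code is an assumption-laden example, not the commit's implementation.

```cpp
// Illustrative sketch only -- not Chromium code; CreateSharedPair is invented.
#include <OpenGL/OpenGL.h>
#include <stddef.h>  // for NULL

// Create two CGL contexts that share objects. Because the pixel format allows
// offline renderers, the pair can be built the same way regardless of whether
// each caller preferred the integrated or the discrete GPU.
static bool CreateSharedPair(CGLContextObj* first, CGLContextObj* second) {
  CGLPixelFormatAttribute attribs[] = {
    kCGLPFAAllowOfflineRenderers,    // Permit the integrated (offline) GPU.
    (CGLPixelFormatAttribute)0
  };
  CGLPixelFormatObj format = NULL;
  GLint num_formats = 0;
  if (CGLChoosePixelFormat(attribs, &format, &num_formats) != kCGLNoError)
    return false;

  bool ok = CGLCreateContext(format, NULL, first) == kCGLNoError &&
            CGLCreateContext(format, *first, second) == kCGLNoError;
  CGLReleasePixelFormat(format);   // The contexts hold their own references.
  return ok;
}
```

If the second creation fails, the first context is still returned through `first` and should be cleaned up by the caller with `CGLDestroyContext`, mirroring the cleanup pattern in `GLContextCGL::Destroy()` above.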