author     apatrick@chromium.org <apatrick@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>  2011-12-08 02:25:15 +0000
committer  apatrick@chromium.org <apatrick@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>  2011-12-08 02:25:15 +0000
commit     20f656fa24a2c96bf28d9889dc226dbc6eba1011 (patch)
tree       0ac68bf2828c732225b68d7b68c3fdd0e172a383
parent     37b89cacd9d5aa309eeb3689c8871c9c07d3b879 (diff)
Ensure that GpuScheduler invokes fence tasks even if client is not flushing.
Review URL: http://codereview.chromium.org/8495038
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@113535 0039d316-1c4b-4281-b951-d872f2087c98
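Before this change, GpuScheduler::PutChanged() was the only place that tested the fences guarding deferred tasks, so if the client stopped flushing, those tasks could wait indefinitely. The patch below moves the fence test into a PollUnscheduleFences() helper, exposes HasMoreWork() on the scheduler and the stub, and has GpuChannel::HandleMessage() repost itself as a delayed task (with a short delay, kHandleMoreWorkPeriod) while any stub still reports work. The following standalone sketch only models that polling pattern; FakeFence, FakeScheduler and the tick counter are hypothetical stand-ins for the GL fence, GpuScheduler and the channel's delayed task, not Chromium code.

```cpp
// Minimal model of the "poll until no stub has more work" loop this change
// introduces. All names here are illustrative stand-ins.
#include <functional>
#include <iostream>
#include <queue>

struct FakeFence {
  int completes_at_tick;       // tick at which the "GPU" passes the fence
  std::function<void()> task;  // deferred work to run once it passes
};

class FakeScheduler {
 public:
  void DeferToFence(FakeFence fence) { fences_.push(std::move(fence)); }

  // Mirrors GpuScheduler::HasMoreWork(): true while fences are outstanding.
  bool HasMoreWork() const { return !fences_.empty(); }

  // Mirrors PollUnscheduleFences(): run tasks for every completed fence,
  // stop at the first one that is still pending.
  void Poll(int now) {
    while (!fences_.empty() && fences_.front().completes_at_tick <= now) {
      fences_.front().task();
      fences_.pop();
    }
  }

 private:
  std::queue<FakeFence> fences_;
};

int main() {
  FakeScheduler scheduler;
  scheduler.DeferToFence({3, [] { std::cout << "fence task ran\n"; }});

  // Mirrors GpuChannel::HandleMessage() reposting itself for as long as any
  // stub reports more work, so fence tasks run even if the client never
  // flushes again.
  for (int tick = 0; scheduler.HasMoreWork(); ++tick)
    scheduler.Poll(tick);
}
```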
-rw-r--r--  content/common/gpu/gpu_channel.cc              | 24
-rw-r--r--  content/common/gpu/gpu_command_buffer_stub.cc  | 12
-rw-r--r--  content/common/gpu/gpu_command_buffer_stub.h   |  3
-rw-r--r--  gpu/command_buffer/service/gpu_scheduler.cc    | 64
-rw-r--r--  gpu/command_buffer/service/gpu_scheduler.h     |  8

5 files changed, 68 insertions, 43 deletions
diff --git a/content/common/gpu/gpu_channel.cc b/content/common/gpu/gpu_channel.cc
index 27eb490..e8cc269 100644
--- a/content/common/gpu/gpu_channel.cc
+++ b/content/common/gpu/gpu_channel.cc
@@ -26,6 +26,10 @@
 #include "ipc/ipc_channel_posix.h"
 #endif
 
+namespace {
+const int64 kHandleMoreWorkPeriod = 1;
+}
+
 GpuChannel::GpuChannel(GpuChannelManager* gpu_channel_manager,
                        GpuWatchdog* watchdog,
                        int renderer_id,
@@ -154,7 +158,7 @@ void GpuChannel::OnScheduled() {
   // Post a task to handle any deferred messages. The deferred message queue is
   // not emptied here, which ensures that OnMessageReceived will continue to
   // defer newly received messages until the ones in the queue have all been
-  // handled by HandleDeferredMessages. HandleDeferredMessages is invoked as a
+  // handled by HandleMessage. HandleMessage is invoked as a
   // task to prevent reentrancy.
   MessageLoop::current()->PostTask(
       FROM_HERE,
@@ -256,18 +260,28 @@ void GpuChannel::HandleMessage() {
       Send(reply);
     }
   } else {
-    // If the channel becomes unscheduled as a result of handling the message,
-    // synthesize an IPC message to flush the command buffer that became
-    // unscheduled.
+    // If the channel becomes unscheduled as a result of handling the message
+    // or has more work to do, synthesize an IPC message to flush the command
+    // buffer that became unscheduled.
+    bool has_more_work = false;
     for (StubMap::Iterator<GpuCommandBufferStub> it(&stubs_);
          !it.IsAtEnd();
         it.Advance()) {
       GpuCommandBufferStub* stub = it.GetCurrentValue();
-      if (!stub->IsScheduled()) {
+
+      if (!stub->IsScheduled() || stub->HasMoreWork()) {
+        has_more_work = true;
         deferred_messages_.push_front(new GpuCommandBufferMsg_Rescheduled(
             stub->route_id()));
       }
     }
+
+    if (has_more_work) {
+      MessageLoop::current()->PostDelayedTask(
+          FROM_HERE,
+          base::Bind(&GpuChannel::HandleMessage, weak_factory_.GetWeakPtr()),
+          kHandleMoreWorkPeriod);
+    }
   }
 }
diff --git a/content/common/gpu/gpu_command_buffer_stub.cc b/content/common/gpu/gpu_command_buffer_stub.cc
index af478e0..ff2ab82 100644
--- a/content/common/gpu/gpu_command_buffer_stub.cc
+++ b/content/common/gpu/gpu_command_buffer_stub.cc
@@ -123,6 +123,10 @@ bool GpuCommandBufferStub::IsScheduled() {
   return !scheduler_.get() || scheduler_->IsScheduled();
 }
 
+bool GpuCommandBufferStub::HasMoreWork() {
+  return scheduler_.get() && scheduler_->HasMoreWork();
+}
+
 void GpuCommandBufferStub::SetSwapInterval() {
 #if !defined(OS_MACOSX) && !defined(UI_COMPOSITOR_IMAGE_TRANSPORT)
   // Set up swap interval for onscreen contexts.
@@ -360,10 +364,12 @@ void GpuCommandBufferStub::OnAsyncFlush(int32 put_offset,
 }
 
 void GpuCommandBufferStub::OnRescheduled() {
-  gpu::CommandBuffer::State state = command_buffer_->GetLastState();
-  command_buffer_->Flush(state.put_offset);
+  gpu::CommandBuffer::State pre_state = command_buffer_->GetLastState();
+  command_buffer_->Flush(pre_state.put_offset);
+  gpu::CommandBuffer::State post_state = command_buffer_->GetLastState();
 
-  ReportState();
+  if (pre_state.get_offset != post_state.get_offset)
+    ReportState();
 }
 
 void GpuCommandBufferStub::OnCreateTransferBuffer(int32 size,
diff --git a/content/common/gpu/gpu_command_buffer_stub.h b/content/common/gpu/gpu_command_buffer_stub.h
index f581593..50e2b23 100644
--- a/content/common/gpu/gpu_command_buffer_stub.h
+++ b/content/common/gpu/gpu_command_buffer_stub.h
@@ -65,6 +65,9 @@ class GpuCommandBufferStub
   // Whether this command buffer can currently handle IPC messages.
   bool IsScheduled();
 
+  // Whether this command buffer needs to be polled again in the future.
+  bool HasMoreWork();
+
   // Set the swap interval according to the command line.
   void SetSwapInterval();
 
diff --git a/gpu/command_buffer/service/gpu_scheduler.cc b/gpu/command_buffer/service/gpu_scheduler.cc
index f37c1a0..eb1db7e 100644
--- a/gpu/command_buffer/service/gpu_scheduler.cc
+++ b/gpu/command_buffer/service/gpu_scheduler.cc
@@ -10,17 +10,12 @@
 #include "base/debug/trace_event.h"
 #include "base/message_loop.h"
 #include "base/time.h"
-#include "ui/gfx/gl/gl_context.h"
 #include "ui/gfx/gl/gl_bindings.h"
-#include "ui/gfx/gl/gl_surface.h"
 #include "ui/gfx/gl/gl_switches.h"
 
 using ::base::SharedMemory;
 
 namespace gpu {
-namespace {
-const uint64 kPollFencePeriod = 1;
-}
 
 GpuScheduler::GpuScheduler(CommandBuffer* command_buffer,
                            gles2::GLES2Decoder* decoder,
@@ -52,41 +47,14 @@ GpuScheduler::~GpuScheduler() {
 void GpuScheduler::PutChanged() {
   TRACE_EVENT1("gpu", "GpuScheduler:PutChanged", "this", this);
 
-  DCHECK(IsScheduled());
-
   CommandBuffer::State state = command_buffer_->GetState();
   parser_->set_put(state.put_offset);
   if (state.error != error::kNoError)
     return;
 
   // Check that the GPU has passed all fences.
-  if (!unschedule_fences_.empty()) {
-    if (gfx::g_GL_NV_fence) {
-      while (!unschedule_fences_.empty()) {
-        if (glTestFenceNV(unschedule_fences_.front().fence)) {
-          glDeleteFencesNV(1, &unschedule_fences_.front().fence);
-          unschedule_fences_.front().task.Run();
-          unschedule_fences_.pop();
-        } else {
-          SetScheduled(false);
-          MessageLoop::current()->PostDelayedTask(
-              FROM_HERE,
-              base::Bind(&GpuScheduler::SetScheduled, AsWeakPtr(), true),
-              kPollFencePeriod);
-          return;
-        }
-      }
-    } else {
-      // Hopefully no recent drivers don't support GL_NV_fence and this will
-      // not happen in practice.
-      glFinish();
-
-      while (!unschedule_fences_.empty()) {
-        unschedule_fences_.front().task.Run();
-        unschedule_fences_.pop();
-      }
-    }
-  }
+  if (!PollUnscheduleFences())
+    return;
 
   // One of the unschedule fence tasks might have unscheduled us.
   if (!IsScheduled())
@@ -137,6 +105,10 @@ bool GpuScheduler::IsScheduled() {
   return unscheduled_count_ == 0;
 }
 
+bool GpuScheduler::HasMoreWork() {
+  return !unschedule_fences_.empty();
+}
+
 void GpuScheduler::SetScheduledCallback(
     const base::Closure& scheduled_callback) {
   scheduled_callback_ = scheduled_callback;
@@ -193,6 +165,30 @@ void GpuScheduler::DeferToFence(base::Closure task) {
   unschedule_fences_.push(fence);
 }
 
+bool GpuScheduler::PollUnscheduleFences() {
+  if (gfx::g_GL_NV_fence) {
+    while (!unschedule_fences_.empty()) {
+      if (glTestFenceNV(unschedule_fences_.front().fence)) {
+        glDeleteFencesNV(1, &unschedule_fences_.front().fence);
+        unschedule_fences_.front().task.Run();
+        unschedule_fences_.pop();
+      } else {
+        return false;
+      }
+    }
+  } else {
+    if (!unschedule_fences_.empty())
+      glFinish();
+
+    while (!unschedule_fences_.empty()) {
+      unschedule_fences_.front().task.Run();
+      unschedule_fences_.pop();
+    }
+  }
+
+  return true;
+}
+
 GpuScheduler::UnscheduleFence::UnscheduleFence() : fence(0) {
 }
 
diff --git a/gpu/command_buffer/service/gpu_scheduler.h b/gpu/command_buffer/service/gpu_scheduler.h
index 5256e49..de18b61 100644
--- a/gpu/command_buffer/service/gpu_scheduler.h
+++ b/gpu/command_buffer/service/gpu_scheduler.h
@@ -41,9 +41,12 @@ class GpuScheduler
   // false must eventually be paired by a call with true.
   void SetScheduled(bool is_scheduled);
 
-  // Returns whether the scheduler is currently scheduled to process commands.
+  // Returns whether the scheduler is currently able to process more commands.
   bool IsScheduled();
 
+  // Returns whether the scheduler needs to be polled again in the future.
+  bool HasMoreWork();
+
   // Sets a callback that is invoked just before scheduler is rescheduled.
   // Takes ownership of callback object.
   void SetScheduledCallback(const base::Closure& scheduled_callback);
@@ -59,6 +62,9 @@ class GpuScheduler
   void DeferToFence(base::Closure task);
 
  private:
+  // Polls the fences, invoking callbacks that were waiting to be triggered
+  // by them and returns whether all fences were complete.
+  bool PollUnscheduleFences();
   // The GpuScheduler holds a weak reference to the CommandBuffer. The
   // CommandBuffer owns the GpuScheduler and holds a strong reference to it
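The new GpuScheduler::PollUnscheduleFences() above has two paths: when GL_NV_fence is available it tests each fence without blocking and returns false at the first one that has not signaled, so the caller can come back later; without the extension it falls back to a blocking glFinish() and then runs every deferred task. The sketch below is a minimal compilable model of that control flow only; PendingFence and wait_for_gpu_idle are hypothetical stand-ins for the real GL fence calls.

```cpp
// Model of the two fence-polling paths. Illustrative names only.
#include <functional>
#include <iostream>
#include <queue>

struct PendingFence {
  std::function<bool()> is_signaled;  // non-blocking test, like glTestFenceNV
  std::function<void()> task;         // deferred work to run when it passes
};

// Returns true once every fence task has run, false if the caller should poll
// again later (mirrors the bool return of the new PollUnscheduleFences()).
bool PollFences(std::queue<PendingFence>& fences,
                bool have_fence_extension,
                const std::function<void()>& wait_for_gpu_idle) {
  if (have_fence_extension) {
    while (!fences.empty()) {
      if (!fences.front().is_signaled())
        return false;                 // still pending; retry on the next poll
      fences.front().task();
      fences.pop();
    }
  } else {
    // No fence support: block until the GPU is idle, like the glFinish() path,
    // then run everything that was deferred.
    if (!fences.empty())
      wait_for_gpu_idle();
    while (!fences.empty()) {
      fences.front().task();
      fences.pop();
    }
  }
  return true;
}

int main() {
  std::queue<PendingFence> fences;
  int gpu_progress = 0;
  fences.push({[&] { return gpu_progress >= 2; },
               [] { std::cout << "deferred task ran\n"; }});

  // Keep polling until the fence signals, as the channel's delayed task would.
  while (!PollFences(fences, /*have_fence_extension=*/true, [] {}))
    ++gpu_progress;
}
```

Returning a bool instead of re-posting a SetScheduled() task from inside the scheduler is what lets the channel drive retries through HasMoreWork() and its own delayed HandleMessage() task.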