author     apatrick@chromium.org <apatrick@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>  2011-10-25 18:01:56 +0000
committer  apatrick@chromium.org <apatrick@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>  2011-10-25 18:01:56 +0000
commit     c257ae7a44e3a0bcb9d38c913b64885b0537b45f (patch)
tree       ee27d33568e3d504d6a18474c844672f9e430a20 /gpu
parent     339d80a241349ede01e274aa9544c155259fb1f8 (diff)
GpuScheduler can unschedule a command buffer until the GPU has made progress up to a fence.
Other command buffers on the channel can still be processed, but if any more calls are issued on the unscheduled command buffer, the entire channel is unscheduled. This prevents reordering of GL commands with respect to flushes. It will be used by this change: http://codereview.chromium.org/8060045/

Review URL: http://codereview.chromium.org/8387008

git-svn-id: svn://svn.chromium.org/chrome/trunk/src@107154 0039d316-1c4b-4281-b951-d872f2087c98
Diffstat (limited to 'gpu')
-rw-r--r--  gpu/command_buffer/service/gpu_scheduler.cc  72
-rw-r--r--  gpu/command_buffer/service/gpu_scheduler.h   20
2 files changed, 91 insertions(+), 1 deletion(-)
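
Before the diff itself, here is a minimal standalone sketch of the behaviour the commit message describes. This is not Chromium code: TinyScheduler, DeferredTask and the plain bool flag standing in for a GL fence are illustrative only; the real implementation below uses GL_NV_fence, base::Closure, and MessageLoop::PostDelayedTask to poll.

#include <functional>
#include <iostream>
#include <queue>

// Simplified stand-in for the new GpuScheduler behaviour: each deferred task
// runs only once its fence has been passed; if new work arrives while the
// front fence is still pending, processing stops (the "unschedule" case).
struct DeferredTask {
  const bool* fence_passed;    // stands in for a GL_NV_fence object
  std::function<void()> task;  // stands in for base::Closure
};

class TinyScheduler {
 public:
  void DeferToFence(const bool* fence_passed, std::function<void()> task) {
    deferred_.push({fence_passed, std::move(task)});
  }

  // Analogue of GpuScheduler::PutChanged(): drain every fence the GPU has
  // already passed, then process new commands; otherwise report that the
  // channel must be unscheduled until the GPU makes progress.
  bool ProcessNewWork() {
    while (!deferred_.empty()) {
      if (!*deferred_.front().fence_passed)
        return false;  // would unschedule here and poll again later
      deferred_.front().task();
      deferred_.pop();
    }
    // ... parse and execute the newly submitted commands here ...
    return true;
  }

 private:
  std::queue<DeferredTask> deferred_;
};

int main() {
  TinyScheduler scheduler;
  bool fence_passed = false;
  scheduler.DeferToFence(&fence_passed,
                         [] { std::cout << "deferred task ran\n"; });

  std::cout << "processed: " << scheduler.ProcessNewWork() << "\n";  // 0: fence pending
  fence_passed = true;                                               // GPU caught up
  std::cout << "processed: " << scheduler.ProcessNewWork() << "\n";  // 1: task ran, new work proceeds
}

The key point, mirrored in PutChanged() below, is that already-passed fences are drained before any new commands are parsed, so deferred tasks can never be reordered after later GL commands.
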
diff --git a/gpu/command_buffer/service/gpu_scheduler.cc b/gpu/command_buffer/service/gpu_scheduler.cc
index a6442e9..09e6478 100644
--- a/gpu/command_buffer/service/gpu_scheduler.cc
+++ b/gpu/command_buffer/service/gpu_scheduler.cc
@@ -4,6 +4,7 @@
#include "gpu/command_buffer/service/gpu_scheduler.h"
+#include "base/bind.h"
#include "base/callback.h"
#include "base/command_line.h"
#include "base/compiler_specific.h"
@@ -18,6 +19,9 @@
using ::base::SharedMemory;
namespace gpu {
+namespace {
+const uint64 kPollFencePeriod = 1;
+}
GpuScheduler::GpuScheduler(CommandBuffer* command_buffer,
gles2::GLES2Decoder* decoder,
@@ -56,8 +60,44 @@ void GpuScheduler::PutChanged() {
  if (state.error != error::kNoError)
    return;
+  // Check that the GPU has passed all fences.
+  if (!unschedule_fences_.empty()) {
+    if (glGenFencesNV) {
+      while (!unschedule_fences_.empty()) {
+        if (glTestFenceNV(unschedule_fences_.front().fence)) {
+          glDeleteFencesNV(1, &unschedule_fences_.front().fence);
+          unschedule_fences_.front().task.Run();
+          unschedule_fences_.pop();
+        } else {
+          SetScheduled(false);
+          MessageLoop::current()->PostDelayedTask(
+              FROM_HERE,
+              base::Bind(&GpuScheduler::SetScheduled, AsWeakPtr(), true),
+              kPollFencePeriod);
+          return;
+        }
+      }
+    } else {
+      // Hopefully all recent drivers support GL_NV_fence, so this fallback
+      // should not be needed in practice.
+      glFinish();
+
+      while (!unschedule_fences_.empty()) {
+        unschedule_fences_.front().task.Run();
+        unschedule_fences_.pop();
+      }
+    }
+  }
+
+  // One of the unschedule fence tasks might have unscheduled us.
+  if (!IsScheduled())
+    return;
+
  error::Error error = error::kNoError;
  while (!parser_->IsEmpty()) {
+    DCHECK(IsScheduled());
+    DCHECK(unschedule_fences_.empty());
+
    error = parser_->ProcessCommand();
    // TODO(piman): various classes duplicate various pieces of state, leading
@@ -127,4 +167,36 @@ void GpuScheduler::SetCommandProcessedCallback(
  command_processed_callback_.reset(callback);
}
+void GpuScheduler::DeferToFence(base::Closure task) {
+  UnscheduleFence fence;
+
+  // What if either of these GL calls fails? TestFenceNV will return true and
+  // PutChanged will treat the fence as having been crossed and thereby not
+  // poll indefinitely. See spec:
+  // http://www.opengl.org/registry/specs/NV/fence.txt
+  //
+  // What should happen if TestFenceNV is called for a name before SetFenceNV
+  // is called?
+  //   We generate an INVALID_OPERATION error, and return TRUE.
+  //   This follows the semantics for texture object names before
+  //   they are bound, in that they acquire their state upon binding.
+  //   We will arbitrarily return TRUE for consistency.
+  if (glGenFencesNV) {
+    glGenFencesNV(1, &fence.fence);
+    glSetFenceNV(fence.fence, GL_ALL_COMPLETED_NV);
+  }
+
+  glFlush();
+
+  fence.task = task;
+
+  unschedule_fences_.push(fence);
+}
+
+GpuScheduler::UnscheduleFence::UnscheduleFence() : fence(0) {
+}
+
+GpuScheduler::UnscheduleFence::~UnscheduleFence() {
+}
+
} // namespace gpu
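
For context on the fence handling above, this is the GL_NV_fence call pattern the new code relies on, boiled down to two illustrative helpers. InsertFence and FencePassed are made-up names, not part of the change; a current GL context and Chromium-style GL bindings, where the entry points of an unsupported extension are left null (which is why the code above can test "if (glGenFencesNV)"), are assumed.

// Illustrative helpers only; assumes a current GL context and NV_fence
// entry points exposed as (possibly null) function pointers.
GLuint InsertFence() {
  GLuint fence = 0;
  if (glGenFencesNV) {
    glGenFencesNV(1, &fence);
    // The fence becomes signalled once all previously issued commands complete.
    glSetFenceNV(fence, GL_ALL_COMPLETED_NV);
  }
  // Flush so the commands ahead of the fence are actually submitted to the GPU.
  glFlush();
  return fence;
}

bool FencePassed(GLuint fence) {
  if (!glGenFencesNV) {
    // No GL_NV_fence support: fall back to a blocking glFinish, as
    // PutChanged() does above.
    glFinish();
    return true;
  }
  if (!glTestFenceNV(fence))
    return false;  // The GPU has not reached the fence yet; poll again later.
  glDeleteFencesNV(1, &fence);
  return true;
}
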
diff --git a/gpu/command_buffer/service/gpu_scheduler.h b/gpu/command_buffer/service/gpu_scheduler.h
index 32b072c..5c7cfe0 100644
--- a/gpu/command_buffer/service/gpu_scheduler.h
+++ b/gpu/command_buffer/service/gpu_scheduler.h
@@ -5,9 +5,12 @@
#ifndef GPU_COMMAND_BUFFER_SERVICE_GPU_SCHEDULER_H_
#define GPU_COMMAND_BUFFER_SERVICE_GPU_SCHEDULER_H_
+#include <queue>
+
#include "base/callback.h"
#include "base/memory/ref_counted.h"
#include "base/memory/scoped_ptr.h"
+#include "base/memory/weak_ptr.h"
#include "base/shared_memory.h"
#include "gpu/command_buffer/common/command_buffer.h"
#include "gpu/command_buffer/service/cmd_buffer_engine.h"
@@ -20,7 +23,9 @@ namespace gpu {
// a command buffer and forwarded to a command parser. TODO(apatrick): This
// class should not know about the decoder. Do not add additional dependencies
// on it.
-class GpuScheduler : public CommandBufferEngine {
+class GpuScheduler
+    : public CommandBufferEngine,
+      public base::SupportsWeakPtr<GpuScheduler> {
 public:
  GpuScheduler(CommandBuffer* command_buffer,
               gles2::GLES2Decoder* decoder,
@@ -51,6 +56,8 @@ class GpuScheduler : public CommandBufferEngine {
  void SetCommandProcessedCallback(Callback0::Type* callback);
+  void DeferToFence(base::Closure task);
+
 private:
  // The GpuScheduler holds a weak reference to the CommandBuffer. The
@@ -71,6 +78,17 @@ class GpuScheduler : public CommandBufferEngine {
  // Greater than zero if this is waiting to be rescheduled before continuing.
  int unscheduled_count_;
+  // The GpuScheduler will unschedule itself in the event that further GL calls
+  // are issued to it before all these fences have been crossed by the GPU.
+  struct UnscheduleFence {
+    UnscheduleFence();
+    ~UnscheduleFence();
+
+    uint32 fence;
+    base::Closure task;
+  };
+  std::queue<UnscheduleFence> unschedule_fences_;
+
  scoped_ptr<Callback0::Type> scheduled_callback_;
  scoped_ptr<Callback0::Type> command_processed_callback_;
};
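
Finally, a hypothetical caller-side sketch of the new DeferToFence() entry point. The real client is the change referenced in the commit message and is not part of this patch; SomeGpuStub, ReplyToClient, scheduler_ and weak_factory_ are illustrative names, and Chromium's base::Bind and base::WeakPtrFactory are assumed.

// Hypothetical usage; not part of this change.
void SomeGpuStub::OnAsyncWorkIssued() {
  // The GL commands for the operation have already been issued on this
  // context. Defer the reply until the GPU has actually executed them; if the
  // client submits more commands first, GpuScheduler::PutChanged() will
  // unschedule the whole channel until the fence is passed.
  scheduler_->DeferToFence(
      base::Bind(&SomeGpuStub::ReplyToClient, weak_factory_.GetWeakPtr()));
}
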