summaryrefslogtreecommitdiffstats
path: root/gpu/command_buffer
diff options
context:
space:
mode:
authorapatrick@chromium.org <apatrick@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>2011-07-20 01:11:51 +0000
committerapatrick@chromium.org <apatrick@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>2011-07-20 01:11:51 +0000
commit1d4ea84edf96367e3e8d7278f8e1b9f913f290b0 (patch)
tree068ccc98005de448c2fe3135e33e8dc268758705 /gpu/command_buffer
parent7dea88fd2340d36442020166c1aec0be431607e1 (diff)
downloadchromium_src-1d4ea84edf96367e3e8d7278f8e1b9f913f290b0.zip
chromium_src-1d4ea84edf96367e3e8d7278f8e1b9f913f290b0.tar.gz
chromium_src-1d4ea84edf96367e3e8d7278f8e1b9f913f290b0.tar.bz2
Revert 93066 - Execute all GL commands up to the put offset reported by each flush. This means glFlush is a barrier that prevents reordering of GL commands issued on different command buffers. I used it to replace latches for synchronizing the rendering of WebGL canvas and Pepper 3D with the accelerated compositor. The primary advantage is it is more robust than latches and there is no possibility of deadlock. It should also be possible for WebGL and Pepper 3D to use it whereas exposing SetLatch and WaitLatch would be dangerous. The calls to SetLatch and WaitLatch are still in webkit but they are no-ops. SetLatch and WaitLatch are completely removed elsewhere. I changed CommandBuffer::FlushSync to Finish to reflect the new semantics. Going forward, I will add a synchronous CommandBuffer::WaitForToken and WaitForAvailableEntries, which should eliminate the need to call Finish unless glFinish is called by the client. The Pepper interface is unchanged because I don't want to break binary compatibility. I fixed a bug where the last read token in CmdBufferHelper was stale after receiving a ReportState IPC. That was causing a redundant synchronous flush in the client side SwapBuffers throttling. I removed Yield because it does not make sense with the new semantics. There is no round robin scheduling. Tested with WebGL on Windows and Mac and checked that 72672 did not regress.
Review URL: http://codereview.chromium.org/7253052 TBR=apatrick@chromium.org Review URL: http://codereview.chromium.org/7458010 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@93143 0039d316-1c4b-4281-b951-d872f2087c98
Diffstat (limited to 'gpu/command_buffer')
-rwxr-xr-xgpu/command_buffer/build_gles2_cmd_buffer.py10
-rw-r--r--gpu/command_buffer/client/cmd_buffer_helper.cc29
-rw-r--r--gpu/command_buffer/client/cmd_buffer_helper.h19
-rw-r--r--gpu/command_buffer/client/cmd_buffer_helper_test.cc8
-rw-r--r--gpu/command_buffer/client/fenced_allocator_test.cc4
-rw-r--r--gpu/command_buffer/client/gles2_c_lib_autogen.h6
-rw-r--r--gpu/command_buffer/client/gles2_cmd_helper_autogen.h10
-rw-r--r--gpu/command_buffer/client/gles2_demo.cc6
-rw-r--r--gpu/command_buffer/client/gles2_implementation.cc1
-rw-r--r--gpu/command_buffer/client/gles2_implementation_autogen.h10
-rw-r--r--gpu/command_buffer/client/gles2_implementation_unittest.cc6
-rw-r--r--gpu/command_buffer/client/mapped_memory_unittest.cc4
-rw-r--r--gpu/command_buffer/client/ring_buffer_test.cc4
-rw-r--r--gpu/command_buffer/common/cmd_buffer_common.cc11
-rw-r--r--gpu/command_buffer/common/cmd_buffer_common.h27
-rw-r--r--gpu/command_buffer/common/command_buffer.h15
-rw-r--r--gpu/command_buffer/common/command_buffer_mock.h1
-rw-r--r--gpu/command_buffer/common/constants.h20
-rw-r--r--gpu/command_buffer/common/gles2_cmd_format_autogen.h68
-rw-r--r--gpu/command_buffer/common/gles2_cmd_format_test_autogen.h26
-rw-r--r--gpu/command_buffer/common/gles2_cmd_ids_autogen.h2
-rw-r--r--gpu/command_buffer/common/gles2_cmd_utils_implementation_autogen.h4
-rw-r--r--gpu/command_buffer/service/cmd_parser.cc2
-rw-r--r--gpu/command_buffer/service/cmd_parser_test.cc24
-rw-r--r--gpu/command_buffer/service/command_buffer_service.cc10
-rw-r--r--gpu/command_buffer/service/command_buffer_service.h5
-rw-r--r--gpu/command_buffer/service/common_decoder.cc6
-rw-r--r--gpu/command_buffer/service/common_decoder_unittest.cc6
-rw-r--r--gpu/command_buffer/service/gles2_cmd_decoder.cc63
-rw-r--r--gpu/command_buffer/service/gles2_cmd_decoder.h5
-rw-r--r--gpu/command_buffer/service/gles2_cmd_decoder_unittest.cc59
-rw-r--r--gpu/command_buffer/service/gles2_cmd_decoder_unittest_2_autogen.h3
-rw-r--r--gpu/command_buffer/service/gles2_cmd_decoder_unittest_3_autogen.h3
-rw-r--r--gpu/command_buffer/service/gpu_scheduler.cc170
-rw-r--r--gpu/command_buffer/service/gpu_scheduler.h35
-rw-r--r--gpu/command_buffer/service/gpu_scheduler_unittest.cc62
-rw-r--r--gpu/command_buffer/service/mocks.cc19
-rw-r--r--gpu/command_buffer/service/mocks.h14
38 files changed, 629 insertions, 148 deletions
diff --git a/gpu/command_buffer/build_gles2_cmd_buffer.py b/gpu/command_buffer/build_gles2_cmd_buffer.py
index 76f3c59..02cac97 100755
--- a/gpu/command_buffer/build_gles2_cmd_buffer.py
+++ b/gpu/command_buffer/build_gles2_cmd_buffer.py
@@ -217,6 +217,8 @@ GL_APICALL void GL_APIENTRY glCopyTextureToParentTextureCHROMIUM (GLidBi
GL_APICALL void GL_APIENTRY glResizeCHROMIUM (GLuint width, GLuint height);
GL_APICALL const GLchar* GL_APIENTRY glGetRequestableExtensionsCHROMIUM (void);
GL_APICALL void GL_APIENTRY glRequestExtensionCHROMIUM (const char* extension);
+GL_APICALL void GL_APIENTRY glSetLatchCHROMIUM (GLuint latch_id);
+GL_APICALL void GL_APIENTRY glWaitLatchCHROMIUM (GLuint latch_id);
GL_APICALL void GL_APIENTRY glRateLimitOffscreenContextCHROMIUM (void);
GL_APICALL void GL_APIENTRY glSetSurfaceCHROMIUM (GLint surface_id);
GL_APICALL void GL_APIENTRY glGetMultipleIntegervCHROMIUM (const GLenum* pnames, GLuint count, GLint* results, GLsizeiptr size);
@@ -424,6 +426,8 @@ _CMD_ID_TABLE = {
'ResizeCHROMIUM': 448,
'GetRequestableExtensionsCHROMIUM': 449,
'RequestExtensionCHROMIUM': 450,
+ 'SetLatchCHROMIUM': 451,
+ 'WaitLatchCHROMIUM': 452,
'SetSurfaceCHROMIUM': 453,
'GetMultipleIntegervCHROMIUM': 454,
'GetProgramInfoCHROMIUM': 455,
@@ -1733,6 +1737,12 @@ _FUNCTION_INFO = {
'extension': True,
'chromium': True,
},
+ 'SetLatchCHROMIUM': {
+ 'type': 'Custom',
+ },
+ 'WaitLatchCHROMIUM': {
+ 'type': 'Custom',
+ },
'RateLimitOffscreenContextCHROMIUM': {
'gen_cmd': False,
'extension': True,
diff --git a/gpu/command_buffer/client/cmd_buffer_helper.cc b/gpu/command_buffer/client/cmd_buffer_helper.cc
index bd44431..354d563 100644
--- a/gpu/command_buffer/client/cmd_buffer_helper.cc
+++ b/gpu/command_buffer/client/cmd_buffer_helper.cc
@@ -21,6 +21,8 @@ CommandBufferHelper::CommandBufferHelper(CommandBuffer* command_buffer)
total_entry_count_(0),
usable_entry_count_(0),
token_(0),
+ last_token_read_(-1),
+ get_(0),
put_(0),
last_put_sent_(0),
commands_issued_(0),
@@ -45,6 +47,7 @@ bool CommandBufferHelper::Initialize(int32 ring_buffer_size) {
total_entry_count_ = num_ring_buffer_entries;
usable_entry_count_ = total_entry_count_ - kJumpEntries;
put_ = state.put_offset;
+ SynchronizeState(state);
return true;
}
@@ -54,7 +57,8 @@ CommandBufferHelper::~CommandBufferHelper() {
bool CommandBufferHelper::FlushSync() {
time(&last_flush_time_);
last_put_sent_ = put_;
- CommandBuffer::State state = command_buffer_->FlushSync(put_, get_offset());
+ CommandBuffer::State state = command_buffer_->FlushSync(put_, get_);
+ SynchronizeState(state);
return state.error == error::kNoError;
}
@@ -73,7 +77,7 @@ bool CommandBufferHelper::Finish() {
// has shutdown.
if (!FlushSync())
return false;
- } while (put_ != get_offset());
+ } while (put_ != get_);
return true;
}
@@ -92,7 +96,7 @@ int32 CommandBufferHelper::InsertToken() {
TRACE_EVENT0("gpu", "CommandBufferHelper::InsertToken(wrapped)");
// we wrapped
Finish();
- GPU_DCHECK_EQ(token_, last_token_read());
+ GPU_DCHECK_EQ(token_, last_token_read_);
}
return token_;
}
@@ -105,8 +109,8 @@ void CommandBufferHelper::WaitForToken(int32 token) {
if (token < 0)
return;
if (token > token_) return; // we wrapped
- while (last_token_read() < token) {
- if (get_offset() == put_) {
+ while (last_token_read_ < token) {
+ if (get_ == put_) {
GPU_LOG(FATAL) << "Empty command buffer while waiting on a token.";
return;
}
@@ -117,6 +121,11 @@ void CommandBufferHelper::WaitForToken(int32 token) {
}
}
+void CommandBufferHelper::YieldScheduler() {
+ cmd::YieldScheduler& cmd = GetCmdSpace<cmd::YieldScheduler>();
+ cmd.Init();
+}
+
// Waits for available entries, basically waiting until get >= put + count + 1.
// It actually waits for contiguous entries, so it may need to wrap the buffer
// around, adding a jump. Thus this function may change the value of put_. The
@@ -130,9 +139,9 @@ void CommandBufferHelper::WaitForAvailableEntries(int32 count) {
// need to make sure get wraps first, actually that get is 1 or more (since
// put will wrap to 0 after we add the jump).
GPU_DCHECK_LE(1, put_);
- if (get_offset() > put_ || get_offset() == 0) {
+ if (get_ > put_ || get_ == 0) {
TRACE_EVENT0("gpu", "CommandBufferHelper::WaitForAvailableEntries");
- while (get_offset() > put_ || get_offset() == 0) {
+ while (get_ > put_ || get_ == 0) {
// Do not loop forever if the flush fails, meaning the command buffer
// reader has shutdown.
if (!FlushSync())
@@ -176,7 +185,13 @@ CommandBufferEntry* CommandBufferHelper::GetSpace(uint32 entries) {
error::Error CommandBufferHelper::GetError() {
CommandBuffer::State state = command_buffer_->GetState();
+ SynchronizeState(state);
return static_cast<error::Error>(state.error);
}
+void CommandBufferHelper::SynchronizeState(const CommandBuffer::State& state) {
+ get_ = state.get_offset;
+ last_token_read_ = state.token;
+}
+
} // namespace gpu
diff --git a/gpu/command_buffer/client/cmd_buffer_helper.h b/gpu/command_buffer/client/cmd_buffer_helper.h
index a7c17ef..c7413ca 100644
--- a/gpu/command_buffer/client/cmd_buffer_helper.h
+++ b/gpu/command_buffer/client/cmd_buffer_helper.h
@@ -83,6 +83,11 @@ class CommandBufferHelper {
// the value of the token to wait for.
void WaitForToken(int32 token);
+ // Inserts a yield command, signaling the scheduler that this is a good point
+ // to update the state and schedule other command buffers. This is
+ // particularly useful after inserting a token that will be waited on.
+ void YieldScheduler();
+
// Called prior to each command being issued. Waits for a certain amount of
// space to be available. Returns address of space.
CommandBufferEntry* GetSpace(uint32 entries);
@@ -116,11 +121,7 @@ class CommandBufferHelper {
}
int32 last_token_read() const {
- return command_buffer_->GetLastState().token;
- }
-
- int32 get_offset() const {
- return command_buffer_->GetLastState().get_offset;
+ return last_token_read_;
}
error::Error GetError();
@@ -220,16 +221,20 @@ class CommandBufferHelper {
// Returns the number of available entries (they may not be contiguous).
int32 AvailableEntries() {
- return (get_offset() - put_ - 1 + usable_entry_count_) %
- usable_entry_count_;
+ return (get_ - put_ - 1 + usable_entry_count_) % usable_entry_count_;
}
+ // Synchronize with current service state.
+ void SynchronizeState(const CommandBuffer::State& state);
+
CommandBuffer* command_buffer_;
Buffer ring_buffer_;
CommandBufferEntry *entries_;
int32 total_entry_count_; // the total number of entries
int32 usable_entry_count_; // the usable number (ie, minus space for jump)
int32 token_;
+ int32 last_token_read_;
+ int32 get_;
int32 put_;
int32 last_put_sent_;
int commands_issued_;
diff --git a/gpu/command_buffer/client/cmd_buffer_helper_test.cc b/gpu/command_buffer/client/cmd_buffer_helper_test.cc
index 01f3760..56eaa0a 100644
--- a/gpu/command_buffer/client/cmd_buffer_helper_test.cc
+++ b/gpu/command_buffer/client/cmd_buffer_helper_test.cc
@@ -77,8 +77,8 @@ class CommandBufferHelperTest : public testing::Test {
.WillRepeatedly(
Invoke(do_jump_command_.get(), &DoJumpCommand::DoCommand));
- gpu_scheduler_.reset(GpuScheduler::CreateForTests(
- command_buffer_.get(), NULL, parser_));
+ gpu_scheduler_.reset(new GpuScheduler(
+ command_buffer_.get(), NULL, parser_, 1));
command_buffer_->SetPutOffsetChangeCallback(NewCallback(
gpu_scheduler_.get(), &GpuScheduler::PutChanged));
@@ -185,6 +185,10 @@ TEST_F(CommandBufferHelperTest, TestCommandProcessing) {
args2[1].value_float = 6.f;
AddCommandWithExpect(error::kNoError, kUnusedCommandId, 2, args2);
+ helper_->Flush();
+ // Check that the engine has work to do now.
+ EXPECT_FALSE(parser_->IsEmpty());
+
// Wait until it's done.
helper_->Finish();
// Check that the engine has no more work to do.
diff --git a/gpu/command_buffer/client/fenced_allocator_test.cc b/gpu/command_buffer/client/fenced_allocator_test.cc
index 883d752..3bf9bd8 100644
--- a/gpu/command_buffer/client/fenced_allocator_test.cc
+++ b/gpu/command_buffer/client/fenced_allocator_test.cc
@@ -51,8 +51,8 @@ class BaseFencedAllocatorTest : public testing::Test {
0,
api_mock_.get());
- gpu_scheduler_.reset(GpuScheduler::CreateForTests(
- command_buffer_.get(), NULL, parser_));
+ gpu_scheduler_.reset(new GpuScheduler(
+ command_buffer_.get(), NULL, parser_, INT_MAX));
command_buffer_->SetPutOffsetChangeCallback(NewCallback(
gpu_scheduler_.get(), &GpuScheduler::PutChanged));
diff --git a/gpu/command_buffer/client/gles2_c_lib_autogen.h b/gpu/command_buffer/client/gles2_c_lib_autogen.h
index 3585cab..2306501 100644
--- a/gpu/command_buffer/client/gles2_c_lib_autogen.h
+++ b/gpu/command_buffer/client/gles2_c_lib_autogen.h
@@ -568,6 +568,12 @@ const GLchar* GLES2GetRequestableExtensionsCHROMIUM() {
void GLES2RequestExtensionCHROMIUM(const char* extension) {
gles2::GetGLContext()->RequestExtensionCHROMIUM(extension);
}
+void GLES2SetLatchCHROMIUM(GLuint latch_id) {
+ gles2::GetGLContext()->SetLatchCHROMIUM(latch_id);
+}
+void GLES2WaitLatchCHROMIUM(GLuint latch_id) {
+ gles2::GetGLContext()->WaitLatchCHROMIUM(latch_id);
+}
void GLES2RateLimitOffscreenContextCHROMIUM() {
gles2::GetGLContext()->RateLimitOffscreenContextCHROMIUM();
}
diff --git a/gpu/command_buffer/client/gles2_cmd_helper_autogen.h b/gpu/command_buffer/client/gles2_cmd_helper_autogen.h
index c28e989..8078354 100644
--- a/gpu/command_buffer/client/gles2_cmd_helper_autogen.h
+++ b/gpu/command_buffer/client/gles2_cmd_helper_autogen.h
@@ -1219,6 +1219,16 @@
c.Init(bucket_id);
}
+ void SetLatchCHROMIUM(GLuint latch_id) {
+ gles2::SetLatchCHROMIUM& c = GetCmdSpace<gles2::SetLatchCHROMIUM>();
+ c.Init(latch_id);
+ }
+
+ void WaitLatchCHROMIUM(GLuint latch_id) {
+ gles2::WaitLatchCHROMIUM& c = GetCmdSpace<gles2::WaitLatchCHROMIUM>();
+ c.Init(latch_id);
+ }
+
void SetSurfaceCHROMIUM(GLint surface_id) {
gles2::SetSurfaceCHROMIUM& c = GetCmdSpace<gles2::SetSurfaceCHROMIUM>();
c.Init(surface_id);
diff --git a/gpu/command_buffer/client/gles2_demo.cc b/gpu/command_buffer/client/gles2_demo.cc
index 2d23a8f..c9595df 100644
--- a/gpu/command_buffer/client/gles2_demo.cc
+++ b/gpu/command_buffer/client/gles2_demo.cc
@@ -56,9 +56,9 @@ bool GLES2Demo::Setup(void* hwnd, int32 size) {
if (!command_buffer->Initialize(size))
return NULL;
- GpuScheduler* gpu_scheduler = GpuScheduler::Create(command_buffer.get(),
- NULL,
- NULL);
+ GpuScheduler* gpu_scheduler = new GpuScheduler(command_buffer.get(),
+ NULL,
+ NULL);
if (!gpu_scheduler->Initialize(reinterpret_cast<HWND>(hwnd),
gfx::Size(),
gpu::gles2::DisallowedExtensions(),
diff --git a/gpu/command_buffer/client/gles2_implementation.cc b/gpu/command_buffer/client/gles2_implementation.cc
index 52abdf1..1a7eeaa 100644
--- a/gpu/command_buffer/client/gles2_implementation.cc
+++ b/gpu/command_buffer/client/gles2_implementation.cc
@@ -832,6 +832,7 @@ void GLES2Implementation::SwapBuffers() {
// the scheduler yields between the InsertToken and the SwapBuffers.
swap_buffers_tokens_.push(helper_->InsertToken());
helper_->SwapBuffers();
+ helper_->YieldScheduler();
helper_->CommandBufferHelper::Flush();
// Wait if we added too many swap buffers.
if (swap_buffers_tokens_.size() > kMaxSwapBuffers) {
diff --git a/gpu/command_buffer/client/gles2_implementation_autogen.h b/gpu/command_buffer/client/gles2_implementation_autogen.h
index 397d975..270d303 100644
--- a/gpu/command_buffer/client/gles2_implementation_autogen.h
+++ b/gpu/command_buffer/client/gles2_implementation_autogen.h
@@ -1272,6 +1272,16 @@ const GLchar* GetRequestableExtensionsCHROMIUM();
void RequestExtensionCHROMIUM(const char* extension);
+void SetLatchCHROMIUM(GLuint latch_id) {
+ GPU_CLIENT_LOG("[" << this << "] glSetLatchCHROMIUM(" << latch_id << ")");
+ helper_->SetLatchCHROMIUM(latch_id);
+}
+
+void WaitLatchCHROMIUM(GLuint latch_id) {
+ GPU_CLIENT_LOG("[" << this << "] glWaitLatchCHROMIUM(" << latch_id << ")");
+ helper_->WaitLatchCHROMIUM(latch_id);
+}
+
void RateLimitOffscreenContextCHROMIUM();
void SetSurfaceCHROMIUM(GLint surface_id) {
diff --git a/gpu/command_buffer/client/gles2_implementation_unittest.cc b/gpu/command_buffer/client/gles2_implementation_unittest.cc
index a37f4b2..eb003a5 100644
--- a/gpu/command_buffer/client/gles2_implementation_unittest.cc
+++ b/gpu/command_buffer/client/gles2_implementation_unittest.cc
@@ -46,10 +46,6 @@ class GLES2MockCommandBufferHelper : public CommandBuffer {
return state_;
}
- virtual State GetLastState() {
- return state_;
- }
-
virtual void Flush(int32 put_offset) {
state_.put_offset = put_offset;
}
@@ -262,7 +258,7 @@ class GLES2ImplementationTest : public testing::Test {
false));
EXPECT_CALL(*command_buffer_, OnFlush(_)).Times(1).RetiresOnSaturation();
- helper_->CommandBufferHelper::Finish();
+ helper_->CommandBufferHelper::FlushSync();
Buffer ring_buffer = command_buffer_->GetRingBuffer();
commands_ = static_cast<CommandBufferEntry*>(ring_buffer.ptr) +
command_buffer_->GetState().put_offset;
diff --git a/gpu/command_buffer/client/mapped_memory_unittest.cc b/gpu/command_buffer/client/mapped_memory_unittest.cc
index 735ac23..067c8e6 100644
--- a/gpu/command_buffer/client/mapped_memory_unittest.cc
+++ b/gpu/command_buffer/client/mapped_memory_unittest.cc
@@ -49,8 +49,8 @@ class MappedMemoryTestBase : public testing::Test {
0,
api_mock_.get());
- gpu_scheduler_.reset(GpuScheduler::CreateForTests(
- command_buffer_.get(), NULL, parser_));
+ gpu_scheduler_.reset(new GpuScheduler(
+ command_buffer_.get(), NULL, parser_, INT_MAX));
command_buffer_->SetPutOffsetChangeCallback(NewCallback(
gpu_scheduler_.get(), &GpuScheduler::PutChanged));
diff --git a/gpu/command_buffer/client/ring_buffer_test.cc b/gpu/command_buffer/client/ring_buffer_test.cc
index a816393..01bc3e0 100644
--- a/gpu/command_buffer/client/ring_buffer_test.cc
+++ b/gpu/command_buffer/client/ring_buffer_test.cc
@@ -71,8 +71,8 @@ class BaseRingBufferTest : public testing::Test {
0,
api_mock_.get());
- gpu_scheduler_.reset(GpuScheduler::CreateForTests(
- command_buffer_.get(), NULL, parser_));
+ gpu_scheduler_.reset(new GpuScheduler(
+ command_buffer_.get(), NULL, parser_, INT_MAX));
command_buffer_->SetPutOffsetChangeCallback(NewCallback(
gpu_scheduler_.get(), &GpuScheduler::PutChanged));
diff --git a/gpu/command_buffer/common/cmd_buffer_common.cc b/gpu/command_buffer/common/cmd_buffer_common.cc
index 9ddb1f3..a9113b2 100644
--- a/gpu/command_buffer/common/cmd_buffer_common.cc
+++ b/gpu/command_buffer/common/cmd_buffer_common.cc
@@ -31,17 +31,6 @@ const char* GetCommandName(CommandId command_id) {
} // namespace cmd
-// TODO(apatrick): this method body is here instead of command_buffer.cc
-// because NaCl currently compiles in this file but not the other.
-// Remove this method body and the includes of command_buffer.h and
-// logging.h above once NaCl defines SetContextLostReason() in its
-// CommandBuffer subclass and has been rolled forward. See
-// http://crbug.com/89670 .
-gpu::CommandBuffer::State CommandBuffer::GetLastState() {
- GPU_NOTREACHED();
- return gpu::CommandBuffer::State();
-}
-
// TODO(kbr): this method body is here instead of command_buffer.cc
// because NaCl currently compiles in this file but not the other.
// Remove this method body and the includes of command_buffer.h and
diff --git a/gpu/command_buffer/common/cmd_buffer_common.h b/gpu/command_buffer/common/cmd_buffer_common.h
index eed4724..0f050e4 100644
--- a/gpu/command_buffer/common/cmd_buffer_common.h
+++ b/gpu/command_buffer/common/cmd_buffer_common.h
@@ -158,6 +158,7 @@ namespace cmd {
OP(SetBucketDataImmediate) /* 9 */ \
OP(GetBucketSize) /* 10 */ \
OP(GetBucketData) /* 11 */ \
+ OP(YieldScheduler) /* 12 */ \
// Common commands.
enum CommandId {
@@ -642,6 +643,32 @@ COMPILE_ASSERT(offsetof(GetBucketData, shared_memory_id) == 16,
COMPILE_ASSERT(offsetof(GetBucketData, shared_memory_offset) == 20,
Offsetof_GetBucketData_shared_memory_offset_not_20);
+// A Yield command. Hints the scheduler that this is a good point to update the
+// state and schedule other command buffers.
+struct YieldScheduler {
+ typedef YieldScheduler ValueType;
+ static const CommandId kCmdId = kYieldScheduler;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+
+ void SetHeader() {
+ header.SetCmd<ValueType>();
+ }
+
+ void Init() {
+ SetHeader();
+ }
+ static void* Set(void* cmd) {
+ static_cast<ValueType*>(cmd)->Init();
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ CommandHeader header;
+};
+
+COMPILE_ASSERT(sizeof(YieldScheduler) == 4, Sizeof_YieldScheduler_is_not_4);
+COMPILE_ASSERT(offsetof(YieldScheduler, header) == 0,
+ Offsetof_YieldScheduler_header_not_0);
+
} // namespace cmd
#pragma pack(pop)
diff --git a/gpu/command_buffer/common/command_buffer.h b/gpu/command_buffer/common/command_buffer.h
index 2eff201..539098b 100644
--- a/gpu/command_buffer/common/command_buffer.h
+++ b/gpu/command_buffer/common/command_buffer.h
@@ -78,18 +78,17 @@ class CommandBuffer {
// Returns the current status.
virtual State GetState() = 0;
- // Returns the last state without synchronizing with the service.
- virtual State GetLastState();
-
// The writer calls this to update its put offset. This ensures the reader
- // sees the latest added commands, and will eventually process them. On the
- // service side, commands are processed up to the given put_offset before
- // subsequent Flushes on the same GpuChannel.
+ // sees the latest added commands, and will eventually process them.
virtual void Flush(int32 put_offset) = 0;
// The writer calls this to update its put offset. This function returns the
- // reader's most recent get offset. Does not return until all pending commands
- // have been executed.
+ // reader's most recent get offset. Does not return until after the put offset
+ // change callback has been invoked. Returns -1 if the put offset is invalid.
+ // If last_known_get is different from the reader's current get pointer, this
+ // function will return immediately, otherwise it guarantees that the reader
+ // has processed some commands before returning (assuming the command buffer
+ // isn't empty and there is no error).
virtual State FlushSync(int32 put_offset, int32 last_known_get) = 0;
// Sets the current get offset. This can be called from any thread.
diff --git a/gpu/command_buffer/common/command_buffer_mock.h b/gpu/command_buffer/common/command_buffer_mock.h
index 321c40d..3243d17 100644
--- a/gpu/command_buffer/common/command_buffer_mock.h
+++ b/gpu/command_buffer/common/command_buffer_mock.h
@@ -25,7 +25,6 @@ class MockCommandBuffer : public CommandBuffer {
MOCK_METHOD2(Initialize, bool(base::SharedMemory* buffer, int32 size));
MOCK_METHOD0(GetRingBuffer, Buffer());
MOCK_METHOD0(GetState, State());
- MOCK_METHOD0(GetLastState, State());
MOCK_METHOD1(Flush, void(int32 put_offset));
MOCK_METHOD2(FlushSync, State(int32 put_offset, int32 last_known_get));
MOCK_METHOD1(SetGetOffset, void(int32 get_offset));
diff --git a/gpu/command_buffer/common/constants.h b/gpu/command_buffer/common/constants.h
index 1b14636..c204e87 100644
--- a/gpu/command_buffer/common/constants.h
+++ b/gpu/command_buffer/common/constants.h
@@ -21,12 +21,28 @@ namespace error {
kUnknownCommand,
kInvalidArguments,
kLostContext,
- kGenericError
+ kGenericError,
+
+ // This is not an error. It is returned by WaitLatch when it is blocked.
+ // When blocked, the context will not reschedule itself until another
+ // context executes a SetLatch command.
+ kWaiting,
+
+ // This is not an error either. It just hints the scheduler that it can exit
+ // its loop, update state, and schedule other command buffers.
+ kYield
};
// Return true if the given error code is an actual error.
inline bool IsError(Error error) {
- return error != kNoError;
+ switch (error) {
+ case kNoError:
+ case kWaiting:
+ case kYield:
+ return false;
+ default:
+ return true;
+ }
}
// Provides finer grained information about why the context was lost.
diff --git a/gpu/command_buffer/common/gles2_cmd_format_autogen.h b/gpu/command_buffer/common/gles2_cmd_format_autogen.h
index b9d6c06..3b76346 100644
--- a/gpu/command_buffer/common/gles2_cmd_format_autogen.h
+++ b/gpu/command_buffer/common/gles2_cmd_format_autogen.h
@@ -8913,6 +8913,74 @@ COMPILE_ASSERT(offsetof(RequestExtensionCHROMIUM, header) == 0,
COMPILE_ASSERT(offsetof(RequestExtensionCHROMIUM, bucket_id) == 4,
OffsetOf_RequestExtensionCHROMIUM_bucket_id_not_4);
+struct SetLatchCHROMIUM {
+ typedef SetLatchCHROMIUM ValueType;
+ static const CommandId kCmdId = kSetLatchCHROMIUM;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+
+ static uint32 ComputeSize() {
+ return static_cast<uint32>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() {
+ header.SetCmd<ValueType>();
+ }
+
+ void Init(GLuint _latch_id) {
+ SetHeader();
+ latch_id = _latch_id;
+ }
+
+ void* Set(void* cmd, GLuint _latch_id) {
+ static_cast<ValueType*>(cmd)->Init(_latch_id);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ uint32 latch_id;
+};
+
+COMPILE_ASSERT(sizeof(SetLatchCHROMIUM) == 8,
+ Sizeof_SetLatchCHROMIUM_is_not_8);
+COMPILE_ASSERT(offsetof(SetLatchCHROMIUM, header) == 0,
+ OffsetOf_SetLatchCHROMIUM_header_not_0);
+COMPILE_ASSERT(offsetof(SetLatchCHROMIUM, latch_id) == 4,
+ OffsetOf_SetLatchCHROMIUM_latch_id_not_4);
+
+struct WaitLatchCHROMIUM {
+ typedef WaitLatchCHROMIUM ValueType;
+ static const CommandId kCmdId = kWaitLatchCHROMIUM;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+
+ static uint32 ComputeSize() {
+ return static_cast<uint32>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() {
+ header.SetCmd<ValueType>();
+ }
+
+ void Init(GLuint _latch_id) {
+ SetHeader();
+ latch_id = _latch_id;
+ }
+
+ void* Set(void* cmd, GLuint _latch_id) {
+ static_cast<ValueType*>(cmd)->Init(_latch_id);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ uint32 latch_id;
+};
+
+COMPILE_ASSERT(sizeof(WaitLatchCHROMIUM) == 8,
+ Sizeof_WaitLatchCHROMIUM_is_not_8);
+COMPILE_ASSERT(offsetof(WaitLatchCHROMIUM, header) == 0,
+ OffsetOf_WaitLatchCHROMIUM_header_not_0);
+COMPILE_ASSERT(offsetof(WaitLatchCHROMIUM, latch_id) == 4,
+ OffsetOf_WaitLatchCHROMIUM_latch_id_not_4);
+
struct SetSurfaceCHROMIUM {
typedef SetSurfaceCHROMIUM ValueType;
static const CommandId kCmdId = kSetSurfaceCHROMIUM;
diff --git a/gpu/command_buffer/common/gles2_cmd_format_test_autogen.h b/gpu/command_buffer/common/gles2_cmd_format_test_autogen.h
index 61513f5..40af555 100644
--- a/gpu/command_buffer/common/gles2_cmd_format_test_autogen.h
+++ b/gpu/command_buffer/common/gles2_cmd_format_test_autogen.h
@@ -3510,6 +3510,32 @@ TEST(GLES2FormatTest, RequestExtensionCHROMIUM) {
EXPECT_EQ(static_cast<uint32>(11), cmd.bucket_id);
}
+TEST(GLES2FormatTest, SetLatchCHROMIUM) {
+ SetLatchCHROMIUM cmd = { { 0 } };
+ void* next_cmd = cmd.Set(
+ &cmd,
+ static_cast<GLuint>(11));
+ EXPECT_EQ(static_cast<uint32>(SetLatchCHROMIUM::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<char*>(next_cmd),
+ reinterpret_cast<char*>(&cmd) + sizeof(cmd));
+ EXPECT_EQ(static_cast<GLuint>(11), cmd.latch_id);
+}
+
+TEST(GLES2FormatTest, WaitLatchCHROMIUM) {
+ WaitLatchCHROMIUM cmd = { { 0 } };
+ void* next_cmd = cmd.Set(
+ &cmd,
+ static_cast<GLuint>(11));
+ EXPECT_EQ(static_cast<uint32>(WaitLatchCHROMIUM::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<char*>(next_cmd),
+ reinterpret_cast<char*>(&cmd) + sizeof(cmd));
+ EXPECT_EQ(static_cast<GLuint>(11), cmd.latch_id);
+}
+
TEST(GLES2FormatTest, SetSurfaceCHROMIUM) {
SetSurfaceCHROMIUM cmd = { { 0 } };
void* next_cmd = cmd.Set(
diff --git a/gpu/command_buffer/common/gles2_cmd_ids_autogen.h b/gpu/command_buffer/common/gles2_cmd_ids_autogen.h
index e164a51..25bf081 100644
--- a/gpu/command_buffer/common/gles2_cmd_ids_autogen.h
+++ b/gpu/command_buffer/common/gles2_cmd_ids_autogen.h
@@ -205,6 +205,8 @@
OP(ResizeCHROMIUM) /* 448 */ \
OP(GetRequestableExtensionsCHROMIUM) /* 449 */ \
OP(RequestExtensionCHROMIUM) /* 450 */ \
+ OP(SetLatchCHROMIUM) /* 451 */ \
+ OP(WaitLatchCHROMIUM) /* 452 */ \
OP(SetSurfaceCHROMIUM) /* 453 */ \
OP(GetMultipleIntegervCHROMIUM) /* 454 */ \
OP(GetProgramInfoCHROMIUM) /* 455 */ \
diff --git a/gpu/command_buffer/common/gles2_cmd_utils_implementation_autogen.h b/gpu/command_buffer/common/gles2_cmd_utils_implementation_autogen.h
index d83c3c1..1988472 100644
--- a/gpu/command_buffer/common/gles2_cmd_utils_implementation_autogen.h
+++ b/gpu/command_buffer/common/gles2_cmd_utils_implementation_autogen.h
@@ -179,7 +179,7 @@ static GLES2Util::EnumToString enum_to_string_table[] = {
{ 0x00000400, "GL_STENCIL_BUFFER_BIT", },
{ 0x800A, "GL_FUNC_SUBTRACT", },
{ 0x8E2C, "GL_DEPTH_COMPONENT16_NONLINEAR_NV", },
- { 0x8508, "GL_DECR_WRAP", },
+ { 0x889F, "GL_VERTEX_ATTRIB_ARRAY_BUFFER_BINDING", },
{ 0x8006, "GL_FUNC_ADD", },
{ 0x8007, "GL_MIN_EXT", },
{ 0x8004, "GL_ONE_MINUS_CONSTANT_ALPHA", },
@@ -401,7 +401,7 @@ static GLES2Util::EnumToString enum_to_string_table[] = {
{ 0x80CA, "GL_BLEND_DST_ALPHA", },
{ 0x8CD6, "GL_FRAMEBUFFER_INCOMPLETE_ATTACHMENT", },
{ 0x8872, "GL_MAX_TEXTURE_IMAGE_UNITS", },
- { 0x889F, "GL_VERTEX_ATTRIB_ARRAY_BUFFER_BINDING", },
+ { 0x8508, "GL_DECR_WRAP", },
{ 0x8507, "GL_INCR_WRAP", },
{ 0x8895, "GL_ELEMENT_ARRAY_BUFFER_BINDING", },
{ 0x8894, "GL_ARRAY_BUFFER_BINDING", },
diff --git a/gpu/command_buffer/service/cmd_parser.cc b/gpu/command_buffer/service/cmd_parser.cc
index fba06e6..9ed3fca 100644
--- a/gpu/command_buffer/service/cmd_parser.cc
+++ b/gpu/command_buffer/service/cmd_parser.cc
@@ -64,7 +64,7 @@ error::Error CommandParser::ProcessCommand() {
}
// If get was not set somewhere else advance it.
- if (get == get_)
+ if (result != error::kWaiting && get == get_)
get_ = (get + header.size) % entry_count_;
return result;
}
diff --git a/gpu/command_buffer/service/cmd_parser_test.cc b/gpu/command_buffer/service/cmd_parser_test.cc
index 857ca8e..315a475 100644
--- a/gpu/command_buffer/service/cmd_parser_test.cc
+++ b/gpu/command_buffer/service/cmd_parser_test.cc
@@ -288,4 +288,28 @@ TEST_F(CommandParserTest, TestError) {
Mock::VerifyAndClearExpectations(api_mock());
}
+TEST_F(CommandParserTest, TestWaiting) {
+ const unsigned int kNumEntries = 5;
+ scoped_ptr<CommandParser> parser(MakeParser(kNumEntries));
+ CommandBufferOffset put = parser->put();
+ CommandHeader header;
+
+ // Generate a command with size 1.
+ header.size = 1;
+ header.command = 3;
+ buffer()[put++].value_header = header;
+
+ parser->set_put(put);
+ // A command that returns kWaiting should not advance the get pointer.
+ AddDoCommandExpect(error::kWaiting, 3, 0, NULL);
+ EXPECT_EQ(error::kWaiting, parser->ProcessAllCommands());
+ EXPECT_EQ(0, parser->get());
+ Mock::VerifyAndClearExpectations(api_mock());
+ // Not waiting should advance the get pointer.
+ AddDoCommandExpect(error::kNoError, 3, 0, NULL);
+ EXPECT_EQ(error::kNoError, parser->ProcessAllCommands());
+ EXPECT_EQ(put, parser->get());
+ Mock::VerifyAndClearExpectations(api_mock());
+}
+
} // namespace gpu
diff --git a/gpu/command_buffer/service/command_buffer_service.cc b/gpu/command_buffer/service/command_buffer_service.cc
index 26ccbee..064341d 100644
--- a/gpu/command_buffer/service/command_buffer_service.cc
+++ b/gpu/command_buffer/service/command_buffer_service.cc
@@ -104,10 +104,6 @@ CommandBufferService::State CommandBufferService::GetState() {
return state;
}
-CommandBufferService::State CommandBufferService::GetLastState() {
- return GetState();
-}
-
CommandBufferService::State CommandBufferService::FlushSync(
int32 put_offset, int32 last_known_get) {
if (put_offset < 0 || put_offset > num_entries_) {
@@ -118,7 +114,7 @@ CommandBufferService::State CommandBufferService::FlushSync(
put_offset_ = put_offset;
if (put_offset_change_callback_.get()) {
- put_offset_change_callback_->Run();
+ put_offset_change_callback_->Run(last_known_get == get_offset_);
}
return GetState();
@@ -133,7 +129,7 @@ void CommandBufferService::Flush(int32 put_offset) {
put_offset_ = put_offset;
if (put_offset_change_callback_.get()) {
- put_offset_change_callback_->Run();
+ put_offset_change_callback_->Run(false);
}
}
@@ -265,7 +261,7 @@ void CommandBufferService::SetContextLostReason(
}
void CommandBufferService::SetPutOffsetChangeCallback(
- Callback0::Type* callback) {
+ Callback1<bool>::Type* callback) {
put_offset_change_callback_.reset(callback);
}
diff --git a/gpu/command_buffer/service/command_buffer_service.h b/gpu/command_buffer/service/command_buffer_service.h
index c388e9f..9c52531 100644
--- a/gpu/command_buffer/service/command_buffer_service.h
+++ b/gpu/command_buffer/service/command_buffer_service.h
@@ -29,7 +29,6 @@ class CommandBufferService : public CommandBuffer {
virtual bool Initialize(base::SharedMemory* buffer, int32 size);
virtual Buffer GetRingBuffer();
virtual State GetState();
- virtual State GetLastState();
virtual void Flush(int32 put_offset);
virtual State FlushSync(int32 put_offset, int32 last_known_get);
virtual void SetGetOffset(int32 get_offset);
@@ -51,7 +50,7 @@ class CommandBufferService : public CommandBuffer {
// writer a means of waiting for the reader to make some progress before
// attempting to write more to the command buffer. Takes ownership of
// callback.
- virtual void SetPutOffsetChangeCallback(Callback0::Type* callback);
+ virtual void SetPutOffsetChangeCallback(Callback1<bool>::Type* callback);
virtual void SetParseErrorCallback(Callback0::Type* callback);
private:
@@ -59,7 +58,7 @@ class CommandBufferService : public CommandBuffer {
int32 num_entries_;
int32 get_offset_;
int32 put_offset_;
- scoped_ptr<Callback0::Type> put_offset_change_callback_;
+ scoped_ptr<Callback1<bool>::Type> put_offset_change_callback_;
scoped_ptr<Callback0::Type> parse_error_callback_;
std::vector<Buffer> registered_objects_;
std::set<int32> unused_registered_object_elements_;
diff --git a/gpu/command_buffer/service/common_decoder.cc b/gpu/command_buffer/service/common_decoder.cc
index 7b28603..35eaf66 100644
--- a/gpu/command_buffer/service/common_decoder.cc
+++ b/gpu/command_buffer/service/common_decoder.cc
@@ -330,4 +330,10 @@ error::Error CommonDecoder::HandleGetBucketData(
return error::kNoError;
}
+error::Error CommonDecoder::HandleYieldScheduler(
+ uint32 immediate_data_size,
+ const cmd::YieldScheduler& args) {
+ return error::kYield;
+}
+
} // namespace gpu
diff --git a/gpu/command_buffer/service/common_decoder_unittest.cc b/gpu/command_buffer/service/common_decoder_unittest.cc
index 8f88398..9b53a56 100644
--- a/gpu/command_buffer/service/common_decoder_unittest.cc
+++ b/gpu/command_buffer/service/common_decoder_unittest.cc
@@ -556,5 +556,11 @@ TEST_F(CommonDecoderTest, GetBucketData) {
EXPECT_NE(error::kNoError, ExecuteCmd(cmd));
}
+TEST_F(CommonDecoderTest, YieldScheduler) {
+ cmd::YieldScheduler cmd;
+ cmd.Init();
+ EXPECT_EQ(error::kYield, ExecuteCmd(cmd));
+}
+
} // namespace gpu
diff --git a/gpu/command_buffer/service/gles2_cmd_decoder.cc b/gpu/command_buffer/service/gles2_cmd_decoder.cc
index 1dcfd51..121dbd0 100644
--- a/gpu/command_buffer/service/gles2_cmd_decoder.cc
+++ b/gpu/command_buffer/service/gles2_cmd_decoder.cc
@@ -479,6 +479,7 @@ class GLES2DecoderImpl : public base::SupportsWeakPtr<GLES2DecoderImpl>,
virtual void SetResizeCallback(Callback1<gfx::Size>::Type* callback);
virtual void SetSwapBuffersCallback(Callback0::Type* callback);
+ virtual void SetLatchCallback(const base::Callback<void(bool)>& callback);
virtual bool GetServiceTextureId(uint32 client_texture_id,
uint32* service_texture_id);
@@ -1271,6 +1272,7 @@ class GLES2DecoderImpl : public base::SupportsWeakPtr<GLES2DecoderImpl>,
scoped_ptr<Callback1<gfx::Size>::Type> resize_callback_;
scoped_ptr<Callback0::Type> swap_buffers_callback_;
+ base::Callback<void(bool)> latch_callback_;
// The format of the back buffer_
GLenum back_buffer_color_format_;
@@ -2358,6 +2360,11 @@ void GLES2DecoderImpl::SetSwapBuffersCallback(Callback0::Type* callback) {
swap_buffers_callback_.reset(callback);
}
+void GLES2DecoderImpl::SetLatchCallback(
+ const base::Callback<void(bool)>& callback) {
+ latch_callback_ = callback;
+}
+
bool GLES2DecoderImpl::GetServiceTextureId(uint32 client_texture_id,
uint32* service_texture_id) {
TextureManager::TextureInfo* texture =
@@ -6529,6 +6536,62 @@ error::Error GLES2DecoderImpl::HandleSwapBuffers(
return error::kNoError;
}
+error::Error GLES2DecoderImpl::HandleSetLatchCHROMIUM(
+ uint32 immediate_data_size, const gles2::SetLatchCHROMIUM& c) {
+ TRACE_EVENT1("gpu", "SetLatch", "latch_id", c.latch_id);
+ // Ensure the side effects of previous commands are visible to other contexts.
+ // There is no need to do this for ANGLE because it uses a
+ // single D3D device for all contexts.
+ if (!IsAngle())
+ glFlush();
+
+ int32 shm_id = gpu::kLatchSharedMemoryId;
+ uint32 latch_id = c.latch_id;
+ uint32 shm_offset = 0;
+ base::subtle::Atomic32* latch;
+ if (!SafeMultiplyUint32(latch_id, sizeof(*latch), &shm_offset)) {
+ return error::kOutOfBounds;
+ }
+ latch = GetSharedMemoryAs<base::subtle::Atomic32*>(
+ shm_id, shm_offset, sizeof(*latch));
+ if (!latch) {
+ return error::kOutOfBounds;
+ }
+ base::subtle::Atomic32 old =
+ base::subtle::NoBarrier_CompareAndSwap(latch, 0, 1);
+ DCHECK(old == 0);
+ if (!latch_callback_.is_null())
+ latch_callback_.Run(true);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleWaitLatchCHROMIUM(
+ uint32 immediate_data_size, const gles2::WaitLatchCHROMIUM& c) {
+ TRACE_EVENT1("gpu", "WaitLatch", "latch_id", c.latch_id);
+ int32 shm_id = gpu::kLatchSharedMemoryId;
+ uint32 latch_id = c.latch_id;
+ uint32 shm_offset = 0;
+ base::subtle::Atomic32* latch;
+ if (!SafeMultiplyUint32(latch_id, sizeof(*latch), &shm_offset)) {
+ return error::kOutOfBounds;
+ }
+ latch = GetSharedMemoryAs<base::subtle::Atomic32*>(
+ shm_id, shm_offset, sizeof(*latch));
+ if (!latch) {
+ return error::kOutOfBounds;
+ }
+
+ base::subtle::Atomic32 old =
+ base::subtle::NoBarrier_CompareAndSwap(latch, 1, 0);
+ if (old == 0) {
+ if (!latch_callback_.is_null())
+ latch_callback_.Run(false);
+ return error::kWaiting;
+ } else {
+ return error::kNoError;
+ }
+}
+
error::Error GLES2DecoderImpl::HandleCommandBufferEnableCHROMIUM(
uint32 immediate_data_size, const gles2::CommandBufferEnableCHROMIUM& c) {
Bucket* bucket = GetBucket(c.bucket_id);
diff --git a/gpu/command_buffer/service/gles2_cmd_decoder.h b/gpu/command_buffer/service/gles2_cmd_decoder.h
index 23c5e3a..abd2b85 100644
--- a/gpu/command_buffer/service/gles2_cmd_decoder.h
+++ b/gpu/command_buffer/service/gles2_cmd_decoder.h
@@ -110,6 +110,11 @@ class GLES2Decoder : public CommonDecoder {
// Sets a callback which is called when a SwapBuffers command is processed.
virtual void SetSwapBuffersCallback(Callback0::Type* callback) = 0;
+ // Sets a callback which is called after a Set/WaitLatch command is processed.
+ // The bool parameter will be true for SetLatch, and false for a WaitLatch
+ // that is blocked. An unblocked WaitLatch will not trigger a callback.
+ virtual void SetLatchCallback(const base::Callback<void(bool)>& callback) = 0;
+
// Get the service texture ID corresponding to a client texture ID.
// If no such record is found then return false.
virtual bool GetServiceTextureId(uint32 client_texture_id,
diff --git a/gpu/command_buffer/service/gles2_cmd_decoder_unittest.cc b/gpu/command_buffer/service/gles2_cmd_decoder_unittest.cc
index b5997f5..eadfbcd 100644
--- a/gpu/command_buffer/service/gles2_cmd_decoder_unittest.cc
+++ b/gpu/command_buffer/service/gles2_cmd_decoder_unittest.cc
@@ -2893,6 +2893,65 @@ TEST_F(GLES2DecoderWithShaderTest, VertexAttribPointer) {
}
}
+TEST_F(GLES2DecoderTest, SetLatch) {
+ bool isAngle = false;
+#if defined(OS_WIN)
+ isAngle = (gfx::GetGLImplementation() == gfx::kGLImplementationEGLGLES2);
+#endif
+ if (!isAngle) {
+ EXPECT_CALL(*gl_, Flush()).Times(3);
+ }
+ const uint32 kLatchId = 1;
+ base::subtle::Atomic32* latches = static_cast<base::subtle::Atomic32*>(
+ shared_memory_base_);
+ const uint32 kInvalidLatchId = kSharedBufferSize / sizeof(*latches);
+ const uint32 kLastValidLatchId = kInvalidLatchId - 1;
+ latches[kLatchId] = 0;
+ latches[kLastValidLatchId] = 0;
+ SetLatchCHROMIUM cmd;
+ // Check out of range latch id.
+ cmd.Init(kInvalidLatchId);
+ EXPECT_EQ(error::kOutOfBounds, ExecuteCmd(cmd));
+ cmd.Init(kLatchId);
+ // Check valid latch.
+ EXPECT_EQ(0, latches[kLatchId]);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(1, latches[kLatchId]);
+ // Check last valid latch.
+ EXPECT_EQ(0, latches[kLastValidLatchId]);
+ cmd.Init(kLastValidLatchId);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(1, latches[kLastValidLatchId]);
+}
+
+TEST_F(GLES2DecoderTest, WaitLatch) {
+ const uint32 kLatchId = 1;
+ base::subtle::Atomic32* latches = static_cast<base::subtle::Atomic32*>(
+ shared_memory_base_);
+ const uint32 kInvalidLatchId = kSharedBufferSize / sizeof(*latches);
+ const uint32 kLastValidLatchId = kInvalidLatchId - 1;
+ latches[kLatchId] = 0;
+ latches[kLastValidLatchId] = 0;
+ WaitLatchCHROMIUM cmd;
+ // Check out of range latch id.
+ cmd.Init(kInvalidLatchId);
+ EXPECT_EQ(error::kOutOfBounds, ExecuteCmd(cmd));
+ // Check valid latch.
+ cmd.Init(kLatchId);
+ EXPECT_EQ(0, latches[kLatchId]);
+ EXPECT_EQ(error::kWaiting, ExecuteCmd(cmd));
+ latches[kLatchId] = 1;
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(0, latches[kLatchId]);
+ // Check last valid latch.
+ cmd.Init(kLastValidLatchId);
+ EXPECT_EQ(0, latches[kLastValidLatchId]);
+ EXPECT_EQ(error::kWaiting, ExecuteCmd(cmd));
+ latches[kLastValidLatchId] = 1;
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(0, latches[kLastValidLatchId]);
+}
+
TEST_F(GLES2DecoderTest, SetSurfaceCHROMIUMChangesSurfaceForExistentSurface) {
const int kSurfaceId = 1;
scoped_refptr<gfx::GLSurfaceStub> surface(new gfx::GLSurfaceStub);
diff --git a/gpu/command_buffer/service/gles2_cmd_decoder_unittest_2_autogen.h b/gpu/command_buffer/service/gles2_cmd_decoder_unittest_2_autogen.h
index 05f80a3..c5f5594 100644
--- a/gpu/command_buffer/service/gles2_cmd_decoder_unittest_2_autogen.h
+++ b/gpu/command_buffer/service/gles2_cmd_decoder_unittest_2_autogen.h
@@ -1712,6 +1712,7 @@ TEST_F(GLES2DecoderTest2, ViewportInvalidArgs3_0) {
// TODO(gman): RequestExtensionCHROMIUM
-// TODO(gman): SetSurfaceCHROMIUM
+// TODO(gman): SetLatchCHROMIUM
+
#endif // GPU_COMMAND_BUFFER_SERVICE_GLES2_CMD_DECODER_UNITTEST_2_AUTOGEN_H_
diff --git a/gpu/command_buffer/service/gles2_cmd_decoder_unittest_3_autogen.h b/gpu/command_buffer/service/gles2_cmd_decoder_unittest_3_autogen.h
index cab6b33..54bfdf6 100644
--- a/gpu/command_buffer/service/gles2_cmd_decoder_unittest_3_autogen.h
+++ b/gpu/command_buffer/service/gles2_cmd_decoder_unittest_3_autogen.h
@@ -10,6 +10,9 @@
#ifndef GPU_COMMAND_BUFFER_SERVICE_GLES2_CMD_DECODER_UNITTEST_3_AUTOGEN_H_
#define GPU_COMMAND_BUFFER_SERVICE_GLES2_CMD_DECODER_UNITTEST_3_AUTOGEN_H_
+// TODO(gman): WaitLatchCHROMIUM
+
+// TODO(gman): SetSurfaceCHROMIUM
// TODO(gman): GetMultipleIntegervCHROMIUM
// TODO(gman): GetProgramInfoCHROMIUM
diff --git a/gpu/command_buffer/service/gpu_scheduler.cc b/gpu/command_buffer/service/gpu_scheduler.cc
index fbdb16b..9365118 100644
--- a/gpu/command_buffer/service/gpu_scheduler.cc
+++ b/gpu/command_buffer/service/gpu_scheduler.cc
@@ -19,37 +19,41 @@ using ::base::SharedMemory;
namespace gpu {
-GpuScheduler* GpuScheduler::Create(CommandBuffer* command_buffer,
- SurfaceManager* surface_manager,
- gles2::ContextGroup* group) {
+GpuScheduler::GpuScheduler(CommandBuffer* command_buffer,
+ SurfaceManager* surface_manager,
+ gles2::ContextGroup* group)
+ : command_buffer_(command_buffer),
+ commands_per_update_(100),
+ unscheduled_count_(0),
+#if defined(OS_MACOSX) || defined(TOUCH_UI)
+ swap_buffers_count_(0),
+ acknowledged_swap_buffers_count_(0),
+#endif
+ method_factory_(ALLOW_THIS_IN_INITIALIZER_LIST(this)) {
DCHECK(command_buffer);
-
- gles2::GLES2Decoder* decoder =
- gles2::GLES2Decoder::Create(surface_manager, group);
-
- GpuScheduler* scheduler = new GpuScheduler(command_buffer,
- decoder,
- NULL);
-
- decoder->set_engine(scheduler);
-
+ decoder_.reset(gles2::GLES2Decoder::Create(surface_manager, group));
+ decoder_->set_engine(this);
if (CommandLine::ForCurrentProcess()->HasSwitch(
switches::kEnableGPUServiceLogging)) {
- decoder->set_debug(true);
+ decoder_->set_debug(true);
}
-
- return scheduler;
}
-GpuScheduler* GpuScheduler::CreateForTests(CommandBuffer* command_buffer,
- gles2::GLES2Decoder* decoder,
- CommandParser* parser) {
+GpuScheduler::GpuScheduler(CommandBuffer* command_buffer,
+ gles2::GLES2Decoder* decoder,
+ CommandParser* parser,
+ int commands_per_update)
+ : command_buffer_(command_buffer),
+ commands_per_update_(commands_per_update),
+ unscheduled_count_(0),
+#if defined(OS_MACOSX) || defined(TOUCH_UI)
+ swap_buffers_count_(0),
+ acknowledged_swap_buffers_count_(0),
+#endif
+ method_factory_(ALLOW_THIS_IN_INITIALIZER_LIST(this)) {
DCHECK(command_buffer);
- GpuScheduler* scheduler = new GpuScheduler(command_buffer,
- decoder,
- parser);
-
- return scheduler;
+ decoder_.reset(decoder);
+ parser_.reset(parser);
}
GpuScheduler::~GpuScheduler() {
@@ -78,6 +82,11 @@ bool GpuScheduler::InitializeCommon(
}
#endif
+ // Do not limit to a certain number of commands before scheduling another
+ // update when rendering onscreen.
+ if (!surface->IsOffscreen())
+ commands_per_update_ = INT_MAX;
+
// Map the ring buffer and create the parser.
Buffer ring_buffer = command_buffer_->GetRingBuffer();
if (ring_buffer.ptr) {
@@ -135,16 +144,29 @@ const unsigned int kMaxOutstandingSwapBuffersCallsPerOnscreenContext = 1;
}
#endif
-void GpuScheduler::PutChanged() {
+void GpuScheduler::PutChanged(bool sync) {
TRACE_EVENT1("gpu", "GpuScheduler:PutChanged", "this", this);
+ CommandBuffer::State state = command_buffer_->GetState();
+ parser_->set_put(state.put_offset);
- DCHECK(IsScheduled());
+ if (sync)
+ ProcessCommands();
+ else
+ ScheduleProcessCommands();
+}
+void GpuScheduler::ProcessCommands() {
+ TRACE_EVENT1("gpu", "GpuScheduler:ProcessCommands", "this", this);
CommandBuffer::State state = command_buffer_->GetState();
- parser_->set_put(state.put_offset);
if (state.error != error::kNoError)
return;
+ if (unscheduled_count_ > 0) {
+ TRACE_EVENT1("gpu", "EarlyOut_Unscheduled",
+ "unscheduled_count_", unscheduled_count_);
+ return;
+ }
+
if (decoder_.get()) {
if (!decoder_->MakeCurrent()) {
LOG(ERROR) << "Context lost because MakeCurrent failed.";
@@ -162,30 +184,60 @@ void GpuScheduler::PutChanged() {
#if defined(OS_MACOSX) || defined(TOUCH_UI)
// Don't swamp the browser process with SwapBuffers calls it can't handle.
- DCHECK(!do_rate_limiting ||
- swap_buffers_count_ - acknowledged_swap_buffers_count_ == 0);
+ if (do_rate_limiting &&
+ swap_buffers_count_ - acknowledged_swap_buffers_count_ >=
+ kMaxOutstandingSwapBuffersCallsPerOnscreenContext) {
+ TRACE_EVENT0("gpu", "EarlyOut_OSX_Throttle");
+ // Stop doing work on this command buffer. In the GPU process,
+ // receipt of the GpuMsg_AcceleratedSurfaceBuffersSwappedACK
+ // message causes ProcessCommands to be scheduled again.
+ return;
+ }
#endif
+ base::TimeTicks start_time = base::TimeTicks::Now();
+ base::TimeDelta elapsed;
+ bool is_break = false;
error::Error error = error::kNoError;
- while (!parser_->IsEmpty()) {
- error = parser_->ProcessCommand();
-
- // TODO(piman): various classes duplicate various pieces of state, leading
- // to needlessly complex update logic. It should be possible to simply
- // share the state across all of them.
- command_buffer_->SetGetOffset(static_cast<int32>(parser_->get()));
-
- if (error::IsError(error)) {
- command_buffer_->SetContextLostReason(decoder_->GetContextLostReason());
- command_buffer_->SetParseError(error);
- return;
+ do {
+ int commands_processed = 0;
+ while (commands_processed < commands_per_update_ &&
+ !parser_->IsEmpty()) {
+ error = parser_->ProcessCommand();
+
+ // TODO(piman): various classes duplicate various pieces of state, leading
+ // to needlessly complex update logic. It should be possible to simply
+ // share the state across all of them.
+ command_buffer_->SetGetOffset(static_cast<int32>(parser_->get()));
+
+ if (error == error::kWaiting || error == error::kYield) {
+ is_break = true;
+ break;
+ } else if (error::IsError(error)) {
+ command_buffer_->SetContextLostReason(decoder_->GetContextLostReason());
+ command_buffer_->SetParseError(error);
+ return;
+ }
+
+ if (unscheduled_count_ > 0) {
+ is_break = true;
+ break;
+ }
+
+ ++commands_processed;
+ if (command_processed_callback_.get()) {
+ command_processed_callback_->Run();
+ }
}
-
- if (command_processed_callback_.get())
- command_processed_callback_->Run();
-
- if (unscheduled_count_ > 0)
- return;
+ elapsed = base::TimeTicks::Now() - start_time;
+ } while(!is_break &&
+ !parser_->IsEmpty() &&
+ elapsed.InMicroseconds() < kMinimumSchedulerQuantumMicros);
+
+ if (unscheduled_count_ == 0 &&
+ error != error::kWaiting &&
+ !parser_->IsEmpty()) {
+ ScheduleProcessCommands();
}
}
@@ -197,8 +249,12 @@ void GpuScheduler::SetScheduled(bool scheduled) {
--unscheduled_count_;
DCHECK_GE(unscheduled_count_, 0);
- if (unscheduled_count_ == 0 && scheduled_callback_.get())
- scheduled_callback_->Run();
+ if (unscheduled_count_ == 0) {
+ if (scheduled_callback_.get())
+ scheduled_callback_->Run();
+
+ ScheduleProcessCommands();
+ }
} else {
++unscheduled_count_;
}
@@ -264,18 +320,10 @@ void GpuScheduler::SetTokenCallback(
set_token_callback_ = callback;
}
-GpuScheduler::GpuScheduler(CommandBuffer* command_buffer,
- gles2::GLES2Decoder* decoder,
- CommandParser* parser)
- : command_buffer_(command_buffer),
- decoder_(decoder),
- parser_(parser),
- unscheduled_count_(0),
-#if defined(OS_MACOSX) || defined(TOUCH_UI)
- swap_buffers_count_(0),
- acknowledged_swap_buffers_count_(0),
-#endif
- method_factory_(ALLOW_THIS_IN_INITIALIZER_LIST(this)) {
+void GpuScheduler::ScheduleProcessCommands() {
+ MessageLoop::current()->PostTask(
+ FROM_HERE,
+ method_factory_.NewRunnableMethod(&GpuScheduler::ProcessCommands));
}
void GpuScheduler::WillResize(gfx::Size size) {
diff --git a/gpu/command_buffer/service/gpu_scheduler.h b/gpu/command_buffer/service/gpu_scheduler.h
index 30af90f..eedae30 100644
--- a/gpu/command_buffer/service/gpu_scheduler.h
+++ b/gpu/command_buffer/service/gpu_scheduler.h
@@ -43,15 +43,20 @@ class ContextGroup;
// posts tasks to the current message loop to do additional work.
class GpuScheduler : public CommandBufferEngine {
public:
+ // Scheduler quantum: makes ProcessCommands continue until the specified time
+ // has passed, or the command buffer yields or runs out of commands.
+ static const int kMinimumSchedulerQuantumMicros = 2000;
+
// If a group is not passed in one will be created.
- static GpuScheduler* Create(CommandBuffer* command_buffer,
- SurfaceManager* surface_manager,
- gles2::ContextGroup* group);
+ GpuScheduler(CommandBuffer* command_buffer,
+ SurfaceManager* surface_manager,
+ gles2::ContextGroup* group);
// This constructor is for unit tests.
- static GpuScheduler* CreateForTests(CommandBuffer* command_buffer,
- gles2::GLES2Decoder* decoder,
- CommandParser* parser);
+ GpuScheduler(CommandBuffer* command_buffer,
+ gles2::GLES2Decoder* decoder,
+ CommandParser* parser,
+ int commands_per_update);
virtual ~GpuScheduler();
@@ -68,7 +73,7 @@ class GpuScheduler : public CommandBufferEngine {
bool SetParent(GpuScheduler* parent_scheduler, uint32 parent_texture_id);
- void PutChanged();
+ void PutChanged(bool sync);
// Sets whether commands should be processed by this scheduler. Setting to
// false unschedules. Setting to true reschedules. Whether or not the
@@ -146,6 +151,13 @@ class GpuScheduler : public CommandBufferEngine {
void SetCommandProcessedCallback(Callback0::Type* callback);
+ // Sets a callback which is called after a Set/WaitLatch command is processed.
+ // The bool parameter will be true for SetLatch, and false for a WaitLatch
+ // that is blocked. An unblocked WaitLatch will not trigger a callback.
+ void SetLatchCallback(const base::Callback<void(bool)>& callback) {
+ decoder_->SetLatchCallback(callback);
+ }
+
// Sets a callback which is called when set_token() is called, and passes the
// just-set token to the callback. DCHECKs that no callback has previously
// been registered for this notification.
@@ -166,10 +178,8 @@ class GpuScheduler : public CommandBufferEngine {
private:
- // If a group is not passed in one will be created.
- GpuScheduler(CommandBuffer* command_buffer,
- gles2::GLES2Decoder* decoder,
- CommandParser* parser);
+ // Helper which causes a call to ProcessCommands to be scheduled later.
+ void ScheduleProcessCommands();
// Called via a callback just before we are supposed to call the
// user's resize callback.
@@ -178,12 +188,15 @@ class GpuScheduler : public CommandBufferEngine {
// Called via a callback just before we are supposed to call the
// user's swap buffers callback.
void WillSwapBuffers();
+ void ProcessCommands();
// The GpuScheduler holds a weak reference to the CommandBuffer. The
// CommandBuffer owns the GpuScheduler and holds a strong reference to it
// through the ProcessCommands callback.
CommandBuffer* command_buffer_;
+ int commands_per_update_;
+
scoped_ptr<gles2::GLES2Decoder> decoder_;
scoped_ptr<CommandParser> parser_;
diff --git a/gpu/command_buffer/service/gpu_scheduler_unittest.cc b/gpu/command_buffer/service/gpu_scheduler_unittest.cc
index 4fb54b4..3d21f90 100644
--- a/gpu/command_buffer/service/gpu_scheduler_unittest.cc
+++ b/gpu/command_buffer/service/gpu_scheduler_unittest.cc
@@ -44,7 +44,7 @@ class GpuSchedulerTest : public testing::Test {
ON_CALL(*command_buffer_.get(), GetState())
.WillByDefault(Return(default_state));
- async_api_.reset(new StrictMock<AsyncAPIMock>);
+ async_api_.reset(new StrictMock<SpecializedDoCommandAsyncAPIMock>);
decoder_ = new gles2::MockGLES2Decoder();
@@ -55,9 +55,10 @@ class GpuSchedulerTest : public testing::Test {
0,
async_api_.get());
- scheduler_.reset(gpu::GpuScheduler::CreateForTests(command_buffer_.get(),
- decoder_,
- parser_));
+ scheduler_.reset(new GpuScheduler(command_buffer_.get(),
+ decoder_,
+ parser_,
+ 2));
EXPECT_CALL(*decoder_, Destroy())
.Times(1)
@@ -96,7 +97,7 @@ TEST_F(GpuSchedulerTest, SchedulerDoesNothingIfRingBufferIsEmpty) {
EXPECT_CALL(*command_buffer_, SetParseError(_))
.Times(0);
- scheduler_->PutChanged();
+ scheduler_->PutChanged(true);
}
TEST_F(GpuSchedulerTest, ProcessesOneCommand) {
@@ -118,7 +119,7 @@ TEST_F(GpuSchedulerTest, ProcessesOneCommand) {
EXPECT_CALL(*command_buffer_, SetParseError(_))
.Times(0);
- scheduler_->PutChanged();
+ scheduler_->PutChanged(true);
}
TEST_F(GpuSchedulerTest, ProcessesTwoCommands) {
@@ -143,7 +144,7 @@ TEST_F(GpuSchedulerTest, ProcessesTwoCommands) {
.WillOnce(Return(error::kNoError));
EXPECT_CALL(*command_buffer_, SetGetOffset(3));
- scheduler_->PutChanged();
+ scheduler_->PutChanged(true);
}
TEST_F(GpuSchedulerTest, SchedulerSetsTheGLContext) {
@@ -156,7 +157,48 @@ TEST_F(GpuSchedulerTest, SchedulerSetsTheGLContext) {
EXPECT_CALL(*command_buffer_, GetState())
.WillRepeatedly(Return(state));
- scheduler_->PutChanged();
+ scheduler_->PutChanged(true);
+}
+
+TEST_F(GpuSchedulerTest, PostsTaskToFinishRemainingCommands) {
+ unsigned int pauseCmd = SpecializedDoCommandAsyncAPIMock::kTestQuantumCommand;
+ CommandHeader* header = reinterpret_cast<CommandHeader*>(&buffer_[0]);
+ header[0].command = 7;
+ header[0].size = 2;
+ buffer_[1] = 123;
+ header[2].command = pauseCmd;
+ header[2].size = 1;
+ header[3].command = 9;
+ header[3].size = 1;
+
+ CommandBuffer::State state;
+
+ state.put_offset = 4;
+ EXPECT_CALL(*command_buffer_, GetState())
+ .WillRepeatedly(Return(state));
+
+ EXPECT_CALL(*async_api_, DoCommand(7, 1, &buffer_[0]))
+ .WillOnce(Return(error::kNoError));
+ EXPECT_CALL(*command_buffer_, SetGetOffset(2));
+
+ EXPECT_CALL(*async_api_, DoCommand(pauseCmd, 0, &buffer_[2]))
+ .WillOnce(Return(error::kNoError));
+ EXPECT_CALL(*command_buffer_, SetGetOffset(3));
+
+ scheduler_->PutChanged(true);
+
+ // ProcessCommands is called a second time when the pending task is run.
+
+ state.put_offset = 4;
+ EXPECT_CALL(*command_buffer_, GetState())
+ .WillRepeatedly(Return(state));
+
+ EXPECT_CALL(*async_api_, DoCommand(9, 0, &buffer_[3]))
+ .WillOnce(Return(error::kNoError));
+
+ EXPECT_CALL(*command_buffer_, SetGetOffset(4));
+
+ MessageLoop::current()->RunAllPending();
}
TEST_F(GpuSchedulerTest, SetsErrorCodeOnCommandBuffer) {
@@ -180,7 +222,7 @@ TEST_F(GpuSchedulerTest, SetsErrorCodeOnCommandBuffer) {
EXPECT_CALL(*command_buffer_,
SetParseError(error::kUnknownCommand));
- scheduler_->PutChanged();
+ scheduler_->PutChanged(true);
}
TEST_F(GpuSchedulerTest, ProcessCommandsDoesNothingAfterError) {
@@ -190,7 +232,7 @@ TEST_F(GpuSchedulerTest, ProcessCommandsDoesNothingAfterError) {
EXPECT_CALL(*command_buffer_, GetState())
.WillRepeatedly(Return(state));
- scheduler_->PutChanged();
+ scheduler_->PutChanged(true);
}
TEST_F(GpuSchedulerTest, CanGetAddressOfSharedMemory) {
diff --git a/gpu/command_buffer/service/mocks.cc b/gpu/command_buffer/service/mocks.cc
index 46a8977..70898b3 100644
--- a/gpu/command_buffer/service/mocks.cc
+++ b/gpu/command_buffer/service/mocks.cc
@@ -27,6 +27,25 @@ void AsyncAPIMock::SetToken(unsigned int command,
engine_->set_token(args->token);
}
+SpecializedDoCommandAsyncAPIMock::SpecializedDoCommandAsyncAPIMock() {}
+
+SpecializedDoCommandAsyncAPIMock::~SpecializedDoCommandAsyncAPIMock() {}
+
+error::Error SpecializedDoCommandAsyncAPIMock::DoCommand(
+ unsigned int command,
+ unsigned int arg_count,
+ const void* cmd_data) {
+ if (command == kTestQuantumCommand) {
+ // Surpass the GpuScheduler scheduling quantum.
+ base::TimeTicks start_time = base::TimeTicks::Now();
+ while ((base::TimeTicks::Now() - start_time).InMicroseconds() <
+ GpuScheduler::kMinimumSchedulerQuantumMicros) {
+ base::PlatformThread::Sleep(1);
+ }
+ }
+ return AsyncAPIMock::DoCommand(command, arg_count, cmd_data);
+}
+
namespace gles2 {
MockShaderTranslator::MockShaderTranslator() {}
diff --git a/gpu/command_buffer/service/mocks.h b/gpu/command_buffer/service/mocks.h
index 0d341bd..f526c01 100644
--- a/gpu/command_buffer/service/mocks.h
+++ b/gpu/command_buffer/service/mocks.h
@@ -69,6 +69,20 @@ class AsyncAPIMock : public AsyncAPIInterface {
CommandBufferEngine *engine_;
};
+// Allows specialized behavior per command in DoCommand.
+class SpecializedDoCommandAsyncAPIMock : public AsyncAPIMock {
+ public:
+ // Cause DoCommand to sleep more than the GpuScheduler time quantum.
+ static const unsigned int kTestQuantumCommand = 333;
+
+ SpecializedDoCommandAsyncAPIMock();
+ virtual ~SpecializedDoCommandAsyncAPIMock();
+
+ virtual error::Error DoCommand(unsigned int command,
+ unsigned int arg_count,
+ const void* cmd_data);
+};
+
namespace gles2 {
class MockShaderTranslator : public ShaderTranslatorInterface {