author     apatrick@chromium.org <apatrick@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>  2011-07-21 21:40:48 +0000
committer  apatrick@chromium.org <apatrick@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>  2011-07-21 21:40:48 +0000
commit     d0f02c4df3c10b655dc32a326b131131bc7d31d7
tree       b95d230702e15fc8115f5257bb6fad73eb6cddcb
parent     3a62f9988fa2b22beec1531efc008accb27b4ca1
Reland 93066 - Execute all GL commands up to the put offset reported by each flush.

This means glFlush is a barrier that prevents reordering of GL commands issued on different command buffers. I used it to replace latches for synchronizing the rendering of WebGL canvas and Pepper 3D with the accelerated compositor. The primary advantage is that it is more robust than latches and there is no possibility of deadlock. It should also be possible for WebGL and Pepper 3D to use it, whereas exposing SetLatch and WaitLatch would be dangerous.

The calls to SetLatch and WaitLatch are still in WebKit but they are no-ops. SetLatch and WaitLatch are completely removed elsewhere.

I changed CommandBuffer::FlushSync to Finish to reflect the new semantics. Going forward, I will add a synchronous CommandBuffer::WaitForToken and WaitForAvailableEntries, which should eliminate the need to call Finish unless glFinish is called by the client. The Pepper interface is unchanged because I don't want to break binary compatibility.

I fixed a bug where the last read token in CmdBufferHelper was stale after receiving a ReportState IPC. That was causing a redundant synchronous flush in the client-side SwapBuffers throttling.

I removed Yield because it does not make sense with the new semantics. There is no round-robin scheduling.

Tested with WebGL on Windows and Mac and checked that 72672 did not regress.

Review URL: http://codereview.chromium.org/7466022

git-svn-id: svn://svn.chromium.org/chrome/trunk/src@93479 0039d316-1c4b-4281-b951-d872f2087c98
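For context when reading the diff below, here is a minimal sketch (not part of this patch) of how a client synchronizes with the service under the new semantics, using only the CommandBufferHelper entry points the patch keeps: InsertToken, WaitForToken, and Finish. The function name and surrounding setup are hypothetical; it assumes a helper that has already been Initialize()d on a live command buffer.

    #include "gpu/command_buffer/client/cmd_buffer_helper.h"

    // Sketch only: demonstrates token-based throttling now that latches are gone.
    void WaitForEarlierWork(gpu::CommandBufferHelper* helper) {
      // Mark a point in the command stream.
      int32 token = helper->InsertToken();

      // ... issue more commands through the helper ...

      // Blocks, via synchronous flushes, until the service has executed all
      // commands up to and including the token. With latches removed, token
      // waits and ordinary flush ordering are the remaining synchronization
      // primitives.
      helper->WaitForToken(token);

      // Finish() waits until everything issued so far has been executed
      // (put_ == get_offset()); after this patch it is only needed to
      // implement glFinish.
      helper->Finish();
    }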
Diffstat (limited to 'gpu/command_buffer')
-rwxr-xr-x  gpu/command_buffer/build_gles2_cmd_buffer.py | 10
-rw-r--r--  gpu/command_buffer/client/cmd_buffer_helper.cc | 29
-rw-r--r--  gpu/command_buffer/client/cmd_buffer_helper.h | 19
-rw-r--r--  gpu/command_buffer/client/cmd_buffer_helper_test.cc | 8
-rw-r--r--  gpu/command_buffer/client/fenced_allocator_test.cc | 4
-rw-r--r--  gpu/command_buffer/client/gles2_c_lib_autogen.h | 6
-rw-r--r--  gpu/command_buffer/client/gles2_cmd_helper_autogen.h | 10
-rw-r--r--  gpu/command_buffer/client/gles2_demo.cc | 6
-rw-r--r--  gpu/command_buffer/client/gles2_implementation.cc | 1
-rw-r--r--  gpu/command_buffer/client/gles2_implementation_autogen.h | 10
-rw-r--r--  gpu/command_buffer/client/gles2_implementation_unittest.cc | 6
-rw-r--r--  gpu/command_buffer/client/mapped_memory_unittest.cc | 4
-rw-r--r--  gpu/command_buffer/client/ring_buffer_test.cc | 4
-rw-r--r--  gpu/command_buffer/common/cmd_buffer_common.cc | 11
-rw-r--r--  gpu/command_buffer/common/cmd_buffer_common.h | 27
-rw-r--r--  gpu/command_buffer/common/command_buffer.h | 15
-rw-r--r--  gpu/command_buffer/common/command_buffer_mock.h | 1
-rw-r--r--  gpu/command_buffer/common/constants.h | 20
-rw-r--r--  gpu/command_buffer/common/gles2_cmd_format_autogen.h | 68
-rw-r--r--  gpu/command_buffer/common/gles2_cmd_format_test_autogen.h | 26
-rw-r--r--  gpu/command_buffer/common/gles2_cmd_ids_autogen.h | 2
-rw-r--r--  gpu/command_buffer/common/gles2_cmd_utils_implementation_autogen.h | 4
-rw-r--r--  gpu/command_buffer/service/cmd_parser.cc | 2
-rw-r--r--  gpu/command_buffer/service/cmd_parser_test.cc | 24
-rw-r--r--  gpu/command_buffer/service/command_buffer_service.cc | 10
-rw-r--r--  gpu/command_buffer/service/command_buffer_service.h | 5
-rw-r--r--  gpu/command_buffer/service/common_decoder.cc | 6
-rw-r--r--  gpu/command_buffer/service/common_decoder_unittest.cc | 6
-rw-r--r--  gpu/command_buffer/service/gles2_cmd_decoder.cc | 63
-rw-r--r--  gpu/command_buffer/service/gles2_cmd_decoder.h | 5
-rw-r--r--  gpu/command_buffer/service/gles2_cmd_decoder_unittest.cc | 59
-rw-r--r--  gpu/command_buffer/service/gles2_cmd_decoder_unittest_2_autogen.h | 3
-rw-r--r--  gpu/command_buffer/service/gles2_cmd_decoder_unittest_3_autogen.h | 3
-rw-r--r--  gpu/command_buffer/service/gpu_scheduler.cc | 170
-rw-r--r--  gpu/command_buffer/service/gpu_scheduler.h | 35
-rw-r--r--  gpu/command_buffer/service/gpu_scheduler_unittest.cc | 62
-rw-r--r--  gpu/command_buffer/service/mocks.cc | 19
-rw-r--r--  gpu/command_buffer/service/mocks.h | 14
38 files changed, 148 insertions, 629 deletions
diff --git a/gpu/command_buffer/build_gles2_cmd_buffer.py b/gpu/command_buffer/build_gles2_cmd_buffer.py
index 02cac97..76f3c59 100755
--- a/gpu/command_buffer/build_gles2_cmd_buffer.py
+++ b/gpu/command_buffer/build_gles2_cmd_buffer.py
@@ -217,8 +217,6 @@ GL_APICALL void GL_APIENTRY glCopyTextureToParentTextureCHROMIUM (GLidBi
GL_APICALL void GL_APIENTRY glResizeCHROMIUM (GLuint width, GLuint height);
GL_APICALL const GLchar* GL_APIENTRY glGetRequestableExtensionsCHROMIUM (void);
GL_APICALL void GL_APIENTRY glRequestExtensionCHROMIUM (const char* extension);
-GL_APICALL void GL_APIENTRY glSetLatchCHROMIUM (GLuint latch_id);
-GL_APICALL void GL_APIENTRY glWaitLatchCHROMIUM (GLuint latch_id);
GL_APICALL void GL_APIENTRY glRateLimitOffscreenContextCHROMIUM (void);
GL_APICALL void GL_APIENTRY glSetSurfaceCHROMIUM (GLint surface_id);
GL_APICALL void GL_APIENTRY glGetMultipleIntegervCHROMIUM (const GLenum* pnames, GLuint count, GLint* results, GLsizeiptr size);
@@ -426,8 +424,6 @@ _CMD_ID_TABLE = {
'ResizeCHROMIUM': 448,
'GetRequestableExtensionsCHROMIUM': 449,
'RequestExtensionCHROMIUM': 450,
- 'SetLatchCHROMIUM': 451,
- 'WaitLatchCHROMIUM': 452,
'SetSurfaceCHROMIUM': 453,
'GetMultipleIntegervCHROMIUM': 454,
'GetProgramInfoCHROMIUM': 455,
@@ -1737,12 +1733,6 @@ _FUNCTION_INFO = {
'extension': True,
'chromium': True,
},
- 'SetLatchCHROMIUM': {
- 'type': 'Custom',
- },
- 'WaitLatchCHROMIUM': {
- 'type': 'Custom',
- },
'RateLimitOffscreenContextCHROMIUM': {
'gen_cmd': False,
'extension': True,
diff --git a/gpu/command_buffer/client/cmd_buffer_helper.cc b/gpu/command_buffer/client/cmd_buffer_helper.cc
index 354d563..bd44431 100644
--- a/gpu/command_buffer/client/cmd_buffer_helper.cc
+++ b/gpu/command_buffer/client/cmd_buffer_helper.cc
@@ -21,8 +21,6 @@ CommandBufferHelper::CommandBufferHelper(CommandBuffer* command_buffer)
total_entry_count_(0),
usable_entry_count_(0),
token_(0),
- last_token_read_(-1),
- get_(0),
put_(0),
last_put_sent_(0),
commands_issued_(0),
@@ -47,7 +45,6 @@ bool CommandBufferHelper::Initialize(int32 ring_buffer_size) {
total_entry_count_ = num_ring_buffer_entries;
usable_entry_count_ = total_entry_count_ - kJumpEntries;
put_ = state.put_offset;
- SynchronizeState(state);
return true;
}
@@ -57,8 +54,7 @@ CommandBufferHelper::~CommandBufferHelper() {
bool CommandBufferHelper::FlushSync() {
time(&last_flush_time_);
last_put_sent_ = put_;
- CommandBuffer::State state = command_buffer_->FlushSync(put_, get_);
- SynchronizeState(state);
+ CommandBuffer::State state = command_buffer_->FlushSync(put_, get_offset());
return state.error == error::kNoError;
}
@@ -77,7 +73,7 @@ bool CommandBufferHelper::Finish() {
// has shutdown.
if (!FlushSync())
return false;
- } while (put_ != get_);
+ } while (put_ != get_offset());
return true;
}
@@ -96,7 +92,7 @@ int32 CommandBufferHelper::InsertToken() {
TRACE_EVENT0("gpu", "CommandBufferHelper::InsertToken(wrapped)");
// we wrapped
Finish();
- GPU_DCHECK_EQ(token_, last_token_read_);
+ GPU_DCHECK_EQ(token_, last_token_read());
}
return token_;
}
@@ -109,8 +105,8 @@ void CommandBufferHelper::WaitForToken(int32 token) {
if (token < 0)
return;
if (token > token_) return; // we wrapped
- while (last_token_read_ < token) {
- if (get_ == put_) {
+ while (last_token_read() < token) {
+ if (get_offset() == put_) {
GPU_LOG(FATAL) << "Empty command buffer while waiting on a token.";
return;
}
@@ -121,11 +117,6 @@ void CommandBufferHelper::WaitForToken(int32 token) {
}
}
-void CommandBufferHelper::YieldScheduler() {
- cmd::YieldScheduler& cmd = GetCmdSpace<cmd::YieldScheduler>();
- cmd.Init();
-}
-
// Waits for available entries, basically waiting until get >= put + count + 1.
// It actually waits for contiguous entries, so it may need to wrap the buffer
// around, adding a jump. Thus this function may change the value of put_. The
@@ -139,9 +130,9 @@ void CommandBufferHelper::WaitForAvailableEntries(int32 count) {
// need to make sure get wraps first, actually that get is 1 or more (since
// put will wrap to 0 after we add the jump).
GPU_DCHECK_LE(1, put_);
- if (get_ > put_ || get_ == 0) {
+ if (get_offset() > put_ || get_offset() == 0) {
TRACE_EVENT0("gpu", "CommandBufferHelper::WaitForAvailableEntries");
- while (get_ > put_ || get_ == 0) {
+ while (get_offset() > put_ || get_offset() == 0) {
// Do not loop forever if the flush fails, meaning the command buffer
// reader has shutdown.
if (!FlushSync())
@@ -185,13 +176,7 @@ CommandBufferEntry* CommandBufferHelper::GetSpace(uint32 entries) {
error::Error CommandBufferHelper::GetError() {
CommandBuffer::State state = command_buffer_->GetState();
- SynchronizeState(state);
return static_cast<error::Error>(state.error);
}
-void CommandBufferHelper::SynchronizeState(const CommandBuffer::State& state) {
- get_ = state.get_offset;
- last_token_read_ = state.token;
-}
-
} // namespace gpu
diff --git a/gpu/command_buffer/client/cmd_buffer_helper.h b/gpu/command_buffer/client/cmd_buffer_helper.h
index c7413ca..a7c17ef 100644
--- a/gpu/command_buffer/client/cmd_buffer_helper.h
+++ b/gpu/command_buffer/client/cmd_buffer_helper.h
@@ -83,11 +83,6 @@ class CommandBufferHelper {
// the value of the token to wait for.
void WaitForToken(int32 token);
- // Inserts a yield command, signaling the scheduler that this is a good point
- // to update the state and schedule other command buffers. This is
- // particularly useful after inserting a token that will be waited on.
- void YieldScheduler();
-
// Called prior to each command being issued. Waits for a certain amount of
// space to be available. Returns address of space.
CommandBufferEntry* GetSpace(uint32 entries);
@@ -121,7 +116,11 @@ class CommandBufferHelper {
}
int32 last_token_read() const {
- return last_token_read_;
+ return command_buffer_->GetLastState().token;
+ }
+
+ int32 get_offset() const {
+ return command_buffer_->GetLastState().get_offset;
}
error::Error GetError();
@@ -221,20 +220,16 @@ class CommandBufferHelper {
// Returns the number of available entries (they may not be contiguous).
int32 AvailableEntries() {
- return (get_ - put_ - 1 + usable_entry_count_) % usable_entry_count_;
+ return (get_offset() - put_ - 1 + usable_entry_count_) %
+ usable_entry_count_;
}
- // Synchronize with current service state.
- void SynchronizeState(const CommandBuffer::State& state);
-
CommandBuffer* command_buffer_;
Buffer ring_buffer_;
CommandBufferEntry *entries_;
int32 total_entry_count_; // the total number of entries
int32 usable_entry_count_; // the usable number (ie, minus space for jump)
int32 token_;
- int32 last_token_read_;
- int32 get_;
int32 put_;
int32 last_put_sent_;
int commands_issued_;
diff --git a/gpu/command_buffer/client/cmd_buffer_helper_test.cc b/gpu/command_buffer/client/cmd_buffer_helper_test.cc
index 56eaa0a..01f3760 100644
--- a/gpu/command_buffer/client/cmd_buffer_helper_test.cc
+++ b/gpu/command_buffer/client/cmd_buffer_helper_test.cc
@@ -77,8 +77,8 @@ class CommandBufferHelperTest : public testing::Test {
.WillRepeatedly(
Invoke(do_jump_command_.get(), &DoJumpCommand::DoCommand));
- gpu_scheduler_.reset(new GpuScheduler(
- command_buffer_.get(), NULL, parser_, 1));
+ gpu_scheduler_.reset(GpuScheduler::CreateForTests(
+ command_buffer_.get(), NULL, parser_));
command_buffer_->SetPutOffsetChangeCallback(NewCallback(
gpu_scheduler_.get(), &GpuScheduler::PutChanged));
@@ -185,10 +185,6 @@ TEST_F(CommandBufferHelperTest, TestCommandProcessing) {
args2[1].value_float = 6.f;
AddCommandWithExpect(error::kNoError, kUnusedCommandId, 2, args2);
- helper_->Flush();
- // Check that the engine has work to do now.
- EXPECT_FALSE(parser_->IsEmpty());
-
// Wait until it's done.
helper_->Finish();
// Check that the engine has no more work to do.
diff --git a/gpu/command_buffer/client/fenced_allocator_test.cc b/gpu/command_buffer/client/fenced_allocator_test.cc
index 3bf9bd8..883d752 100644
--- a/gpu/command_buffer/client/fenced_allocator_test.cc
+++ b/gpu/command_buffer/client/fenced_allocator_test.cc
@@ -51,8 +51,8 @@ class BaseFencedAllocatorTest : public testing::Test {
0,
api_mock_.get());
- gpu_scheduler_.reset(new GpuScheduler(
- command_buffer_.get(), NULL, parser_, INT_MAX));
+ gpu_scheduler_.reset(GpuScheduler::CreateForTests(
+ command_buffer_.get(), NULL, parser_));
command_buffer_->SetPutOffsetChangeCallback(NewCallback(
gpu_scheduler_.get(), &GpuScheduler::PutChanged));
diff --git a/gpu/command_buffer/client/gles2_c_lib_autogen.h b/gpu/command_buffer/client/gles2_c_lib_autogen.h
index 2306501..3585cab 100644
--- a/gpu/command_buffer/client/gles2_c_lib_autogen.h
+++ b/gpu/command_buffer/client/gles2_c_lib_autogen.h
@@ -568,12 +568,6 @@ const GLchar* GLES2GetRequestableExtensionsCHROMIUM() {
void GLES2RequestExtensionCHROMIUM(const char* extension) {
gles2::GetGLContext()->RequestExtensionCHROMIUM(extension);
}
-void GLES2SetLatchCHROMIUM(GLuint latch_id) {
- gles2::GetGLContext()->SetLatchCHROMIUM(latch_id);
-}
-void GLES2WaitLatchCHROMIUM(GLuint latch_id) {
- gles2::GetGLContext()->WaitLatchCHROMIUM(latch_id);
-}
void GLES2RateLimitOffscreenContextCHROMIUM() {
gles2::GetGLContext()->RateLimitOffscreenContextCHROMIUM();
}
diff --git a/gpu/command_buffer/client/gles2_cmd_helper_autogen.h b/gpu/command_buffer/client/gles2_cmd_helper_autogen.h
index 8078354..c28e989 100644
--- a/gpu/command_buffer/client/gles2_cmd_helper_autogen.h
+++ b/gpu/command_buffer/client/gles2_cmd_helper_autogen.h
@@ -1219,16 +1219,6 @@
c.Init(bucket_id);
}
- void SetLatchCHROMIUM(GLuint latch_id) {
- gles2::SetLatchCHROMIUM& c = GetCmdSpace<gles2::SetLatchCHROMIUM>();
- c.Init(latch_id);
- }
-
- void WaitLatchCHROMIUM(GLuint latch_id) {
- gles2::WaitLatchCHROMIUM& c = GetCmdSpace<gles2::WaitLatchCHROMIUM>();
- c.Init(latch_id);
- }
-
void SetSurfaceCHROMIUM(GLint surface_id) {
gles2::SetSurfaceCHROMIUM& c = GetCmdSpace<gles2::SetSurfaceCHROMIUM>();
c.Init(surface_id);
diff --git a/gpu/command_buffer/client/gles2_demo.cc b/gpu/command_buffer/client/gles2_demo.cc
index f981b1b..7dde7f6 100644
--- a/gpu/command_buffer/client/gles2_demo.cc
+++ b/gpu/command_buffer/client/gles2_demo.cc
@@ -56,9 +56,9 @@ bool GLES2Demo::Setup(void* hwnd, int32 size) {
if (!command_buffer->Initialize(size))
return NULL;
- GpuScheduler* gpu_scheduler = new GpuScheduler(command_buffer.get(),
- NULL,
- NULL);
+ GpuScheduler* gpu_scheduler = GpuScheduler::Create(command_buffer.get(),
+ NULL,
+ NULL);
if (!gpu_scheduler->Initialize(reinterpret_cast<HWND>(hwnd),
gfx::Size(),
false,
diff --git a/gpu/command_buffer/client/gles2_implementation.cc b/gpu/command_buffer/client/gles2_implementation.cc
index 1a7eeaa..52abdf1 100644
--- a/gpu/command_buffer/client/gles2_implementation.cc
+++ b/gpu/command_buffer/client/gles2_implementation.cc
@@ -832,7 +832,6 @@ void GLES2Implementation::SwapBuffers() {
// the scheduler yields between the InsertToken and the SwapBuffers.
swap_buffers_tokens_.push(helper_->InsertToken());
helper_->SwapBuffers();
- helper_->YieldScheduler();
helper_->CommandBufferHelper::Flush();
// Wait if we added too many swap buffers.
if (swap_buffers_tokens_.size() > kMaxSwapBuffers) {
diff --git a/gpu/command_buffer/client/gles2_implementation_autogen.h b/gpu/command_buffer/client/gles2_implementation_autogen.h
index 270d303..397d975 100644
--- a/gpu/command_buffer/client/gles2_implementation_autogen.h
+++ b/gpu/command_buffer/client/gles2_implementation_autogen.h
@@ -1272,16 +1272,6 @@ const GLchar* GetRequestableExtensionsCHROMIUM();
void RequestExtensionCHROMIUM(const char* extension);
-void SetLatchCHROMIUM(GLuint latch_id) {
- GPU_CLIENT_LOG("[" << this << "] glSetLatchCHROMIUM(" << latch_id << ")");
- helper_->SetLatchCHROMIUM(latch_id);
-}
-
-void WaitLatchCHROMIUM(GLuint latch_id) {
- GPU_CLIENT_LOG("[" << this << "] glWaitLatchCHROMIUM(" << latch_id << ")");
- helper_->WaitLatchCHROMIUM(latch_id);
-}
-
void RateLimitOffscreenContextCHROMIUM();
void SetSurfaceCHROMIUM(GLint surface_id) {
diff --git a/gpu/command_buffer/client/gles2_implementation_unittest.cc b/gpu/command_buffer/client/gles2_implementation_unittest.cc
index eb003a5..a37f4b2 100644
--- a/gpu/command_buffer/client/gles2_implementation_unittest.cc
+++ b/gpu/command_buffer/client/gles2_implementation_unittest.cc
@@ -46,6 +46,10 @@ class GLES2MockCommandBufferHelper : public CommandBuffer {
return state_;
}
+ virtual State GetLastState() {
+ return state_;
+ }
+
virtual void Flush(int32 put_offset) {
state_.put_offset = put_offset;
}
@@ -258,7 +262,7 @@ class GLES2ImplementationTest : public testing::Test {
false));
EXPECT_CALL(*command_buffer_, OnFlush(_)).Times(1).RetiresOnSaturation();
- helper_->CommandBufferHelper::FlushSync();
+ helper_->CommandBufferHelper::Finish();
Buffer ring_buffer = command_buffer_->GetRingBuffer();
commands_ = static_cast<CommandBufferEntry*>(ring_buffer.ptr) +
command_buffer_->GetState().put_offset;
diff --git a/gpu/command_buffer/client/mapped_memory_unittest.cc b/gpu/command_buffer/client/mapped_memory_unittest.cc
index 067c8e6..735ac23 100644
--- a/gpu/command_buffer/client/mapped_memory_unittest.cc
+++ b/gpu/command_buffer/client/mapped_memory_unittest.cc
@@ -49,8 +49,8 @@ class MappedMemoryTestBase : public testing::Test {
0,
api_mock_.get());
- gpu_scheduler_.reset(new GpuScheduler(
- command_buffer_.get(), NULL, parser_, INT_MAX));
+ gpu_scheduler_.reset(GpuScheduler::CreateForTests(
+ command_buffer_.get(), NULL, parser_));
command_buffer_->SetPutOffsetChangeCallback(NewCallback(
gpu_scheduler_.get(), &GpuScheduler::PutChanged));
diff --git a/gpu/command_buffer/client/ring_buffer_test.cc b/gpu/command_buffer/client/ring_buffer_test.cc
index 01bc3e0..a816393 100644
--- a/gpu/command_buffer/client/ring_buffer_test.cc
+++ b/gpu/command_buffer/client/ring_buffer_test.cc
@@ -71,8 +71,8 @@ class BaseRingBufferTest : public testing::Test {
0,
api_mock_.get());
- gpu_scheduler_.reset(new GpuScheduler(
- command_buffer_.get(), NULL, parser_, INT_MAX));
+ gpu_scheduler_.reset(GpuScheduler::CreateForTests(
+ command_buffer_.get(), NULL, parser_));
command_buffer_->SetPutOffsetChangeCallback(NewCallback(
gpu_scheduler_.get(), &GpuScheduler::PutChanged));
diff --git a/gpu/command_buffer/common/cmd_buffer_common.cc b/gpu/command_buffer/common/cmd_buffer_common.cc
index a9113b2..9ddb1f3 100644
--- a/gpu/command_buffer/common/cmd_buffer_common.cc
+++ b/gpu/command_buffer/common/cmd_buffer_common.cc
@@ -31,6 +31,17 @@ const char* GetCommandName(CommandId command_id) {
} // namespace cmd
+// TODO(apatrick): this method body is here instead of command_buffer.cc
+// because NaCl currently compiles in this file but not the other.
+// Remove this method body and the includes of command_buffer.h and
+// logging.h above once NaCl defines SetContextLostReason() in its
+// CommandBuffer subclass and has been rolled forward. See
+// http://crbug.com/89670 .
+gpu::CommandBuffer::State CommandBuffer::GetLastState() {
+ GPU_NOTREACHED();
+ return gpu::CommandBuffer::State();
+}
+
// TODO(kbr): this method body is here instead of command_buffer.cc
// because NaCl currently compiles in this file but not the other.
// Remove this method body and the includes of command_buffer.h and
diff --git a/gpu/command_buffer/common/cmd_buffer_common.h b/gpu/command_buffer/common/cmd_buffer_common.h
index 0f050e4..eed4724 100644
--- a/gpu/command_buffer/common/cmd_buffer_common.h
+++ b/gpu/command_buffer/common/cmd_buffer_common.h
@@ -158,7 +158,6 @@ namespace cmd {
OP(SetBucketDataImmediate) /* 9 */ \
OP(GetBucketSize) /* 10 */ \
OP(GetBucketData) /* 11 */ \
- OP(YieldScheduler) /* 12 */ \
// Common commands.
enum CommandId {
@@ -643,32 +642,6 @@ COMPILE_ASSERT(offsetof(GetBucketData, shared_memory_id) == 16,
COMPILE_ASSERT(offsetof(GetBucketData, shared_memory_offset) == 20,
Offsetof_GetBucketData_shared_memory_offset_not_20);
-// A Yield command. Hints the scheduler that this is a good point to update the
-// state and schedule other command buffers.
-struct YieldScheduler {
- typedef YieldScheduler ValueType;
- static const CommandId kCmdId = kYieldScheduler;
- static const cmd::ArgFlags kArgFlags = cmd::kFixed;
-
- void SetHeader() {
- header.SetCmd<ValueType>();
- }
-
- void Init() {
- SetHeader();
- }
- static void* Set(void* cmd) {
- static_cast<ValueType*>(cmd)->Init();
- return NextCmdAddress<ValueType>(cmd);
- }
-
- CommandHeader header;
-};
-
-COMPILE_ASSERT(sizeof(YieldScheduler) == 4, Sizeof_YieldScheduler_is_not_4);
-COMPILE_ASSERT(offsetof(YieldScheduler, header) == 0,
- Offsetof_YieldScheduler_header_not_0);
-
} // namespace cmd
#pragma pack(pop)
diff --git a/gpu/command_buffer/common/command_buffer.h b/gpu/command_buffer/common/command_buffer.h
index 539098b..2eff201 100644
--- a/gpu/command_buffer/common/command_buffer.h
+++ b/gpu/command_buffer/common/command_buffer.h
@@ -78,17 +78,18 @@ class CommandBuffer {
// Returns the current status.
virtual State GetState() = 0;
+ // Returns the last state without synchronizing with the service.
+ virtual State GetLastState();
+
// The writer calls this to update its put offset. This ensures the reader
- // sees the latest added commands, and will eventually process them.
+ // sees the latest added commands, and will eventually process them. On the
+ // service side, commands are processed up to the given put_offset before
+ // subsequent Flushes on the same GpuChannel.
virtual void Flush(int32 put_offset) = 0;
// The writer calls this to update its put offset. This function returns the
- // reader's most recent get offset. Does not return until after the put offset
- // change callback has been invoked. Returns -1 if the put offset is invalid.
- // If last_known_get is different from the reader's current get pointer, this
- // function will return immediately, otherwise it guarantees that the reader
- // has processed some commands before returning (assuming the command buffer
- // isn't empty and there is no error).
+ // reader's most recent get offset. Does not return until all pending commands
+ // have been executed.
virtual State FlushSync(int32 put_offset, int32 last_known_get) = 0;
// Sets the current get offset. This can be called from any thread.
diff --git a/gpu/command_buffer/common/command_buffer_mock.h b/gpu/command_buffer/common/command_buffer_mock.h
index 3243d17..321c40d 100644
--- a/gpu/command_buffer/common/command_buffer_mock.h
+++ b/gpu/command_buffer/common/command_buffer_mock.h
@@ -25,6 +25,7 @@ class MockCommandBuffer : public CommandBuffer {
MOCK_METHOD2(Initialize, bool(base::SharedMemory* buffer, int32 size));
MOCK_METHOD0(GetRingBuffer, Buffer());
MOCK_METHOD0(GetState, State());
+ MOCK_METHOD0(GetLastState, State());
MOCK_METHOD1(Flush, void(int32 put_offset));
MOCK_METHOD2(FlushSync, State(int32 put_offset, int32 last_known_get));
MOCK_METHOD1(SetGetOffset, void(int32 get_offset));
diff --git a/gpu/command_buffer/common/constants.h b/gpu/command_buffer/common/constants.h
index c204e87..1b14636 100644
--- a/gpu/command_buffer/common/constants.h
+++ b/gpu/command_buffer/common/constants.h
@@ -21,28 +21,12 @@ namespace error {
kUnknownCommand,
kInvalidArguments,
kLostContext,
- kGenericError,
-
- // This is not an error. It is returned by WaitLatch when it is blocked.
- // When blocked, the context will not reschedule itself until another
- // context executes a SetLatch command.
- kWaiting,
-
- // This is not an error either. It just hints the scheduler that it can exit
- // its loop, update state, and schedule other command buffers.
- kYield
+ kGenericError
};
// Return true if the given error code is an actual error.
inline bool IsError(Error error) {
- switch (error) {
- case kNoError:
- case kWaiting:
- case kYield:
- return false;
- default:
- return true;
- }
+ return error != kNoError;
}
// Provides finer grained information about why the context was lost.
diff --git a/gpu/command_buffer/common/gles2_cmd_format_autogen.h b/gpu/command_buffer/common/gles2_cmd_format_autogen.h
index 3b76346..b9d6c06 100644
--- a/gpu/command_buffer/common/gles2_cmd_format_autogen.h
+++ b/gpu/command_buffer/common/gles2_cmd_format_autogen.h
@@ -8913,74 +8913,6 @@ COMPILE_ASSERT(offsetof(RequestExtensionCHROMIUM, header) == 0,
COMPILE_ASSERT(offsetof(RequestExtensionCHROMIUM, bucket_id) == 4,
OffsetOf_RequestExtensionCHROMIUM_bucket_id_not_4);
-struct SetLatchCHROMIUM {
- typedef SetLatchCHROMIUM ValueType;
- static const CommandId kCmdId = kSetLatchCHROMIUM;
- static const cmd::ArgFlags kArgFlags = cmd::kFixed;
-
- static uint32 ComputeSize() {
- return static_cast<uint32>(sizeof(ValueType)); // NOLINT
- }
-
- void SetHeader() {
- header.SetCmd<ValueType>();
- }
-
- void Init(GLuint _latch_id) {
- SetHeader();
- latch_id = _latch_id;
- }
-
- void* Set(void* cmd, GLuint _latch_id) {
- static_cast<ValueType*>(cmd)->Init(_latch_id);
- return NextCmdAddress<ValueType>(cmd);
- }
-
- gpu::CommandHeader header;
- uint32 latch_id;
-};
-
-COMPILE_ASSERT(sizeof(SetLatchCHROMIUM) == 8,
- Sizeof_SetLatchCHROMIUM_is_not_8);
-COMPILE_ASSERT(offsetof(SetLatchCHROMIUM, header) == 0,
- OffsetOf_SetLatchCHROMIUM_header_not_0);
-COMPILE_ASSERT(offsetof(SetLatchCHROMIUM, latch_id) == 4,
- OffsetOf_SetLatchCHROMIUM_latch_id_not_4);
-
-struct WaitLatchCHROMIUM {
- typedef WaitLatchCHROMIUM ValueType;
- static const CommandId kCmdId = kWaitLatchCHROMIUM;
- static const cmd::ArgFlags kArgFlags = cmd::kFixed;
-
- static uint32 ComputeSize() {
- return static_cast<uint32>(sizeof(ValueType)); // NOLINT
- }
-
- void SetHeader() {
- header.SetCmd<ValueType>();
- }
-
- void Init(GLuint _latch_id) {
- SetHeader();
- latch_id = _latch_id;
- }
-
- void* Set(void* cmd, GLuint _latch_id) {
- static_cast<ValueType*>(cmd)->Init(_latch_id);
- return NextCmdAddress<ValueType>(cmd);
- }
-
- gpu::CommandHeader header;
- uint32 latch_id;
-};
-
-COMPILE_ASSERT(sizeof(WaitLatchCHROMIUM) == 8,
- Sizeof_WaitLatchCHROMIUM_is_not_8);
-COMPILE_ASSERT(offsetof(WaitLatchCHROMIUM, header) == 0,
- OffsetOf_WaitLatchCHROMIUM_header_not_0);
-COMPILE_ASSERT(offsetof(WaitLatchCHROMIUM, latch_id) == 4,
- OffsetOf_WaitLatchCHROMIUM_latch_id_not_4);
-
struct SetSurfaceCHROMIUM {
typedef SetSurfaceCHROMIUM ValueType;
static const CommandId kCmdId = kSetSurfaceCHROMIUM;
diff --git a/gpu/command_buffer/common/gles2_cmd_format_test_autogen.h b/gpu/command_buffer/common/gles2_cmd_format_test_autogen.h
index 40af555..61513f5 100644
--- a/gpu/command_buffer/common/gles2_cmd_format_test_autogen.h
+++ b/gpu/command_buffer/common/gles2_cmd_format_test_autogen.h
@@ -3510,32 +3510,6 @@ TEST(GLES2FormatTest, RequestExtensionCHROMIUM) {
EXPECT_EQ(static_cast<uint32>(11), cmd.bucket_id);
}
-TEST(GLES2FormatTest, SetLatchCHROMIUM) {
- SetLatchCHROMIUM cmd = { { 0 } };
- void* next_cmd = cmd.Set(
- &cmd,
- static_cast<GLuint>(11));
- EXPECT_EQ(static_cast<uint32>(SetLatchCHROMIUM::kCmdId),
- cmd.header.command);
- EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
- EXPECT_EQ(static_cast<char*>(next_cmd),
- reinterpret_cast<char*>(&cmd) + sizeof(cmd));
- EXPECT_EQ(static_cast<GLuint>(11), cmd.latch_id);
-}
-
-TEST(GLES2FormatTest, WaitLatchCHROMIUM) {
- WaitLatchCHROMIUM cmd = { { 0 } };
- void* next_cmd = cmd.Set(
- &cmd,
- static_cast<GLuint>(11));
- EXPECT_EQ(static_cast<uint32>(WaitLatchCHROMIUM::kCmdId),
- cmd.header.command);
- EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
- EXPECT_EQ(static_cast<char*>(next_cmd),
- reinterpret_cast<char*>(&cmd) + sizeof(cmd));
- EXPECT_EQ(static_cast<GLuint>(11), cmd.latch_id);
-}
-
TEST(GLES2FormatTest, SetSurfaceCHROMIUM) {
SetSurfaceCHROMIUM cmd = { { 0 } };
void* next_cmd = cmd.Set(
diff --git a/gpu/command_buffer/common/gles2_cmd_ids_autogen.h b/gpu/command_buffer/common/gles2_cmd_ids_autogen.h
index 25bf081..e164a51 100644
--- a/gpu/command_buffer/common/gles2_cmd_ids_autogen.h
+++ b/gpu/command_buffer/common/gles2_cmd_ids_autogen.h
@@ -205,8 +205,6 @@
OP(ResizeCHROMIUM) /* 448 */ \
OP(GetRequestableExtensionsCHROMIUM) /* 449 */ \
OP(RequestExtensionCHROMIUM) /* 450 */ \
- OP(SetLatchCHROMIUM) /* 451 */ \
- OP(WaitLatchCHROMIUM) /* 452 */ \
OP(SetSurfaceCHROMIUM) /* 453 */ \
OP(GetMultipleIntegervCHROMIUM) /* 454 */ \
OP(GetProgramInfoCHROMIUM) /* 455 */ \
diff --git a/gpu/command_buffer/common/gles2_cmd_utils_implementation_autogen.h b/gpu/command_buffer/common/gles2_cmd_utils_implementation_autogen.h
index 1988472..d83c3c1 100644
--- a/gpu/command_buffer/common/gles2_cmd_utils_implementation_autogen.h
+++ b/gpu/command_buffer/common/gles2_cmd_utils_implementation_autogen.h
@@ -179,7 +179,7 @@ static GLES2Util::EnumToString enum_to_string_table[] = {
{ 0x00000400, "GL_STENCIL_BUFFER_BIT", },
{ 0x800A, "GL_FUNC_SUBTRACT", },
{ 0x8E2C, "GL_DEPTH_COMPONENT16_NONLINEAR_NV", },
- { 0x889F, "GL_VERTEX_ATTRIB_ARRAY_BUFFER_BINDING", },
+ { 0x8508, "GL_DECR_WRAP", },
{ 0x8006, "GL_FUNC_ADD", },
{ 0x8007, "GL_MIN_EXT", },
{ 0x8004, "GL_ONE_MINUS_CONSTANT_ALPHA", },
@@ -401,7 +401,7 @@ static GLES2Util::EnumToString enum_to_string_table[] = {
{ 0x80CA, "GL_BLEND_DST_ALPHA", },
{ 0x8CD6, "GL_FRAMEBUFFER_INCOMPLETE_ATTACHMENT", },
{ 0x8872, "GL_MAX_TEXTURE_IMAGE_UNITS", },
- { 0x8508, "GL_DECR_WRAP", },
+ { 0x889F, "GL_VERTEX_ATTRIB_ARRAY_BUFFER_BINDING", },
{ 0x8507, "GL_INCR_WRAP", },
{ 0x8895, "GL_ELEMENT_ARRAY_BUFFER_BINDING", },
{ 0x8894, "GL_ARRAY_BUFFER_BINDING", },
diff --git a/gpu/command_buffer/service/cmd_parser.cc b/gpu/command_buffer/service/cmd_parser.cc
index 9ed3fca..fba06e6 100644
--- a/gpu/command_buffer/service/cmd_parser.cc
+++ b/gpu/command_buffer/service/cmd_parser.cc
@@ -64,7 +64,7 @@ error::Error CommandParser::ProcessCommand() {
}
// If get was not set somewhere else advance it.
- if (result != error::kWaiting && get == get_)
+ if (get == get_)
get_ = (get + header.size) % entry_count_;
return result;
}
diff --git a/gpu/command_buffer/service/cmd_parser_test.cc b/gpu/command_buffer/service/cmd_parser_test.cc
index 315a475..857ca8e 100644
--- a/gpu/command_buffer/service/cmd_parser_test.cc
+++ b/gpu/command_buffer/service/cmd_parser_test.cc
@@ -288,28 +288,4 @@ TEST_F(CommandParserTest, TestError) {
Mock::VerifyAndClearExpectations(api_mock());
}
-TEST_F(CommandParserTest, TestWaiting) {
- const unsigned int kNumEntries = 5;
- scoped_ptr<CommandParser> parser(MakeParser(kNumEntries));
- CommandBufferOffset put = parser->put();
- CommandHeader header;
-
- // Generate a command with size 1.
- header.size = 1;
- header.command = 3;
- buffer()[put++].value_header = header;
-
- parser->set_put(put);
- // A command that returns kWaiting should not advance the get pointer.
- AddDoCommandExpect(error::kWaiting, 3, 0, NULL);
- EXPECT_EQ(error::kWaiting, parser->ProcessAllCommands());
- EXPECT_EQ(0, parser->get());
- Mock::VerifyAndClearExpectations(api_mock());
- // Not waiting should advance the get pointer.
- AddDoCommandExpect(error::kNoError, 3, 0, NULL);
- EXPECT_EQ(error::kNoError, parser->ProcessAllCommands());
- EXPECT_EQ(put, parser->get());
- Mock::VerifyAndClearExpectations(api_mock());
-}
-
} // namespace gpu
diff --git a/gpu/command_buffer/service/command_buffer_service.cc b/gpu/command_buffer/service/command_buffer_service.cc
index 064341d..26ccbee 100644
--- a/gpu/command_buffer/service/command_buffer_service.cc
+++ b/gpu/command_buffer/service/command_buffer_service.cc
@@ -104,6 +104,10 @@ CommandBufferService::State CommandBufferService::GetState() {
return state;
}
+CommandBufferService::State CommandBufferService::GetLastState() {
+ return GetState();
+}
+
CommandBufferService::State CommandBufferService::FlushSync(
int32 put_offset, int32 last_known_get) {
if (put_offset < 0 || put_offset > num_entries_) {
@@ -114,7 +118,7 @@ CommandBufferService::State CommandBufferService::FlushSync(
put_offset_ = put_offset;
if (put_offset_change_callback_.get()) {
- put_offset_change_callback_->Run(last_known_get == get_offset_);
+ put_offset_change_callback_->Run();
}
return GetState();
@@ -129,7 +133,7 @@ void CommandBufferService::Flush(int32 put_offset) {
put_offset_ = put_offset;
if (put_offset_change_callback_.get()) {
- put_offset_change_callback_->Run(false);
+ put_offset_change_callback_->Run();
}
}
@@ -261,7 +265,7 @@ void CommandBufferService::SetContextLostReason(
}
void CommandBufferService::SetPutOffsetChangeCallback(
- Callback1<bool>::Type* callback) {
+ Callback0::Type* callback) {
put_offset_change_callback_.reset(callback);
}
diff --git a/gpu/command_buffer/service/command_buffer_service.h b/gpu/command_buffer/service/command_buffer_service.h
index 9c52531..c388e9f 100644
--- a/gpu/command_buffer/service/command_buffer_service.h
+++ b/gpu/command_buffer/service/command_buffer_service.h
@@ -29,6 +29,7 @@ class CommandBufferService : public CommandBuffer {
virtual bool Initialize(base::SharedMemory* buffer, int32 size);
virtual Buffer GetRingBuffer();
virtual State GetState();
+ virtual State GetLastState();
virtual void Flush(int32 put_offset);
virtual State FlushSync(int32 put_offset, int32 last_known_get);
virtual void SetGetOffset(int32 get_offset);
@@ -50,7 +51,7 @@ class CommandBufferService : public CommandBuffer {
// writer a means of waiting for the reader to make some progress before
// attempting to write more to the command buffer. Takes ownership of
// callback.
- virtual void SetPutOffsetChangeCallback(Callback1<bool>::Type* callback);
+ virtual void SetPutOffsetChangeCallback(Callback0::Type* callback);
virtual void SetParseErrorCallback(Callback0::Type* callback);
private:
@@ -58,7 +59,7 @@ class CommandBufferService : public CommandBuffer {
int32 num_entries_;
int32 get_offset_;
int32 put_offset_;
- scoped_ptr<Callback1<bool>::Type> put_offset_change_callback_;
+ scoped_ptr<Callback0::Type> put_offset_change_callback_;
scoped_ptr<Callback0::Type> parse_error_callback_;
std::vector<Buffer> registered_objects_;
std::set<int32> unused_registered_object_elements_;
diff --git a/gpu/command_buffer/service/common_decoder.cc b/gpu/command_buffer/service/common_decoder.cc
index 35eaf66..7b28603 100644
--- a/gpu/command_buffer/service/common_decoder.cc
+++ b/gpu/command_buffer/service/common_decoder.cc
@@ -330,10 +330,4 @@ error::Error CommonDecoder::HandleGetBucketData(
return error::kNoError;
}
-error::Error CommonDecoder::HandleYieldScheduler(
- uint32 immediate_data_size,
- const cmd::YieldScheduler& args) {
- return error::kYield;
-}
-
} // namespace gpu
diff --git a/gpu/command_buffer/service/common_decoder_unittest.cc b/gpu/command_buffer/service/common_decoder_unittest.cc
index 9b53a56..8f88398 100644
--- a/gpu/command_buffer/service/common_decoder_unittest.cc
+++ b/gpu/command_buffer/service/common_decoder_unittest.cc
@@ -556,11 +556,5 @@ TEST_F(CommonDecoderTest, GetBucketData) {
EXPECT_NE(error::kNoError, ExecuteCmd(cmd));
}
-TEST_F(CommonDecoderTest, YieldScheduler) {
- cmd::YieldScheduler cmd;
- cmd.Init();
- EXPECT_EQ(error::kYield, ExecuteCmd(cmd));
-}
-
} // namespace gpu
diff --git a/gpu/command_buffer/service/gles2_cmd_decoder.cc b/gpu/command_buffer/service/gles2_cmd_decoder.cc
index e989af6..d5bd745 100644
--- a/gpu/command_buffer/service/gles2_cmd_decoder.cc
+++ b/gpu/command_buffer/service/gles2_cmd_decoder.cc
@@ -478,7 +478,6 @@ class GLES2DecoderImpl : public base::SupportsWeakPtr<GLES2DecoderImpl>,
virtual void SetResizeCallback(Callback1<gfx::Size>::Type* callback);
virtual void SetSwapBuffersCallback(Callback0::Type* callback);
- virtual void SetLatchCallback(const base::Callback<void(bool)>& callback);;
virtual bool GetServiceTextureId(uint32 client_texture_id,
uint32* service_texture_id);
@@ -1271,7 +1270,6 @@ class GLES2DecoderImpl : public base::SupportsWeakPtr<GLES2DecoderImpl>,
scoped_ptr<Callback1<gfx::Size>::Type> resize_callback_;
scoped_ptr<Callback0::Type> swap_buffers_callback_;
- base::Callback<void(bool)> latch_callback_;
// The format of the back buffer_
GLenum back_buffer_color_format_;
@@ -2356,11 +2354,6 @@ void GLES2DecoderImpl::SetSwapBuffersCallback(Callback0::Type* callback) {
swap_buffers_callback_.reset(callback);
}
-void GLES2DecoderImpl::SetLatchCallback(
- const base::Callback<void(bool)>& callback) {
- latch_callback_ = callback;
-}
-
bool GLES2DecoderImpl::GetServiceTextureId(uint32 client_texture_id,
uint32* service_texture_id) {
TextureManager::TextureInfo* texture =
@@ -6527,62 +6520,6 @@ error::Error GLES2DecoderImpl::HandleSwapBuffers(
return error::kNoError;
}
-error::Error GLES2DecoderImpl::HandleSetLatchCHROMIUM(
- uint32 immediate_data_size, const gles2::SetLatchCHROMIUM& c) {
- TRACE_EVENT1("gpu", "SetLatch", "latch_id", c.latch_id);
- // Ensure the side effects of previous commands are visible to other contexts.
- // There is no need to do this for ANGLE because it uses a
- // single D3D device for all contexts.
- if (!IsAngle())
- glFlush();
-
- int32 shm_id = gpu::kLatchSharedMemoryId;
- uint32 latch_id = c.latch_id;
- uint32 shm_offset = 0;
- base::subtle::Atomic32* latch;
- if (!SafeMultiplyUint32(latch_id, sizeof(*latch), &shm_offset)) {
- return error::kOutOfBounds;
- }
- latch = GetSharedMemoryAs<base::subtle::Atomic32*>(
- shm_id, shm_offset, sizeof(*latch));
- if (!latch) {
- return error::kOutOfBounds;
- }
- base::subtle::Atomic32 old =
- base::subtle::NoBarrier_CompareAndSwap(latch, 0, 1);
- DCHECK(old == 0);
- if (!latch_callback_.is_null())
- latch_callback_.Run(true);
- return error::kNoError;
-}
-
-error::Error GLES2DecoderImpl::HandleWaitLatchCHROMIUM(
- uint32 immediate_data_size, const gles2::WaitLatchCHROMIUM& c) {
- TRACE_EVENT1("gpu", "WaitLatch", "latch_id", c.latch_id);
- int32 shm_id = gpu::kLatchSharedMemoryId;
- uint32 latch_id = c.latch_id;
- uint32 shm_offset = 0;
- base::subtle::Atomic32* latch;
- if (!SafeMultiplyUint32(latch_id, sizeof(*latch), &shm_offset)) {
- return error::kOutOfBounds;
- }
- latch = GetSharedMemoryAs<base::subtle::Atomic32*>(
- shm_id, shm_offset, sizeof(*latch));
- if (!latch) {
- return error::kOutOfBounds;
- }
-
- base::subtle::Atomic32 old =
- base::subtle::NoBarrier_CompareAndSwap(latch, 1, 0);
- if (old == 0) {
- if (!latch_callback_.is_null())
- latch_callback_.Run(false);
- return error::kWaiting;
- } else {
- return error::kNoError;
- }
-}
-
error::Error GLES2DecoderImpl::HandleCommandBufferEnableCHROMIUM(
uint32 immediate_data_size, const gles2::CommandBufferEnableCHROMIUM& c) {
Bucket* bucket = GetBucket(c.bucket_id);
diff --git a/gpu/command_buffer/service/gles2_cmd_decoder.h b/gpu/command_buffer/service/gles2_cmd_decoder.h
index abd2b85..23c5e3a 100644
--- a/gpu/command_buffer/service/gles2_cmd_decoder.h
+++ b/gpu/command_buffer/service/gles2_cmd_decoder.h
@@ -110,11 +110,6 @@ class GLES2Decoder : public CommonDecoder {
// Sets a callback which is called when a SwapBuffers command is processed.
virtual void SetSwapBuffersCallback(Callback0::Type* callback) = 0;
- // Sets a callback which is called after a Set/WaitLatch command is processed.
- // The bool parameter will be true for SetLatch, and false for a WaitLatch
- // that is blocked. An unblocked WaitLatch will not trigger a callback.
- virtual void SetLatchCallback(const base::Callback<void(bool)>& callback) = 0;
-
// Get the service texture ID corresponding to a client texture ID.
// If no such record is found then return false.
virtual bool GetServiceTextureId(uint32 client_texture_id,
diff --git a/gpu/command_buffer/service/gles2_cmd_decoder_unittest.cc b/gpu/command_buffer/service/gles2_cmd_decoder_unittest.cc
index eadfbcd..b5997f5 100644
--- a/gpu/command_buffer/service/gles2_cmd_decoder_unittest.cc
+++ b/gpu/command_buffer/service/gles2_cmd_decoder_unittest.cc
@@ -2893,65 +2893,6 @@ TEST_F(GLES2DecoderWithShaderTest, VertexAttribPointer) {
}
}
-TEST_F(GLES2DecoderTest, SetLatch) {
- bool isAngle = false;
-#if defined(OS_WIN)
- isAngle = (gfx::GetGLImplementation() == gfx::kGLImplementationEGLGLES2);
-#endif
- if (!isAngle) {
- EXPECT_CALL(*gl_, Flush()).Times(3);
- }
- const uint32 kLatchId = 1;
- base::subtle::Atomic32* latches = static_cast<base::subtle::Atomic32*>(
- shared_memory_base_);
- const uint32 kInvalidLatchId = kSharedBufferSize / sizeof(*latches);
- const uint32 kLastValidLatchId = kInvalidLatchId - 1;
- latches[kLatchId] = 0;
- latches[kLastValidLatchId] = 0;
- SetLatchCHROMIUM cmd;
- // Check out of range latch id.
- cmd.Init(kInvalidLatchId);
- EXPECT_EQ(error::kOutOfBounds, ExecuteCmd(cmd));
- cmd.Init(kLatchId);
- // Check valid latch.
- EXPECT_EQ(0, latches[kLatchId]);
- EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
- EXPECT_EQ(1, latches[kLatchId]);
- // Check last valid latch.
- EXPECT_EQ(0, latches[kLastValidLatchId]);
- cmd.Init(kLastValidLatchId);
- EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
- EXPECT_EQ(1, latches[kLastValidLatchId]);
-}
-
-TEST_F(GLES2DecoderTest, WaitLatch) {
- const uint32 kLatchId = 1;
- base::subtle::Atomic32* latches = static_cast<base::subtle::Atomic32*>(
- shared_memory_base_);
- const uint32 kInvalidLatchId = kSharedBufferSize / sizeof(*latches);
- const uint32 kLastValidLatchId = kInvalidLatchId - 1;
- latches[kLatchId] = 0;
- latches[kLastValidLatchId] = 0;
- WaitLatchCHROMIUM cmd;
- // Check out of range latch id.
- cmd.Init(kInvalidLatchId);
- EXPECT_EQ(error::kOutOfBounds, ExecuteCmd(cmd));
- // Check valid latch.
- cmd.Init(kLatchId);
- EXPECT_EQ(0, latches[kLatchId]);
- EXPECT_EQ(error::kWaiting, ExecuteCmd(cmd));
- latches[kLatchId] = 1;
- EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
- EXPECT_EQ(0, latches[kLatchId]);
- // Check last valid latch.
- cmd.Init(kLastValidLatchId);
- EXPECT_EQ(0, latches[kLastValidLatchId]);
- EXPECT_EQ(error::kWaiting, ExecuteCmd(cmd));
- latches[kLastValidLatchId] = 1;
- EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
- EXPECT_EQ(0, latches[kLastValidLatchId]);
-}
-
TEST_F(GLES2DecoderTest, SetSurfaceCHROMIUMChangesSurfaceForExistentSurface) {
const int kSurfaceId = 1;
scoped_refptr<gfx::GLSurfaceStub> surface(new gfx::GLSurfaceStub);
diff --git a/gpu/command_buffer/service/gles2_cmd_decoder_unittest_2_autogen.h b/gpu/command_buffer/service/gles2_cmd_decoder_unittest_2_autogen.h
index c5f5594..05f80a3 100644
--- a/gpu/command_buffer/service/gles2_cmd_decoder_unittest_2_autogen.h
+++ b/gpu/command_buffer/service/gles2_cmd_decoder_unittest_2_autogen.h
@@ -1712,7 +1712,6 @@ TEST_F(GLES2DecoderTest2, ViewportInvalidArgs3_0) {
// TODO(gman): RequestExtensionCHROMIUM
-// TODO(gman): SetLatchCHROMIUM
-
+// TODO(gman): SetSurfaceCHROMIUM
#endif // GPU_COMMAND_BUFFER_SERVICE_GLES2_CMD_DECODER_UNITTEST_2_AUTOGEN_H_
diff --git a/gpu/command_buffer/service/gles2_cmd_decoder_unittest_3_autogen.h b/gpu/command_buffer/service/gles2_cmd_decoder_unittest_3_autogen.h
index 54bfdf6..cab6b33 100644
--- a/gpu/command_buffer/service/gles2_cmd_decoder_unittest_3_autogen.h
+++ b/gpu/command_buffer/service/gles2_cmd_decoder_unittest_3_autogen.h
@@ -10,9 +10,6 @@
#ifndef GPU_COMMAND_BUFFER_SERVICE_GLES2_CMD_DECODER_UNITTEST_3_AUTOGEN_H_
#define GPU_COMMAND_BUFFER_SERVICE_GLES2_CMD_DECODER_UNITTEST_3_AUTOGEN_H_
-// TODO(gman): WaitLatchCHROMIUM
-
-// TODO(gman): SetSurfaceCHROMIUM
// TODO(gman): GetMultipleIntegervCHROMIUM
// TODO(gman): GetProgramInfoCHROMIUM
diff --git a/gpu/command_buffer/service/gpu_scheduler.cc b/gpu/command_buffer/service/gpu_scheduler.cc
index 9365118..fbdb16b 100644
--- a/gpu/command_buffer/service/gpu_scheduler.cc
+++ b/gpu/command_buffer/service/gpu_scheduler.cc
@@ -19,41 +19,37 @@ using ::base::SharedMemory;
namespace gpu {
-GpuScheduler::GpuScheduler(CommandBuffer* command_buffer,
- SurfaceManager* surface_manager,
- gles2::ContextGroup* group)
- : command_buffer_(command_buffer),
- commands_per_update_(100),
- unscheduled_count_(0),
-#if defined(OS_MACOSX) || defined(TOUCH_UI)
- swap_buffers_count_(0),
- acknowledged_swap_buffers_count_(0),
-#endif
- method_factory_(ALLOW_THIS_IN_INITIALIZER_LIST(this)) {
+GpuScheduler* GpuScheduler::Create(CommandBuffer* command_buffer,
+ SurfaceManager* surface_manager,
+ gles2::ContextGroup* group) {
DCHECK(command_buffer);
- decoder_.reset(gles2::GLES2Decoder::Create(surface_manager, group));
- decoder_->set_engine(this);
+
+ gles2::GLES2Decoder* decoder =
+ gles2::GLES2Decoder::Create(surface_manager, group);
+
+ GpuScheduler* scheduler = new GpuScheduler(command_buffer,
+ decoder,
+ NULL);
+
+ decoder->set_engine(scheduler);
+
if (CommandLine::ForCurrentProcess()->HasSwitch(
switches::kEnableGPUServiceLogging)) {
- decoder_->set_debug(true);
+ decoder->set_debug(true);
}
+
+ return scheduler;
}
-GpuScheduler::GpuScheduler(CommandBuffer* command_buffer,
- gles2::GLES2Decoder* decoder,
- CommandParser* parser,
- int commands_per_update)
- : command_buffer_(command_buffer),
- commands_per_update_(commands_per_update),
- unscheduled_count_(0),
-#if defined(OS_MACOSX) || defined(TOUCH_UI)
- swap_buffers_count_(0),
- acknowledged_swap_buffers_count_(0),
-#endif
- method_factory_(ALLOW_THIS_IN_INITIALIZER_LIST(this)) {
+GpuScheduler* GpuScheduler::CreateForTests(CommandBuffer* command_buffer,
+ gles2::GLES2Decoder* decoder,
+ CommandParser* parser) {
DCHECK(command_buffer);
- decoder_.reset(decoder);
- parser_.reset(parser);
+ GpuScheduler* scheduler = new GpuScheduler(command_buffer,
+ decoder,
+ parser);
+
+ return scheduler;
}
GpuScheduler::~GpuScheduler() {
@@ -82,11 +78,6 @@ bool GpuScheduler::InitializeCommon(
}
#endif
- // Do not limit to a certain number of commands before scheduling another
- // update when rendering onscreen.
- if (!surface->IsOffscreen())
- commands_per_update_ = INT_MAX;
-
// Map the ring buffer and create the parser.
Buffer ring_buffer = command_buffer_->GetRingBuffer();
if (ring_buffer.ptr) {
@@ -144,29 +135,16 @@ const unsigned int kMaxOutstandingSwapBuffersCallsPerOnscreenContext = 1;
}
#endif
-void GpuScheduler::PutChanged(bool sync) {
+void GpuScheduler::PutChanged() {
TRACE_EVENT1("gpu", "GpuScheduler:PutChanged", "this", this);
- CommandBuffer::State state = command_buffer_->GetState();
- parser_->set_put(state.put_offset);
- if (sync)
- ProcessCommands();
- else
- ScheduleProcessCommands();
-}
+ DCHECK(IsScheduled());
-void GpuScheduler::ProcessCommands() {
- TRACE_EVENT1("gpu", "GpuScheduler:ProcessCommands", "this", this);
CommandBuffer::State state = command_buffer_->GetState();
+ parser_->set_put(state.put_offset);
if (state.error != error::kNoError)
return;
- if (unscheduled_count_ > 0) {
- TRACE_EVENT1("gpu", "EarlyOut_Unscheduled",
- "unscheduled_count_", unscheduled_count_);
- return;
- }
-
if (decoder_.get()) {
if (!decoder_->MakeCurrent()) {
LOG(ERROR) << "Context lost because MakeCurrent failed.";
@@ -184,60 +162,30 @@ void GpuScheduler::ProcessCommands() {
#if defined(OS_MACOSX) || defined(TOUCH_UI)
// Don't swamp the browser process with SwapBuffers calls it can't handle.
- if (do_rate_limiting &&
- swap_buffers_count_ - acknowledged_swap_buffers_count_ >=
- kMaxOutstandingSwapBuffersCallsPerOnscreenContext) {
- TRACE_EVENT0("gpu", "EarlyOut_OSX_Throttle");
- // Stop doing work on this command buffer. In the GPU process,
- // receipt of the GpuMsg_AcceleratedSurfaceBuffersSwappedACK
- // message causes ProcessCommands to be scheduled again.
- return;
- }
+ DCHECK(!do_rate_limiting ||
+ swap_buffers_count_ - acknowledged_swap_buffers_count_ == 0);
#endif
- base::TimeTicks start_time = base::TimeTicks::Now();
- base::TimeDelta elapsed;
- bool is_break = false;
error::Error error = error::kNoError;
- do {
- int commands_processed = 0;
- while (commands_processed < commands_per_update_ &&
- !parser_->IsEmpty()) {
- error = parser_->ProcessCommand();
-
- // TODO(piman): various classes duplicate various pieces of state, leading
- // to needlessly complex update logic. It should be possible to simply
- // share the state across all of them.
- command_buffer_->SetGetOffset(static_cast<int32>(parser_->get()));
-
- if (error == error::kWaiting || error == error::kYield) {
- is_break = true;
- break;
- } else if (error::IsError(error)) {
- command_buffer_->SetContextLostReason(decoder_->GetContextLostReason());
- command_buffer_->SetParseError(error);
- return;
- }
-
- if (unscheduled_count_ > 0) {
- is_break = true;
- break;
- }
-
- ++commands_processed;
- if (command_processed_callback_.get()) {
- command_processed_callback_->Run();
- }
+ while (!parser_->IsEmpty()) {
+ error = parser_->ProcessCommand();
+
+ // TODO(piman): various classes duplicate various pieces of state, leading
+ // to needlessly complex update logic. It should be possible to simply
+ // share the state across all of them.
+ command_buffer_->SetGetOffset(static_cast<int32>(parser_->get()));
+
+ if (error::IsError(error)) {
+ command_buffer_->SetContextLostReason(decoder_->GetContextLostReason());
+ command_buffer_->SetParseError(error);
+ return;
}
- elapsed = base::TimeTicks::Now() - start_time;
- } while(!is_break &&
- !parser_->IsEmpty() &&
- elapsed.InMicroseconds() < kMinimumSchedulerQuantumMicros);
-
- if (unscheduled_count_ == 0 &&
- error != error::kWaiting &&
- !parser_->IsEmpty()) {
- ScheduleProcessCommands();
+
+ if (command_processed_callback_.get())
+ command_processed_callback_->Run();
+
+ if (unscheduled_count_ > 0)
+ return;
}
}
@@ -249,12 +197,8 @@ void GpuScheduler::SetScheduled(bool scheduled) {
--unscheduled_count_;
DCHECK_GE(unscheduled_count_, 0);
- if (unscheduled_count_ == 0) {
- if (scheduled_callback_.get())
- scheduled_callback_->Run();
-
- ScheduleProcessCommands();
- }
+ if (unscheduled_count_ == 0 && scheduled_callback_.get())
+ scheduled_callback_->Run();
} else {
++unscheduled_count_;
}
@@ -320,10 +264,18 @@ void GpuScheduler::SetTokenCallback(
set_token_callback_ = callback;
}
-void GpuScheduler::ScheduleProcessCommands() {
- MessageLoop::current()->PostTask(
- FROM_HERE,
- method_factory_.NewRunnableMethod(&GpuScheduler::ProcessCommands));
+GpuScheduler::GpuScheduler(CommandBuffer* command_buffer,
+ gles2::GLES2Decoder* decoder,
+ CommandParser* parser)
+ : command_buffer_(command_buffer),
+ decoder_(decoder),
+ parser_(parser),
+ unscheduled_count_(0),
+#if defined(OS_MACOSX) || defined(TOUCH_UI)
+ swap_buffers_count_(0),
+ acknowledged_swap_buffers_count_(0),
+#endif
+ method_factory_(ALLOW_THIS_IN_INITIALIZER_LIST(this)) {
}
void GpuScheduler::WillResize(gfx::Size size) {
diff --git a/gpu/command_buffer/service/gpu_scheduler.h b/gpu/command_buffer/service/gpu_scheduler.h
index d34e67f..4ebbab9 100644
--- a/gpu/command_buffer/service/gpu_scheduler.h
+++ b/gpu/command_buffer/service/gpu_scheduler.h
@@ -43,20 +43,15 @@ class ContextGroup;
// posts tasks to the current message loop to do additional work.
class GpuScheduler : public CommandBufferEngine {
public:
- // Scheduler quantum: makes ProcessCommands continue until the specified time
- // has passed, or the command buffer yields or runs out of commands.
- static const int kMinimumSchedulerQuantumMicros = 2000;
-
// If a group is not passed in one will be created.
- GpuScheduler(CommandBuffer* command_buffer,
- SurfaceManager* surface_manager,
- gles2::ContextGroup* group);
+ static GpuScheduler* Create(CommandBuffer* command_buffer,
+ SurfaceManager* surface_manager,
+ gles2::ContextGroup* group);
// This constructor is for unit tests.
- GpuScheduler(CommandBuffer* command_buffer,
- gles2::GLES2Decoder* decoder,
- CommandParser* parser,
- int commands_per_update);
+ static GpuScheduler* CreateForTests(CommandBuffer* command_buffer,
+ gles2::GLES2Decoder* decoder,
+ CommandParser* parser);
virtual ~GpuScheduler();
@@ -74,7 +69,7 @@ class GpuScheduler : public CommandBufferEngine {
bool SetParent(GpuScheduler* parent_scheduler, uint32 parent_texture_id);
- void PutChanged(bool sync);
+ void PutChanged();
// Sets whether commands should be processed by this scheduler. Setting to
// false unschedules. Setting to true reschedules. Whether or not the
@@ -152,13 +147,6 @@ class GpuScheduler : public CommandBufferEngine {
void SetCommandProcessedCallback(Callback0::Type* callback);
- // Sets a callback which is called after a Set/WaitLatch command is processed.
- // The bool parameter will be true for SetLatch, and false for a WaitLatch
- // that is blocked. An unblocked WaitLatch will not trigger a callback.
- void SetLatchCallback(const base::Callback<void(bool)>& callback) {
- decoder_->SetLatchCallback(callback);
- }
-
// Sets a callback which is called when set_token() is called, and passes the
// just-set token to the callback. DCHECKs that no callback has previously
// been registered for this notification.
@@ -179,8 +167,10 @@ class GpuScheduler : public CommandBufferEngine {
private:
- // Helper which causes a call to ProcessCommands to be scheduled later.
- void ScheduleProcessCommands();
+ // If a group is not passed in one will be created.
+ GpuScheduler(CommandBuffer* command_buffer,
+ gles2::GLES2Decoder* decoder,
+ CommandParser* parser);
// Called via a callback just before we are supposed to call the
// user's resize callback.
@@ -189,15 +179,12 @@ class GpuScheduler : public CommandBufferEngine {
// Called via a callback just before we are supposed to call the
// user's swap buffers callback.
void WillSwapBuffers();
- void ProcessCommands();
// The GpuScheduler holds a weak reference to the CommandBuffer. The
// CommandBuffer owns the GpuScheduler and holds a strong reference to it
// through the ProcessCommands callback.
CommandBuffer* command_buffer_;
- int commands_per_update_;
-
scoped_ptr<gles2::GLES2Decoder> decoder_;
scoped_ptr<CommandParser> parser_;
diff --git a/gpu/command_buffer/service/gpu_scheduler_unittest.cc b/gpu/command_buffer/service/gpu_scheduler_unittest.cc
index 3d21f90..4fb54b4 100644
--- a/gpu/command_buffer/service/gpu_scheduler_unittest.cc
+++ b/gpu/command_buffer/service/gpu_scheduler_unittest.cc
@@ -44,7 +44,7 @@ class GpuSchedulerTest : public testing::Test {
ON_CALL(*command_buffer_.get(), GetState())
.WillByDefault(Return(default_state));
- async_api_.reset(new StrictMock<SpecializedDoCommandAsyncAPIMock>);
+ async_api_.reset(new StrictMock<AsyncAPIMock>);
decoder_ = new gles2::MockGLES2Decoder();
@@ -55,10 +55,9 @@ class GpuSchedulerTest : public testing::Test {
0,
async_api_.get());
- scheduler_.reset(new GpuScheduler(command_buffer_.get(),
- decoder_,
- parser_,
- 2));
+ scheduler_.reset(gpu::GpuScheduler::CreateForTests(command_buffer_.get(),
+ decoder_,
+ parser_));
EXPECT_CALL(*decoder_, Destroy())
.Times(1)
@@ -97,7 +96,7 @@ TEST_F(GpuSchedulerTest, SchedulerDoesNothingIfRingBufferIsEmpty) {
EXPECT_CALL(*command_buffer_, SetParseError(_))
.Times(0);
- scheduler_->PutChanged(true);
+ scheduler_->PutChanged();
}
TEST_F(GpuSchedulerTest, ProcessesOneCommand) {
@@ -119,7 +118,7 @@ TEST_F(GpuSchedulerTest, ProcessesOneCommand) {
EXPECT_CALL(*command_buffer_, SetParseError(_))
.Times(0);
- scheduler_->PutChanged(true);
+ scheduler_->PutChanged();
}
TEST_F(GpuSchedulerTest, ProcessesTwoCommands) {
@@ -144,7 +143,7 @@ TEST_F(GpuSchedulerTest, ProcessesTwoCommands) {
.WillOnce(Return(error::kNoError));
EXPECT_CALL(*command_buffer_, SetGetOffset(3));
- scheduler_->PutChanged(true);
+ scheduler_->PutChanged();
}
TEST_F(GpuSchedulerTest, SchedulerSetsTheGLContext) {
@@ -157,48 +156,7 @@ TEST_F(GpuSchedulerTest, SchedulerSetsTheGLContext) {
EXPECT_CALL(*command_buffer_, GetState())
.WillRepeatedly(Return(state));
- scheduler_->PutChanged(true);
-}
-
-TEST_F(GpuSchedulerTest, PostsTaskToFinishRemainingCommands) {
- unsigned int pauseCmd = SpecializedDoCommandAsyncAPIMock::kTestQuantumCommand;
- CommandHeader* header = reinterpret_cast<CommandHeader*>(&buffer_[0]);
- header[0].command = 7;
- header[0].size = 2;
- buffer_[1] = 123;
- header[2].command = pauseCmd;
- header[2].size = 1;
- header[3].command = 9;
- header[3].size = 1;
-
- CommandBuffer::State state;
-
- state.put_offset = 4;
- EXPECT_CALL(*command_buffer_, GetState())
- .WillRepeatedly(Return(state));
-
- EXPECT_CALL(*async_api_, DoCommand(7, 1, &buffer_[0]))
- .WillOnce(Return(error::kNoError));
- EXPECT_CALL(*command_buffer_, SetGetOffset(2));
-
- EXPECT_CALL(*async_api_, DoCommand(pauseCmd, 0, &buffer_[2]))
- .WillOnce(Return(error::kNoError));
- EXPECT_CALL(*command_buffer_, SetGetOffset(3));
-
- scheduler_->PutChanged(true);
-
- // ProcessCommands is called a second time when the pending task is run.
-
- state.put_offset = 4;
- EXPECT_CALL(*command_buffer_, GetState())
- .WillRepeatedly(Return(state));
-
- EXPECT_CALL(*async_api_, DoCommand(9, 0, &buffer_[3]))
- .WillOnce(Return(error::kNoError));
-
- EXPECT_CALL(*command_buffer_, SetGetOffset(4));
-
- MessageLoop::current()->RunAllPending();
+ scheduler_->PutChanged();
}
TEST_F(GpuSchedulerTest, SetsErrorCodeOnCommandBuffer) {
@@ -222,7 +180,7 @@ TEST_F(GpuSchedulerTest, SetsErrorCodeOnCommandBuffer) {
EXPECT_CALL(*command_buffer_,
SetParseError(error::kUnknownCommand));
- scheduler_->PutChanged(true);
+ scheduler_->PutChanged();
}
TEST_F(GpuSchedulerTest, ProcessCommandsDoesNothingAfterError) {
@@ -232,7 +190,7 @@ TEST_F(GpuSchedulerTest, ProcessCommandsDoesNothingAfterError) {
EXPECT_CALL(*command_buffer_, GetState())
.WillRepeatedly(Return(state));
- scheduler_->PutChanged(true);
+ scheduler_->PutChanged();
}
TEST_F(GpuSchedulerTest, CanGetAddressOfSharedMemory) {
diff --git a/gpu/command_buffer/service/mocks.cc b/gpu/command_buffer/service/mocks.cc
index 70898b3..46a8977 100644
--- a/gpu/command_buffer/service/mocks.cc
+++ b/gpu/command_buffer/service/mocks.cc
@@ -27,25 +27,6 @@ void AsyncAPIMock::SetToken(unsigned int command,
engine_->set_token(args->token);
}
-SpecializedDoCommandAsyncAPIMock::SpecializedDoCommandAsyncAPIMock() {}
-
-SpecializedDoCommandAsyncAPIMock::~SpecializedDoCommandAsyncAPIMock() {}
-
-error::Error SpecializedDoCommandAsyncAPIMock::DoCommand(
- unsigned int command,
- unsigned int arg_count,
- const void* cmd_data) {
- if (command == kTestQuantumCommand) {
- // Surpass the GpuScheduler scheduling quantum.
- base::TimeTicks start_time = base::TimeTicks::Now();
- while ((base::TimeTicks::Now() - start_time).InMicroseconds() <
- GpuScheduler::kMinimumSchedulerQuantumMicros) {
- base::PlatformThread::Sleep(1);
- }
- }
- return AsyncAPIMock::DoCommand(command, arg_count, cmd_data);
-}
-
namespace gles2 {
MockShaderTranslator::MockShaderTranslator() {}
diff --git a/gpu/command_buffer/service/mocks.h b/gpu/command_buffer/service/mocks.h
index f526c01..0d341bd 100644
--- a/gpu/command_buffer/service/mocks.h
+++ b/gpu/command_buffer/service/mocks.h
@@ -69,20 +69,6 @@ class AsyncAPIMock : public AsyncAPIInterface {
CommandBufferEngine *engine_;
};
-// Allows specialized behavior per command in DoCommand.
-class SpecializedDoCommandAsyncAPIMock : public AsyncAPIMock {
- public:
- // Cause DoCommand to sleep more than the GpuScheduler time quantum.
- static const unsigned int kTestQuantumCommand = 333;
-
- SpecializedDoCommandAsyncAPIMock();
- virtual ~SpecializedDoCommandAsyncAPIMock();
-
- virtual error::Error DoCommand(unsigned int command,
- unsigned int arg_count,
- const void* cmd_data);
-};
-
namespace gles2 {
class MockShaderTranslator : public ShaderTranslatorInterface {