author     vmiura@chromium.org <vmiura@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>   2014-02-12 00:56:00 +0000
committer  vmiura@chromium.org <vmiura@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>   2014-02-12 00:56:00 +0000
commit     15691b4073e449b8ad6eeca889a40b157fe14e94 (patch)
tree       342497925c6f34de48b5d895f5a3baec64e0fc27 /gpu
parent     1d4d65dd6c49df9ffe0a43df0cdb6777029c2342 (diff)
Optimize CommandBufferHelper::GetSpace().
Previously GetSpace() made several calls on every command:
- usable() - check whether the buffer is usable.
- AllocateRingBuffer() - allocate the ring buffer if not already allocated.
- WaitForAvailableEntries() - yet more checks.
This change short-circuits the common case so that GetSpace() only checks
immediate_entry_count_ and commands_issued_, while preserving the old
internal flushing behavior (see the sketch below).
immediate_entry_count_ is 0 if the buffer is unusable or unallocated;
otherwise it is limited to min(immediate contiguous free space, force-flush
limit).
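For reference, a condensed excerpt of the new CalcImmediateEntries() from the
patch (DCHECKs trimmed, comments added):

  void CommandBufferHelper::CalcImmediateEntries(int waiting_count) {
    if (!usable() || !HaveRingBuffer()) {
      immediate_entry_count_ = 0;
      return;
    }
    // Contiguous entries available between put_ and the reader (or buffer end).
    const int32 curr_get = get_offset();
    if (curr_get > put_)
      immediate_entry_count_ = curr_get - put_ - 1;
    else
      immediate_entry_count_ =
          total_entry_count_ - put_ - (curr_get == 0 ? 1 : 0);
    if (flush_automatically_) {
      // Flush sooner when the reader looks idle (it has consumed everything
      // sent so far), later when it is still busy.
      int32 limit = total_entry_count_ /
          ((curr_get == last_put_sent_) ? kAutoFlushSmall : kAutoFlushBig);
      int32 pending =
          (put_ + total_entry_count_ - last_put_sent_) % total_entry_count_;
      if (pending > 0 && pending >= limit) {
        immediate_entry_count_ = 0;  // force a flush on the next command
      } else {
        // Never drop below waiting_count, or a command larger than the flush
        // limit could deadlock.
        limit -= pending;
        if (limit < waiting_count)
          limit = waiting_count;
        if (immediate_entry_count_ > limit)
          immediate_entry_count_ = limit;
      }
    }
  }

With the 32-entry ring buffer used by the unit tests, for example, an idle
reader (curr_get == last_put_sent_) caps immediate_entry_count_ at
32 / kAutoFlushSmall = 2 entries, while a busy reader allows up to
32 / kAutoFlushBig = 16.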
commands_issued_ is counted only on non-Android platforms; every
kCommandsPerFlushCheck commands it triggers a check for the periodic
force-flush condition.
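The corresponding check, condensed from the patch (compiled only when
CMD_HELPER_PERIODIC_FLUSH_CHECK is defined, i.e. on non-Android builds):

  // Inside GetSpace(), before the space calculation:
  ++commands_issued_;
  if (flush_automatically_ &&
      (commands_issued_ % kCommandsPerFlushCheck == 0)) {
    PeriodicFlushCheck();
  }

  // Flushes if a "reasonable" amount of wall-clock time has passed, so the
  // command stream can be pre-empted by another context on high-end machines.
  void CommandBufferHelper::PeriodicFlushCheck() {
    clock_t current_time = clock();
    if (current_time - last_flush_time_ > kPeriodicFlushDelay * CLOCKS_PER_SEC)
      Flush();
  }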
BUG=340362
Review URL: https://codereview.chromium.org/141133010
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@250582 0039d316-1c4b-4281-b951-d872f2087c98
Diffstat (limited to 'gpu')
-rw-r--r--   gpu/command_buffer/client/cmd_buffer_helper.cc        | 138
-rw-r--r--   gpu/command_buffer/client/cmd_buffer_helper.h         |  58
-rw-r--r--   gpu/command_buffer/client/cmd_buffer_helper_test.cc   | 217
3 files changed, 351 insertions, 62 deletions
diff --git a/gpu/command_buffer/client/cmd_buffer_helper.cc b/gpu/command_buffer/client/cmd_buffer_helper.cc index 11219ed..693063d 100644 --- a/gpu/command_buffer/client/cmd_buffer_helper.cc +++ b/gpu/command_buffer/client/cmd_buffer_helper.cc @@ -12,22 +12,19 @@ namespace gpu { -const int kCommandsPerFlushCheck = 100; - -#if !defined(OS_ANDROID) -const double kFlushDelay = 1.0 / (5.0 * 60.0); -#endif - CommandBufferHelper::CommandBufferHelper(CommandBuffer* command_buffer) : command_buffer_(command_buffer), ring_buffer_id_(-1), ring_buffer_size_(0), entries_(NULL), total_entry_count_(0), + immediate_entry_count_(0), token_(0), put_(0), last_put_sent_(0), +#if defined(CMD_HELPER_PERIODIC_FLUSH_CHECK) commands_issued_(0), +#endif usable_(true), context_lost_(false), flush_automatically_(true), @@ -36,6 +33,7 @@ CommandBufferHelper::CommandBufferHelper(CommandBuffer* command_buffer) void CommandBufferHelper::SetAutomaticFlushes(bool enabled) { flush_automatically_ = enabled; + CalcImmediateEntries(0); } bool CommandBufferHelper::IsContextLost() { @@ -45,6 +43,47 @@ bool CommandBufferHelper::IsContextLost() { return context_lost_; } +void CommandBufferHelper::CalcImmediateEntries(int waiting_count) { + DCHECK_GE(waiting_count, 0); + + // Check if usable & allocated. + if (!usable() || !HaveRingBuffer()) { + immediate_entry_count_ = 0; + return; + } + + // Get maximum safe contiguous entries. + const int32 curr_get = get_offset(); + if (curr_get > put_) { + immediate_entry_count_ = curr_get - put_ - 1; + } else { + immediate_entry_count_ = + total_entry_count_ - put_ - (curr_get == 0 ? 1 : 0); + } + + // Limit entry count to force early flushing. + if (flush_automatically_) { + int32 limit = + total_entry_count_ / + ((curr_get == last_put_sent_) ? kAutoFlushSmall : kAutoFlushBig); + + int32 pending = + (put_ + total_entry_count_ - last_put_sent_) % total_entry_count_; + + if (pending > 0 && pending >= limit) { + // Time to force flush. + immediate_entry_count_ = 0; + } else { + // Limit remaining entries, but not lower than waiting_count entries to + // prevent deadlock when command size is greater than the flush limit. + limit -= pending; + limit = limit < waiting_count ? waiting_count : limit; + immediate_entry_count_ = + immediate_entry_count_ > limit ? limit : immediate_entry_count_; + } + } +} + bool CommandBufferHelper::AllocateRingBuffer() { if (!usable()) { return false; @@ -78,6 +117,7 @@ bool CommandBufferHelper::AllocateRingBuffer() { total_entry_count_ = num_ring_buffer_entries; put_ = state.put_offset; + CalcImmediateEntries(0); return true; } @@ -85,6 +125,7 @@ void CommandBufferHelper::FreeResources() { if (HaveRingBuffer()) { command_buffer_->DestroyTransferBuffer(ring_buffer_id_); ring_buffer_id_ = -1; + CalcImmediateEntries(0); } } @@ -107,20 +148,39 @@ bool CommandBufferHelper::FlushSync() { if (!usable()) { return false; } + + // Wrap put_ before flush. + if (put_ == total_entry_count_) + put_ = 0; + last_flush_time_ = clock(); last_put_sent_ = put_; CommandBuffer::State state = command_buffer_->FlushSync(put_, get_offset()); + CalcImmediateEntries(0); return state.error == error::kNoError; } void CommandBufferHelper::Flush() { + // Wrap put_ before flush. 
+ if (put_ == total_entry_count_) + put_ = 0; + if (usable() && last_put_sent_ != put_) { last_flush_time_ = clock(); last_put_sent_ = put_; command_buffer_->Flush(put_); + CalcImmediateEntries(0); } } +#if defined(CMD_HELPER_PERIODIC_FLUSH_CHECK) +void CommandBufferHelper::PeriodicFlushCheck() { + clock_t current_time = clock(); + if (current_time - last_flush_time_ > kPeriodicFlushDelay * CLOCKS_PER_SEC) + Flush(); +} +#endif + // Calls Flush() and then waits until the buffer is empty. Break early if the // error is set. bool CommandBufferHelper::Finish() { @@ -209,13 +269,15 @@ void CommandBufferHelper::WaitForAvailableEntries(int32 count) { // but we need to make sure get wraps first, actually that get is 1 or // more (since put will wrap to 0 after we add the noops). DCHECK_LE(1, put_); - if (get_offset() > put_ || get_offset() == 0) { + int32 curr_get = get_offset(); + if (curr_get > put_ || curr_get == 0) { TRACE_EVENT0("gpu", "CommandBufferHelper::WaitForAvailableEntries"); - while (get_offset() > put_ || get_offset() == 0) { + while (curr_get > put_ || curr_get == 0) { // Do not loop forever if the flush fails, meaning the command buffer // reader has shutdown. if (!FlushSync()) return; + curr_get = get_offset(); } } // Insert Noops to fill out the buffer. @@ -228,52 +290,26 @@ void CommandBufferHelper::WaitForAvailableEntries(int32 count) { } put_ = 0; } - if (AvailableEntries() < count) { - TRACE_EVENT0("gpu", "CommandBufferHelper::WaitForAvailableEntries1"); - while (AvailableEntries() < count) { - // Do not loop forever if the flush fails, meaning the command buffer - // reader has shutdown. - if (!FlushSync()) - return; - } - } - // Force a flush if the buffer is getting half full, or even earlier if the - // reader is known to be idle. - int32 pending = - (put_ + total_entry_count_ - last_put_sent_) % total_entry_count_; - int32 limit = total_entry_count_ / - ((get_offset() == last_put_sent_) ? 16 : 2); - if (pending > limit) { + + // Try to get 'count' entries without flushing. + CalcImmediateEntries(count); + if (immediate_entry_count_ < count) { + // Try again with a shallow Flush(). Flush(); - } else if (flush_automatically_ && - (commands_issued_ % kCommandsPerFlushCheck == 0)) { -#if !defined(OS_ANDROID) - // Allow this command buffer to be pre-empted by another if a "reasonable" - // amount of work has been done. On highend machines, this reduces the - // latency of GPU commands. However, on Android, this can cause the - // kernel to thrash between generating GPU commands and executing them. - clock_t current_time = clock(); - if (current_time - last_flush_time_ > kFlushDelay * CLOCKS_PER_SEC) - Flush(); -#endif + CalcImmediateEntries(count); + if (immediate_entry_count_ < count) { + // Buffer is full. Need to wait for entries. + TRACE_EVENT0("gpu", "CommandBufferHelper::WaitForAvailableEntries1"); + while (immediate_entry_count_ < count) { + // Do not loop forever if the flush fails, meaning the command buffer + // reader has shutdown. 
+ if (!FlushSync()) + return; + CalcImmediateEntries(count); + } + } } } -CommandBufferEntry* CommandBufferHelper::GetSpace(uint32 entries) { - AllocateRingBuffer(); - if (!usable()) { - return NULL; - } - DCHECK(HaveRingBuffer()); - ++commands_issued_; - WaitForAvailableEntries(entries); - CommandBufferEntry* space = &entries_[put_]; - put_ += entries; - DCHECK_LE(put_, total_entry_count_); - if (put_ == total_entry_count_) { - put_ = 0; - } - return space; -} } // namespace gpu diff --git a/gpu/command_buffer/client/cmd_buffer_helper.h b/gpu/command_buffer/client/cmd_buffer_helper.h index a50dc7b..513e00f 100644 --- a/gpu/command_buffer/client/cmd_buffer_helper.h +++ b/gpu/command_buffer/client/cmd_buffer_helper.h @@ -17,6 +17,15 @@ namespace gpu { +#if !defined(OS_ANDROID) +#define CMD_HELPER_PERIODIC_FLUSH_CHECK +const int kCommandsPerFlushCheck = 100; +const float kPeriodicFlushDelay = 1.0f / (5.0f * 60.0f); +#endif + +const int kAutoFlushSmall = 16; // 1/16 of the buffer +const int kAutoFlushBig = 2; // 1/2 of the buffer + // Command buffer helper class. This class simplifies ring buffer management: // it will allocate the buffer, give it to the buffer interface, and let the // user add commands to it, while taking care of the synchronization (put and @@ -92,14 +101,43 @@ class GPU_EXPORT CommandBufferHelper { // Called prior to each command being issued. Waits for a certain amount of // space to be available. Returns address of space. - CommandBufferEntry* GetSpace(uint32 entries); + CommandBufferEntry* GetSpace(int32 entries) { +#if defined(CMD_HELPER_PERIODIC_FLUSH_CHECK) + // Allow this command buffer to be pre-empted by another if a "reasonable" + // amount of work has been done. On highend machines, this reduces the + // latency of GPU commands. However, on Android, this can cause the + // kernel to thrash between generating GPU commands and executing them. + ++commands_issued_; + if (flush_automatically_ && + (commands_issued_ % kCommandsPerFlushCheck == 0)) { + PeriodicFlushCheck(); + } +#endif + + // Test for immediate entries. + if (entries > immediate_entry_count_) { + WaitForAvailableEntries(entries); + if (entries > immediate_entry_count_) + return NULL; + } + + DCHECK_LE(entries, immediate_entry_count_); + + // Allocate space and advance put_. + CommandBufferEntry* space = &entries_[put_]; + put_ += entries; + immediate_entry_count_ -= entries; + + DCHECK_LE(put_, total_entry_count_); + return space; + } // Typed version of GetSpace. Gets enough room for the given type and returns // a reference to it. 
template <typename T> T* GetCmdSpace() { COMPILE_ASSERT(T::kArgFlags == cmd::kFixed, Cmd_kArgFlags_not_kFixed); - uint32 space_needed = ComputeNumEntries(sizeof(T)); + int32 space_needed = ComputeNumEntries(sizeof(T)); void* data = GetSpace(space_needed); return reinterpret_cast<T*>(data); } @@ -108,7 +146,7 @@ class GPU_EXPORT CommandBufferHelper { template <typename T> T* GetImmediateCmdSpace(size_t data_space) { COMPILE_ASSERT(T::kArgFlags == cmd::kAtLeastN, Cmd_kArgFlags_not_kAtLeastN); - uint32 space_needed = ComputeNumEntries(sizeof(T) + data_space); + int32 space_needed = ComputeNumEntries(sizeof(T) + data_space); void* data = GetSpace(space_needed); return reinterpret_cast<T*>(data); } @@ -117,7 +155,7 @@ class GPU_EXPORT CommandBufferHelper { template <typename T> T* GetImmediateCmdSpaceTotalSize(size_t total_space) { COMPILE_ASSERT(T::kArgFlags == cmd::kAtLeastN, Cmd_kArgFlags_not_kAtLeastN); - uint32 space_needed = ComputeNumEntries(total_space); + int32 space_needed = ComputeNumEntries(total_space); void* data = GetSpace(space_needed); return reinterpret_cast<T*>(data); } @@ -230,6 +268,7 @@ class GPU_EXPORT CommandBufferHelper { void ClearUsable() { usable_ = false; + CalcImmediateEntries(0); } private: @@ -241,19 +280,30 @@ class GPU_EXPORT CommandBufferHelper { return (get_offset() - put_ - 1 + total_entry_count_) % total_entry_count_; } + void CalcImmediateEntries(int waiting_count); bool AllocateRingBuffer(); void FreeResources(); +#if defined(CMD_HELPER_PERIODIC_FLUSH_CHECK) + // Calls Flush if automatic flush conditions are met. + void PeriodicFlushCheck(); +#endif + CommandBuffer* command_buffer_; int32 ring_buffer_id_; int32 ring_buffer_size_; Buffer ring_buffer_; CommandBufferEntry* entries_; int32 total_entry_count_; // the total number of entries + int32 immediate_entry_count_; int32 token_; int32 put_; int32 last_put_sent_; + +#if defined(CMD_HELPER_PERIODIC_FLUSH_CHECK) int commands_issued_; +#endif + bool usable_; bool context_lost_; bool flush_automatically_; diff --git a/gpu/command_buffer/client/cmd_buffer_helper_test.cc b/gpu/command_buffer/client/cmd_buffer_helper_test.cc index 130cd64..184f8e0 100644 --- a/gpu/command_buffer/client/cmd_buffer_helper_test.cc +++ b/gpu/command_buffer/client/cmd_buffer_helper_test.cc @@ -4,8 +4,11 @@ // Tests for the Command Buffer Helper. +#include <list> + #include "base/bind.h" #include "base/bind_helpers.h" +#include "base/memory/linked_ptr.h" #include "base/message_loop/message_loop.h" #include "gpu/command_buffer/client/cmd_buffer_helper.h" #include "gpu/command_buffer/service/command_buffer_service.h" @@ -28,7 +31,7 @@ using testing::DoAll; using testing::Invoke; using testing::_; -const int32 kTotalNumCommandEntries = 10; +const int32 kTotalNumCommandEntries = 32; const int32 kCommandBufferSizeBytes = kTotalNumCommandEntries * sizeof(CommandBufferEntry); const int32 kUnusedCommandId = 5; // we use 0 and 2 currently. @@ -89,17 +92,22 @@ class CommandBufferHelperTest : public testing::Test { helper_.reset(new CommandBufferHelper(command_buffer_.get())); helper_->Initialize(kCommandBufferSizeBytes); + + test_command_next_id_ = kUnusedCommandId; } virtual void TearDown() { // If the GpuScheduler posts any tasks, this forces them to run. 
base::MessageLoop::current()->RunUntilIdle(); + test_command_args_.clear(); } const CommandParser* GetParser() const { return gpu_scheduler_->parser(); } + int32 ImmediateEntryCount() const { return helper_->immediate_entry_count_; } + // Adds a command to the buffer through the helper, while adding it as an // expected call on the API mock. void AddCommandWithExpect(error::Error _return, @@ -122,6 +130,25 @@ class CommandBufferHelperTest : public testing::Test { .WillOnce(Return(_return)); } + void AddUniqueCommandWithExpect(error::Error _return, int cmd_size) { + EXPECT_GE(cmd_size, 1); + EXPECT_LT(cmd_size, kTotalNumCommandEntries); + int arg_count = cmd_size - 1; + + // Allocate array for args. + linked_ptr<std::vector<CommandBufferEntry> > args_ptr( + new std::vector<CommandBufferEntry>(arg_count ? arg_count : 1)); + + for (int32 ii = 0; ii < arg_count; ++ii) { + (*args_ptr)[ii].value_uint32 = 0xF00DF00D + ii; + } + + // Add command and save args in test_command_args_ until the test completes. + AddCommandWithExpect( + _return, test_command_next_id_++, arg_count, &(*args_ptr)[0]); + test_command_args_.insert(test_command_args_.end(), args_ptr); + } + void TestCommandWrappingFull(int32 cmd_size, int32 start_commands) { const int32 num_args = cmd_size - 1; EXPECT_EQ(kTotalNumCommandEntries % cmd_size, 0); @@ -131,7 +158,7 @@ class CommandBufferHelperTest : public testing::Test { args[ii].value_uint32 = ii + 1; } - // Initially insert commands up to start_commands and Finish() + // Initially insert commands up to start_commands and Finish(). for (int32 ii = 0; ii < start_commands; ++ii) { AddCommandWithExpect( error::kNoError, ii + kUnusedCommandId, num_args, &args[0]); @@ -143,10 +170,10 @@ class CommandBufferHelperTest : public testing::Test { EXPECT_EQ(GetParser()->get(), (start_commands * cmd_size) % kTotalNumCommandEntries); - // Lock flushing to force the buffer to get full + // Lock flushing to force the buffer to get full. command_buffer_->LockFlush(); - // Add enough commands to over fill the buffer + // Add enough commands to over fill the buffer. for (int32 ii = 0; ii < kTotalNumCommandEntries / cmd_size + 2; ++ii) { AddCommandWithExpect(error::kNoError, start_commands + ii + kUnusedCommandId, @@ -154,7 +181,7 @@ class CommandBufferHelperTest : public testing::Test { &args[0]); } - // Flush all commands + // Flush all commands. command_buffer_->UnlockFlush(); helper_->Finish(); @@ -196,6 +223,10 @@ class CommandBufferHelperTest : public testing::Test { return command_buffer_->GetState().put_offset; } + int32 GetHelperGetOffset() { return helper_->get_offset(); } + + int32 GetHelperPutOffset() { return helper_->put_; } + error::Error GetError() { return command_buffer_->GetState().error; } @@ -211,9 +242,173 @@ class CommandBufferHelperTest : public testing::Test { scoped_ptr<CommandBufferServiceLocked> command_buffer_; scoped_ptr<GpuScheduler> gpu_scheduler_; scoped_ptr<CommandBufferHelper> helper_; + std::list<linked_ptr<std::vector<CommandBufferEntry> > > test_command_args_; + unsigned int test_command_next_id_; Sequence sequence_; }; +// Checks immediate_entry_count_ changes based on 'usable' state. +TEST_F(CommandBufferHelperTest, TestCalcImmediateEntriesNotUsable) { + // Auto flushing mode is tested separately. 
+ helper_->SetAutomaticFlushes(false); + EXPECT_EQ(helper_->usable(), true); + EXPECT_EQ(ImmediateEntryCount(), kTotalNumCommandEntries - 1); + helper_->ClearUsable(); + EXPECT_EQ(ImmediateEntryCount(), 0); +} + +// Checks immediate_entry_count_ changes based on RingBuffer state. +TEST_F(CommandBufferHelperTest, TestCalcImmediateEntriesNoRingBuffer) { + helper_->SetAutomaticFlushes(false); + EXPECT_EQ(ImmediateEntryCount(), kTotalNumCommandEntries - 1); + helper_->FreeRingBuffer(); + EXPECT_EQ(ImmediateEntryCount(), 0); +} + +// Checks immediate_entry_count_ calc when Put >= Get and Get == 0. +TEST_F(CommandBufferHelperTest, TestCalcImmediateEntriesGetAtZero) { + // No internal auto flushing. + helper_->SetAutomaticFlushes(false); + command_buffer_->LockFlush(); + + // Start at Get = Put = 0. + EXPECT_EQ(GetHelperPutOffset(), 0); + EXPECT_EQ(GetHelperGetOffset(), 0); + + // Immediate count should be 1 less than the end of the buffer. + EXPECT_EQ(ImmediateEntryCount(), kTotalNumCommandEntries - 1); + AddUniqueCommandWithExpect(error::kNoError, 2); + EXPECT_EQ(ImmediateEntryCount(), kTotalNumCommandEntries - 3); + + helper_->Finish(); + + // Check that the commands did happen. + Mock::VerifyAndClearExpectations(api_mock_.get()); + + // Check the error status. + EXPECT_EQ(error::kNoError, GetError()); +} + +// Checks immediate_entry_count_ calc when Put >= Get and Get > 0. +TEST_F(CommandBufferHelperTest, TestCalcImmediateEntriesGetInMiddle) { + // No internal auto flushing. + helper_->SetAutomaticFlushes(false); + command_buffer_->LockFlush(); + + // Move to Get = Put = 2. + AddUniqueCommandWithExpect(error::kNoError, 2); + helper_->Finish(); + EXPECT_EQ(GetHelperPutOffset(), 2); + EXPECT_EQ(GetHelperGetOffset(), 2); + + // Immediate count should be up to the end of the buffer. + EXPECT_EQ(ImmediateEntryCount(), kTotalNumCommandEntries - 2); + AddUniqueCommandWithExpect(error::kNoError, 2); + EXPECT_EQ(ImmediateEntryCount(), kTotalNumCommandEntries - 4); + + helper_->Finish(); + + // Check that the commands did happen. + Mock::VerifyAndClearExpectations(api_mock_.get()); + + // Check the error status. + EXPECT_EQ(error::kNoError, GetError()); +} + +// Checks immediate_entry_count_ calc when Put < Get. +TEST_F(CommandBufferHelperTest, TestCalcImmediateEntriesGetBeforePut) { + // Move to Get = kTotalNumCommandEntries / 4, Put = 0. + const int kInitGetOffset = kTotalNumCommandEntries / 4; + helper_->SetAutomaticFlushes(false); + command_buffer_->LockFlush(); + AddUniqueCommandWithExpect(error::kNoError, kInitGetOffset); + helper_->Finish(); + AddUniqueCommandWithExpect(error::kNoError, + kTotalNumCommandEntries - kInitGetOffset); + + // Flush instead of Finish will let Put wrap without the command buffer + // immediately processing the data between Get and Put. + helper_->Flush(); + + EXPECT_EQ(GetHelperGetOffset(), kInitGetOffset); + EXPECT_EQ(GetHelperPutOffset(), 0); + + // Immediate count should be up to Get - 1. + EXPECT_EQ(ImmediateEntryCount(), kInitGetOffset - 1); + AddUniqueCommandWithExpect(error::kNoError, 2); + EXPECT_EQ(ImmediateEntryCount(), kInitGetOffset - 3); + + helper_->Finish(); + // Check that the commands did happen. + Mock::VerifyAndClearExpectations(api_mock_.get()); + + // Check the error status. + EXPECT_EQ(error::kNoError, GetError()); +} + +// Checks immediate_entry_count_ calc when automatic flushing is enabled. +TEST_F(CommandBufferHelperTest, TestCalcImmediateEntriesAutoFlushing) { + command_buffer_->LockFlush(); + + // Start at Get = Put = 0. 
+ EXPECT_EQ(GetHelperPutOffset(), 0); + EXPECT_EQ(GetHelperGetOffset(), 0); + + // Without auto flushes, up to kTotalNumCommandEntries - 1 is available. + helper_->SetAutomaticFlushes(false); + EXPECT_EQ(ImmediateEntryCount(), kTotalNumCommandEntries - 1); + + // With auto flushes, and Get == Last Put, + // up to kTotalNumCommandEntries / kAutoFlushSmall is available. + helper_->SetAutomaticFlushes(true); + EXPECT_EQ(ImmediateEntryCount(), kTotalNumCommandEntries / kAutoFlushSmall); + + // With auto flushes, and Get != Last Put, + // up to kTotalNumCommandEntries / kAutoFlushBig is available. + AddUniqueCommandWithExpect(error::kNoError, 2); + helper_->Flush(); + EXPECT_EQ(ImmediateEntryCount(), kTotalNumCommandEntries / kAutoFlushBig); + + helper_->Finish(); + // Check that the commands did happen. + Mock::VerifyAndClearExpectations(api_mock_.get()); + + // Check the error status. + EXPECT_EQ(error::kNoError, GetError()); +} + +// Checks immediate_entry_count_ calc when automatic flushing is enabled, and +// we allocate commands over the immediate_entry_count_ size. +TEST_F(CommandBufferHelperTest, TestCalcImmediateEntriesOverFlushLimit) { + // Lock internal flushing. + command_buffer_->LockFlush(); + + // Start at Get = Put = 0. + EXPECT_EQ(GetHelperPutOffset(), 0); + EXPECT_EQ(GetHelperGetOffset(), 0); + + // Pre-check ImmediateEntryCount is limited with automatic flushing enabled. + helper_->SetAutomaticFlushes(true); + EXPECT_EQ(ImmediateEntryCount(), kTotalNumCommandEntries / kAutoFlushSmall); + + // Add a command larger than ImmediateEntryCount(). + AddUniqueCommandWithExpect(error::kNoError, ImmediateEntryCount() + 1); + + // ImmediateEntryCount() should now be 0, to force a flush check on the next + // command. + EXPECT_EQ(ImmediateEntryCount(), 0); + + // Add a command when ImmediateEntryCount() == 0. + AddUniqueCommandWithExpect(error::kNoError, ImmediateEntryCount() + 1); + + helper_->Finish(); + // Check that the commands did happen. + Mock::VerifyAndClearExpectations(api_mock_.get()); + + // Check the error status. + EXPECT_EQ(error::kNoError, GetError()); +} + // Checks that commands in the buffer are properly executed, and that the // status/error stay valid. TEST_F(CommandBufferHelperTest, TestCommandProcessing) { @@ -251,12 +446,16 @@ TEST_F(CommandBufferHelperTest, TestCommandProcessing) { // Checks that commands in the buffer are properly executed when wrapping the // buffer, and that the status/error stay valid. TEST_F(CommandBufferHelperTest, TestCommandWrapping) { - // Add 5 commands of size 3 through the helper to make sure we do wrap. + // Add num_commands * commands of size 3 through the helper to make sure we + // do wrap. kTotalNumCommandEntries must not be a multiple of 3. + COMPILE_ASSERT(kTotalNumCommandEntries % 3 != 0, + Is_multiple_of_num_command_entries); + const int kNumCommands = (kTotalNumCommandEntries / 3) * 2; CommandBufferEntry args1[2]; args1[0].value_uint32 = 5; args1[1].value_float = 4.f; - for (unsigned int i = 0; i < 5; ++i) { + for (int i = 0; i < kNumCommands; ++i) { AddCommandWithExpect(error::kNoError, kUnusedCommandId + i, 2, args1); } @@ -293,14 +492,18 @@ TEST_F(CommandBufferHelperTest, TestCommandWrappingExactMultiple) { EXPECT_EQ(error::kNoError, GetError()); } +// Checks exact wrapping condition with Get = 0. TEST_F(CommandBufferHelperTest, TestCommandWrappingFullAtStart) { TestCommandWrappingFull(2, 0); } +// Checks exact wrapping condition with 0 < Get < kTotalNumCommandEntries. 
TEST_F(CommandBufferHelperTest, TestCommandWrappingFullInMiddle) { TestCommandWrappingFull(2, 1); } +// Checks exact wrapping condition with Get = kTotalNumCommandEntries. +// Get should wrap back to 0, but making sure. TEST_F(CommandBufferHelperTest, TestCommandWrappingFullAtEnd) { TestCommandWrappingFull(2, kTotalNumCommandEntries / 2); } |