summaryrefslogtreecommitdiffstats
path: root/o3d/command_buffer/client
diff options
context:
space:
mode:
Diffstat (limited to 'o3d/command_buffer/client')
-rw-r--r--o3d/command_buffer/client/cmd_buffer_helper.cc197
-rw-r--r--o3d/command_buffer/client/cmd_buffer_helper.h212
-rw-r--r--o3d/command_buffer/client/cmd_buffer_helper_test.cc298
-rw-r--r--o3d/command_buffer/client/effect_helper.cc250
-rw-r--r--o3d/command_buffer/client/effect_helper.h156
-rw-r--r--o3d/command_buffer/client/fenced_allocator.cc214
-rw-r--r--o3d/command_buffer/client/fenced_allocator.h266
-rw-r--r--o3d/command_buffer/client/fenced_allocator_test.cc496
-rw-r--r--o3d/command_buffer/client/id_allocator.cc85
-rw-r--r--o3d/command_buffer/client/id_allocator.h78
-rw-r--r--o3d/command_buffer/client/id_allocator_test.cc112
-rw-r--r--o3d/command_buffer/client/o3d_cmd_helper.cc42
-rw-r--r--o3d/command_buffer/client/o3d_cmd_helper.h636
13 files changed, 3042 insertions, 0 deletions
diff --git a/o3d/command_buffer/client/cmd_buffer_helper.cc b/o3d/command_buffer/client/cmd_buffer_helper.cc
new file mode 100644
index 0000000..4dd2ece
--- /dev/null
+++ b/o3d/command_buffer/client/cmd_buffer_helper.cc
@@ -0,0 +1,197 @@
+/*
+ * Copyright 2009, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+
+// This file contains the implementation of the command buffer helper class.
+
+#include "command_buffer/client/cmd_buffer_helper.h"
+#include "o3d/gpu_plugin/np_utils/np_utils.h"
+
+namespace command_buffer {
+
+using gpu_plugin::CommandBuffer;
+using gpu_plugin::NPBrowser;
+using gpu_plugin::NPInvoke;
+using gpu_plugin::NPObjectPointer;
+
+CommandBufferHelper::CommandBufferHelper(
+ NPP npp,
+ const NPObjectPointer<CommandBuffer>& command_buffer)
+ : npp_(npp),
+ command_buffer_(command_buffer),
+ entries_(NULL),
+ entry_count_(0),
+ token_(0),
+ last_token_read_(-1),
+ get_(0),
+ put_(0) {
+}
+
+bool CommandBufferHelper::Initialize() {
+ ring_buffer_ = command_buffer_->GetRingBuffer();
+ if (!ring_buffer_)
+ return false;
+
+ // Map the ring buffer into this process.
+ if (!ring_buffer_->Map(ring_buffer_->max_size()))
+ return false;
+
+ entries_ = static_cast<CommandBufferEntry*>(ring_buffer_->memory());
+ entry_count_ = command_buffer_->GetSize();
+ get_ = command_buffer_->GetGetOffset();
+ put_ = command_buffer_->GetPutOffset();
+ last_token_read_ = command_buffer_->GetToken();
+
+ return true;
+}
+
+CommandBufferHelper::~CommandBufferHelper() {
+}
+
+bool CommandBufferHelper::Flush() {
+ get_ = command_buffer_->SyncOffsets(put_);
+ return !command_buffer_->GetErrorStatus();
+}
+
+// Calls Flush() and then waits until the buffer is empty. Break early if the
+// error is set.
+bool CommandBufferHelper::Finish() {
+ do {
+    // Do not loop forever if the flush fails, meaning the command buffer reader
+    // has shut down.
+ if (!Flush())
+ return false;
+ } while (put_ != get_);
+
+ return true;
+}
+
+// Inserts a new token into the command stream. It uses an increasing value
+// scheme so that we don't lose tokens (a token has passed if the current token
+// value is higher than that token). Calls Finish() if the token value wraps,
+// which will be rare.
+int32 CommandBufferHelper::InsertToken() {
+ // Increment token as 31-bit integer. Negative values are used to signal an
+ // error.
+ token_ = (token_ + 1) & 0x7FFFFFFF;
+ CommandBufferEntry args;
+ args.value_uint32 = token_;
+ const uint32 kSetToken = 1; // TODO(gman): add a common set of commands.
+ AddCommand(kSetToken, 1, &args);
+ if (token_ == 0) {
+ // we wrapped
+ Finish();
+ last_token_read_ = command_buffer_->GetToken();
+ DCHECK_EQ(token_, last_token_read_);
+ }
+ return token_;
+}
+
+// Waits until the current token value is greater or equal to the value passed
+// in argument.
+void CommandBufferHelper::WaitForToken(int32 token) {
+ // Return immediately if corresponding InsertToken failed.
+ if (token < 0)
+ return;
+ if (last_token_read_ >= token) return; // fast path.
+ if (token > token_) return; // we wrapped
+ Flush();
+ last_token_read_ = command_buffer_->GetToken();
+ while (last_token_read_ < token) {
+ if (get_ == put_) {
+ LOG(FATAL) << "Empty command buffer while waiting on a token.";
+ return;
+ }
+ // Do not loop forever if the flush fails, meaning the command buffer reader
+ // has shutdown.
+ if (!Flush())
+ return;
+ last_token_read_ = command_buffer_->GetToken();
+ }
+}
+
+// Waits for available entries, basically waiting until get >= put + count + 1.
+// It actually waits for contiguous entries, so it may need to wrap the buffer
+// around, adding noops. Thus this function may change the value of put_.
+// The function will return early if an error occurs, in which case the
+// requested space may not actually be available.
+void CommandBufferHelper::WaitForAvailableEntries(int32 count) {
+ CHECK(count < entry_count_);
+ if (put_ + count > entry_count_) {
+ // There's not enough room between the current put and the end of the
+ // buffer, so we need to wrap. We will add noops all the way to the end,
+ // but we need to make sure get wraps first, actually that get is 1 or
+ // more (since put will wrap to 0 after we add the noops).
+ DCHECK_LE(1, put_);
+ Flush();
+ while (get_ > put_ || get_ == 0) {
+ // Do not loop forever if the flush fails, meaning the command buffer
+ // reader has shutdown.
+ if (!Flush())
+ return;
+ }
+ // Add the noops. By convention, a noop is a command 0 with no args.
+ // TODO(apatrick): A noop can have a size. It would be better to add a
+ // single noop with a variable size. Watch out for size limit on
+ // individual commands.
+ CommandHeader header;
+ header.size = 1;
+ header.command = 0;
+ while (put_ < entry_count_) {
+ entries_[put_++].value_header = header;
+ }
+ put_ = 0;
+ }
+  // If we have enough room, return immediately.
+ if (count <= AvailableEntries()) return;
+ // Otherwise flush, and wait until we do have enough room.
+ Flush();
+ while (AvailableEntries() < count) {
+ // Do not loop forever if the flush fails, meaning the command buffer reader
+ // has shutdown.
+ if (!Flush())
+ return;
+ }
+}
+
+CommandBufferEntry* CommandBufferHelper::GetSpace(uint32 entries) {
+ WaitForAvailableEntries(entries);
+ CommandBufferEntry* space = &entries_[put_];
+ put_ += entries;
+ return space;
+}
+
+parse_error::ParseError CommandBufferHelper::GetParseError() {
+ int32 parse_error = command_buffer_->ResetParseError();
+ return static_cast<parse_error::ParseError>(parse_error);
+}
+
+} // namespace command_buffer
diff --git a/o3d/command_buffer/client/cmd_buffer_helper.h b/o3d/command_buffer/client/cmd_buffer_helper.h
new file mode 100644
index 0000000..e17e234
--- /dev/null
+++ b/o3d/command_buffer/client/cmd_buffer_helper.h
@@ -0,0 +1,212 @@
+/*
+ * Copyright 2009, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+
+// This file contains the command buffer helper class.
+
+#ifndef GPU_COMMAND_BUFFER_CLIENT_CROSS_CMD_BUFFER_HELPER_H_
+#define GPU_COMMAND_BUFFER_CLIENT_CROSS_CMD_BUFFER_HELPER_H_
+
+#include "command_buffer/common/logging.h"
+#include "command_buffer/common/constants.h"
+#include "command_buffer/common/cmd_buffer_common.h"
+#include "o3d/gpu_plugin/command_buffer.h"
+#include "o3d/gpu_plugin/np_utils/np_object_pointer.h"
+
+namespace command_buffer {
+
+// Command buffer helper class. This class simplifies ring buffer management:
+// it will allocate the buffer, give it to the buffer interface, and let the
+// user add commands to it, while taking care of the synchronization (put and
+// get). It also provides a way to ensure commands have been executed, through
+// the token mechanism:
+//
+// helper.AddCommand(...);
+// helper.AddCommand(...);
+// int32 token = helper.InsertToken();
+// helper.AddCommand(...);
+// helper.AddCommand(...);
+// [...]
+//
+// helper.WaitForToken(token); // this doesn't return until the first two
+// // commands have been executed.
+class CommandBufferHelper {
+ public:
+ CommandBufferHelper(
+ NPP npp,
+ const gpu_plugin::NPObjectPointer<gpu_plugin::CommandBuffer>&
+ command_buffer);
+ virtual ~CommandBufferHelper();
+
+ bool Initialize();
+
+ // Flushes the commands, setting the put pointer to let the buffer interface
+ // know that new commands have been added. After a flush returns, the command
+ // buffer service is aware of all pending commands and it is guaranteed to
+ // have made some progress in processing them. Returns whether the flush was
+ // successful. The flush will fail if the command buffer service has
+ // disconnected.
+ bool Flush();
+
+ // Waits until all the commands have been executed. Returns whether it
+ // was successful. The function will fail if the command buffer service has
+ // disconnected.
+ bool Finish();
+
+  // Waits until a given number of entries are available.
+ // Parameters:
+ // count: number of entries needed. This value must be at most
+ // the size of the buffer minus one.
+ void WaitForAvailableEntries(int32 count);
+
+ // Adds a command data to the command buffer. This may wait until sufficient
+ // space is available.
+ // Parameters:
+ // entries: The command entries to add.
+ // count: The number of entries.
+ void AddCommandData(const CommandBufferEntry* entries, int32 count) {
+ WaitForAvailableEntries(count);
+ for (; count > 0; --count) {
+ entries_[put_++] = *entries++;
+ }
+ DCHECK_LE(put_, entry_count_);
+ if (put_ == entry_count_) put_ = 0;
+ }
+
+ // A typed version of AddCommandData.
+ template <typename T>
+ void AddTypedCmdData(const T& cmd) {
+ AddCommandData(reinterpret_cast<const CommandBufferEntry*>(&cmd),
+ ComputeNumEntries(sizeof(cmd)));
+ }
+
+ // Adds a command to the command buffer. This may wait until sufficient space
+ // is available.
+ // Parameters:
+ // command: the command index.
+ // arg_count: the number of arguments for the command.
+ // args: the arguments for the command (these are copied before the
+ // function returns).
+ void AddCommand(int32 command,
+ int32 arg_count,
+ const CommandBufferEntry *args) {
+ CommandHeader header;
+ header.size = arg_count + 1;
+ header.command = command;
+ WaitForAvailableEntries(header.size);
+ entries_[put_++].value_header = header;
+ for (int i = 0; i < arg_count; ++i) {
+ entries_[put_++] = args[i];
+ }
+ DCHECK_LE(put_, entry_count_);
+ if (put_ == entry_count_) put_ = 0;
+ }
+
+ // Inserts a new token into the command buffer. This token either has a value
+ // different from previously inserted tokens, or ensures that previously
+ // inserted tokens with that value have already passed through the command
+ // stream.
+ // Returns:
+ // the value of the new token or -1 if the command buffer reader has
+ // shutdown.
+ int32 InsertToken();
+
+ // Waits until the token of a particular value has passed through the command
+ // stream (i.e. commands inserted before that token have been executed).
+ // NOTE: This will call Flush if it needs to block.
+  // Parameters:
+  //   token: the value of the token to wait for.
+ void WaitForToken(int32 token);
+
+ // Waits for a certain amount of space to be available. Returns address
+ // of space.
+ CommandBufferEntry* GetSpace(uint32 entries);
+
+ // Typed version of GetSpace. Gets enough room for the given type and returns
+ // a reference to it.
+ template <typename T>
+ T& GetCmdSpace() {
+ COMPILE_ASSERT(T::kArgFlags == cmd::kFixed, Cmd_kArgFlags_not_kFixed);
+ uint32 space_needed = ComputeNumEntries(sizeof(T));
+ void* data = GetSpace(space_needed);
+ return *reinterpret_cast<T*>(data);
+ }
+
+ // Typed version of GetSpace for immediate commands.
+ template <typename T>
+ T& GetImmediateCmdSpace(size_t space) {
+ COMPILE_ASSERT(T::kArgFlags == cmd::kAtLeastN, Cmd_kArgFlags_not_kAtLeastN);
+ uint32 space_needed = ComputeNumEntries(sizeof(T) + space);
+ void* data = GetSpace(space_needed);
+ return *reinterpret_cast<T*>(data);
+ }
+
+ parse_error::ParseError GetParseError();
+
+ // Common Commands
+ void Noop(uint32 skip_count) {
+ cmd::Noop& cmd = GetImmediateCmdSpace<cmd::Noop>(
+ skip_count * sizeof(CommandBufferEntry));
+ cmd.Init(skip_count);
+ }
+
+ void SetToken(uint32 token) {
+ cmd::SetToken& cmd = GetCmdSpace<cmd::SetToken>();
+ cmd.Init(token);
+ }
+
+
+ private:
+ // Waits until get changes, updating the value of get_.
+ void WaitForGetChange();
+
+ // Returns the number of available entries (they may not be contiguous).
+ int32 AvailableEntries() {
+ return (get_ - put_ - 1 + entry_count_) % entry_count_;
+ }
+
+ NPP npp_;
+ gpu_plugin::NPObjectPointer<gpu_plugin::CommandBuffer> command_buffer_;
+ ::base::SharedMemory* ring_buffer_;
+ CommandBufferEntry *entries_;
+ int32 entry_count_;
+ int32 token_;
+ int32 last_token_read_;
+ int32 get_;
+ int32 put_;
+
+ friend class CommandBufferHelperTest;
+ DISALLOW_COPY_AND_ASSIGN(CommandBufferHelper);
+};
+
+} // namespace command_buffer
+
+#endif // GPU_COMMAND_BUFFER_CLIENT_CROSS_CMD_BUFFER_HELPER_H_
diff --git a/o3d/command_buffer/client/cmd_buffer_helper_test.cc b/o3d/command_buffer/client/cmd_buffer_helper_test.cc
new file mode 100644
index 0000000..b7a4f85
--- /dev/null
+++ b/o3d/command_buffer/client/cmd_buffer_helper_test.cc
@@ -0,0 +1,298 @@
+/*
+ * Copyright 2009, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+
+// Tests for the Command Buffer Helper.
+
+#include "tests/common/win/testing_common.h"
+#include "base/message_loop.h"
+#include "command_buffer/client/cmd_buffer_helper.h"
+#include "command_buffer/service/mocks.h"
+#include "gpu_plugin/command_buffer.h"
+#include "gpu_plugin/gpu_processor.h"
+#include "gpu_plugin/np_utils/np_object_pointer.h"
+
+namespace command_buffer {
+
+using gpu_plugin::CommandBuffer;
+using gpu_plugin::GPUProcessor;
+using gpu_plugin::NPCreateObject;
+using gpu_plugin::NPObjectPointer;
+using testing::Return;
+using testing::Mock;
+using testing::Truly;
+using testing::Sequence;
+using testing::DoAll;
+using testing::Invoke;
+using testing::_;
+
+const int32 kNumCommandEntries = 10;
+const int32 kCommandBufferSizeBytes = kNumCommandEntries * sizeof(int32);
+
+// Test fixture for CommandBufferHelper test - Creates a CommandBufferHelper,
+// using a CommandBufferEngine with a mock AsyncAPIInterface for its interface
+// (calling it directly, not through the RPC mechanism).
+class CommandBufferHelperTest : public testing::Test {
+ protected:
+ virtual void SetUp() {
+ api_mock_.reset(new AsyncAPIMock);
+ // ignore noops in the mock - we don't want to inspect the internals of the
+ // helper.
+ EXPECT_CALL(*api_mock_, DoCommand(0, 0, _))
+ .WillRepeatedly(Return(parse_error::kParseNoError));
+
+ ::base::SharedMemory* ring_buffer = new ::base::SharedMemory;
+ ring_buffer->Create(std::wstring(), false, false, kCommandBufferSizeBytes);
+ ring_buffer->Map(1024);
+
+ command_buffer_ = NPCreateObject<CommandBuffer>(NULL);
+ command_buffer_->Initialize(ring_buffer);
+
+ parser_ = new command_buffer::CommandParser(ring_buffer->memory(),
+ kCommandBufferSizeBytes,
+ 0,
+ kCommandBufferSizeBytes,
+ 0,
+ api_mock_.get());
+
+ scoped_refptr<GPUProcessor> gpu_processor(new GPUProcessor(
+ NULL, command_buffer_.Get(), NULL, NULL, parser_, 1));
+ command_buffer_->SetPutOffsetChangeCallback(NewCallback(
+ gpu_processor.get(), &GPUProcessor::ProcessCommands));
+
+ api_mock_->set_engine(gpu_processor.get());
+
+ helper_.reset(new CommandBufferHelper(NULL, command_buffer_));
+ helper_->Initialize();
+ }
+
+ virtual void TearDown() {
+ // If the GPUProcessor posts any tasks, this forces them to run.
+ MessageLoop::current()->RunAllPending();
+ helper_.release();
+ }
+
+ // Adds a command to the buffer through the helper, while adding it as an
+ // expected call on the API mock.
+ void AddCommandWithExpect(parse_error::ParseError _return,
+ unsigned int command,
+ unsigned int arg_count,
+ CommandBufferEntry *args) {
+ helper_->AddCommand(command, arg_count, args);
+ EXPECT_CALL(*api_mock_, DoCommand(command, arg_count,
+ Truly(AsyncAPIMock::IsArgs(arg_count, args))))
+ .InSequence(sequence_)
+ .WillOnce(Return(_return));
+ }
+
+ // Checks that the buffer from put to put+size is free in the parser.
+ void CheckFreeSpace(CommandBufferOffset put, unsigned int size) {
+ CommandBufferOffset parser_put = parser_->put();
+ CommandBufferOffset parser_get = parser_->get();
+ CommandBufferOffset limit = put + size;
+ if (parser_get > parser_put) {
+ // "busy" buffer wraps, so "free" buffer is between put (inclusive) and
+ // get (exclusive).
+ EXPECT_LE(parser_put, put);
+ EXPECT_GT(parser_get, limit);
+ } else {
+ // "busy" buffer does not wrap, so the "free" buffer is the top side (from
+ // put to the limit) and the bottom side (from 0 to get).
+ if (put >= parser_put) {
+ // we're on the top side, check we are below the limit.
+ EXPECT_GE(kNumCommandEntries, limit);
+ } else {
+ // we're on the bottom side, check we are below get.
+ EXPECT_GT(parser_get, limit);
+ }
+ }
+ }
+
+ CommandBufferOffset get_helper_put() { return helper_->put_; }
+
+ scoped_ptr<AsyncAPIMock> api_mock_;
+ NPObjectPointer<CommandBuffer> command_buffer_;
+ command_buffer::CommandParser* parser_;
+ scoped_ptr<CommandBufferHelper> helper_;
+ Sequence sequence_;
+};
+
+// Checks that commands in the buffer are properly executed, and that the
+// status/error stay valid.
+TEST_F(CommandBufferHelperTest, TestCommandProcessing) {
+ // Check initial state of the engine - it should have been configured by the
+ // helper.
+ EXPECT_TRUE(parser_ != NULL);
+ EXPECT_FALSE(command_buffer_->GetErrorStatus());
+ EXPECT_EQ(parse_error::kParseNoError, command_buffer_->ResetParseError());
+ EXPECT_EQ(0u, command_buffer_->GetGetOffset());
+
+ // Add 3 commands through the helper
+ AddCommandWithExpect(parse_error::kParseNoError, 1, 0, NULL);
+
+ CommandBufferEntry args1[2];
+ args1[0].value_uint32 = 3;
+ args1[1].value_float = 4.f;
+ AddCommandWithExpect(parse_error::kParseNoError, 2, 2, args1);
+
+ CommandBufferEntry args2[2];
+ args2[0].value_uint32 = 5;
+ args2[1].value_float = 6.f;
+ AddCommandWithExpect(parse_error::kParseNoError, 3, 2, args2);
+
+ helper_->Flush();
+ // Check that the engine has work to do now.
+ EXPECT_FALSE(parser_->IsEmpty());
+
+ // Wait until it's done.
+ helper_->Finish();
+ // Check that the engine has no more work to do.
+ EXPECT_TRUE(parser_->IsEmpty());
+
+ // Check that the commands did happen.
+ Mock::VerifyAndClearExpectations(api_mock_.get());
+
+ // Check the error status.
+ EXPECT_FALSE(command_buffer_->GetErrorStatus());
+ EXPECT_EQ(parse_error::kParseNoError, command_buffer_->ResetParseError());
+}
+
+// Checks that commands in the buffer are properly executed when wrapping the
+// buffer, and that the status/error stay valid.
+TEST_F(CommandBufferHelperTest, TestCommandWrapping) {
+ // Add 5 commands of size 3 through the helper to make sure we do wrap.
+ CommandBufferEntry args1[2];
+ args1[0].value_uint32 = 3;
+ args1[1].value_float = 4.f;
+
+ for (unsigned int i = 0; i < 5; ++i) {
+ AddCommandWithExpect(parse_error::kParseNoError, i + 1, 2, args1);
+ }
+
+ helper_->Finish();
+ // Check that the commands did happen.
+ Mock::VerifyAndClearExpectations(api_mock_.get());
+
+ // Check the error status.
+ EXPECT_FALSE(command_buffer_->GetErrorStatus());
+ EXPECT_EQ(parse_error::kParseNoError, command_buffer_->ResetParseError());
+}
+
+
+// Checks that commands in the buffer are properly executed, even if they
+// generate a recoverable error. Check that the error status is properly set,
+// and reset when queried.
+TEST_F(CommandBufferHelperTest, TestRecoverableError) {
+ CommandBufferEntry args[2];
+ args[0].value_uint32 = 3;
+ args[1].value_float = 4.f;
+
+ // Create a command buffer with 3 commands, 2 of them generating errors
+ AddCommandWithExpect(parse_error::kParseNoError, 1, 2, args);
+ AddCommandWithExpect(parse_error::kParseUnknownCommand, 2, 2, args);
+ AddCommandWithExpect(parse_error::kParseInvalidArguments, 3, 2,
+ args);
+
+ helper_->Finish();
+ // Check that the commands did happen.
+ Mock::VerifyAndClearExpectations(api_mock_.get());
+
+ // Check that the error status was set to the first error.
+ EXPECT_EQ(parse_error::kParseUnknownCommand,
+ command_buffer_->ResetParseError());
+ // Check that the error status was reset after the query.
+ EXPECT_EQ(parse_error::kParseNoError, command_buffer_->ResetParseError());
+}
+
+// Checks that asking for available entries work, and that the parser
+// effectively won't use that space.
+TEST_F(CommandBufferHelperTest, TestAvailableEntries) {
+ CommandBufferEntry args[2];
+ args[0].value_uint32 = 3;
+ args[1].value_float = 4.f;
+
+  // Add 4 commands through the helper - 8 entries
+ AddCommandWithExpect(parse_error::kParseNoError, 1, 0, NULL);
+ AddCommandWithExpect(parse_error::kParseNoError, 2, 0, NULL);
+ AddCommandWithExpect(parse_error::kParseNoError, 3, 2, args);
+ AddCommandWithExpect(parse_error::kParseNoError, 4, 2, args);
+
+ // Ask for 5 entries.
+ helper_->WaitForAvailableEntries(5);
+
+ CommandBufferOffset put = get_helper_put();
+ CheckFreeSpace(put, 5);
+
+ // Add more commands.
+ AddCommandWithExpect(parse_error::kParseNoError, 5, 2, args);
+
+  // Wait until everything is done.
+ helper_->Finish();
+
+ // Check that the commands did happen.
+ Mock::VerifyAndClearExpectations(api_mock_.get());
+
+ // Check the error status.
+ EXPECT_FALSE(command_buffer_->GetErrorStatus());
+ EXPECT_EQ(parse_error::kParseNoError, command_buffer_->ResetParseError());
+}
+
+// Checks that the InsertToken/WaitForToken work.
+TEST_F(CommandBufferHelperTest, TestToken) {
+ CommandBufferEntry args[2];
+ args[0].value_uint32 = 3;
+ args[1].value_float = 4.f;
+
+ // Add a first command.
+ AddCommandWithExpect(parse_error::kParseNoError, 3, 2, args);
+ // keep track of the buffer position.
+ CommandBufferOffset command1_put = get_helper_put();
+ int32 token = helper_->InsertToken();
+
+ EXPECT_CALL(*api_mock_.get(), DoCommand(cmd::kSetToken, 1, _))
+ .WillOnce(DoAll(Invoke(api_mock_.get(), &AsyncAPIMock::SetToken),
+ Return(parse_error::kParseNoError)));
+ // Add another command.
+ AddCommandWithExpect(parse_error::kParseNoError, 4, 2, args);
+ helper_->WaitForToken(token);
+ // check that the get pointer is beyond the first command.
+ EXPECT_LE(command1_put, command_buffer_->GetGetOffset());
+ helper_->Finish();
+
+ // Check that the commands did happen.
+ Mock::VerifyAndClearExpectations(api_mock_.get());
+
+ // Check the error status.
+ EXPECT_FALSE(command_buffer_->GetErrorStatus());
+ EXPECT_EQ(parse_error::kParseNoError, command_buffer_->ResetParseError());
+}
+
+} // namespace command_buffer
diff --git a/o3d/command_buffer/client/effect_helper.cc b/o3d/command_buffer/client/effect_helper.cc
new file mode 100644
index 0000000..eb7faf9
--- /dev/null
+++ b/o3d/command_buffer/client/effect_helper.cc
@@ -0,0 +1,250 @@
+/*
+ * Copyright 2009, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+
+// This file implements the EffectHelper class.
+
+#include "command_buffer/common/o3d_cmd_format.h"
+#include "command_buffer/client/cmd_buffer_helper.h"
+#include "command_buffer/client/effect_helper.h"
+#include "command_buffer/client/fenced_allocator.h"
+#include "command_buffer/client/id_allocator.h"
+
+// TODO: write a unit test.
+
+namespace command_buffer {
+
+// Creates one param resource for every parameter of the effect and fills in
+// the non-string fields of each returned description. See effect_helper.h
+// for the full contract. Blocks on Finish() several times.
+bool EffectHelper::CreateEffectParameters(ResourceId effect_id,
+                                          std::vector<EffectParamDesc> *descs) {
+  using effect_param::Desc;
+  DCHECK_NE(effect_id, kInvalidResource);
+  DCHECK(descs);
+  descs->clear();
+
+  // Get the param count.
+  // NOTE(review): the allocation result is used without a NULL check --
+  // presumably the transfer buffer always has room for a Uint32; confirm.
+  Uint32 *retval = shm_allocator_->AllocTyped<Uint32>(1);
+  helper_->GetParamCount(effect_id, sizeof(*retval),
+                         shm_id_, shm_allocator_->GetOffset(retval));
+  // Finish has to be called to get the result.
+  helper_->Finish();
+
+  // We could have failed if the effect_id is invalid.
+  if (helper_->GetParseError() != parse_error::kParseNoError) {
+    shm_allocator_->Free(retval);
+    return false;
+  }
+  unsigned int param_count = *retval;
+
+  shm_allocator_->Free(retval);
+  unsigned int max_buffer_size = shm_allocator_->GetLargestFreeOrPendingSize();
+  if (max_buffer_size < sizeof(Desc)) {  // NOLINT
+    // Not enough memory to get at least 1 param desc.
+    return false;
+  }
+  // Allocate a resource ID and create a param resource for each parameter.
+  descs->resize(param_count);
+  for (unsigned int i = 0; i < param_count; ++i) {
+    EffectParamDesc *desc = &((*descs)[i]);
+    desc->id = param_id_allocator_->AllocateID();
+    helper_->CreateParam(desc->id, effect_id, i);
+  }
+
+  // Read param descriptions in batches. We use as much shared memory as
+  // possible so that we only call Finish as little as possible.
+  unsigned int max_param_per_batch =
+      std::min(static_cast<unsigned>(param_count),
+               static_cast<unsigned>(max_buffer_size / sizeof(Desc)));  // NOLINT
+  Desc *raw_descs = shm_allocator_->AllocTyped<Desc>(max_param_per_batch);
+  DCHECK(raw_descs);
+  for (unsigned int i = 0; i < param_count; i += max_param_per_batch) {
+    unsigned int count = std::min(param_count - i, max_param_per_batch);
+    // Queue one GetParamDesc command per param in this batch.
+    for (unsigned int j = 0 ; j < count; ++j) {
+      EffectParamDesc *desc = &((*descs)[i + j]);
+      Desc *raw_desc = raw_descs + j;
+      helper_->GetParamDesc(desc->id, sizeof(*raw_desc),
+                            shm_id_,
+                            shm_allocator_->GetOffset(raw_desc));
+    }
+    // Finish to get the results.
+    helper_->Finish();
+    DCHECK_EQ(helper_->GetParseError(), parse_error::kParseNoError);
+    // Copy the fixed-size fields out of shared memory. Strings are not
+    // retrieved here; cmd_desc_size records the size GetParamStrings needs
+    // to fetch them in one command later.
+    for (unsigned int j = 0 ; j < count; ++j) {
+      EffectParamDesc *desc = &((*descs)[i + j]);
+      Desc *raw_desc = raw_descs + j;
+      desc->data_type = raw_desc->data_type;
+      desc->data_size = raw_desc->data_size;
+      desc->num_elements = raw_desc->num_elements;
+      desc->cmd_desc_size = raw_desc->size;
+    }
+  }
+  shm_allocator_->Free(raw_descs);
+  return true;
+}
+
+// Fetches the name and semantic strings for a single param. The raw
+// description is retrieved into shared memory; if the first attempt used a
+// buffer smaller than the server reports is needed, the buffer is grown and
+// the command retried once.
+bool EffectHelper::GetParamStrings(EffectParamDesc *desc) {
+  using effect_param::Desc;
+  DCHECK(desc);
+  DCHECK_NE(desc->id, kInvalidResource);
+  // desc may not have come directly from CreateEffectParameters, so it may be
+  // less than the minimum required size.
+  unsigned int size = std::max(static_cast<unsigned>(desc->cmd_desc_size),
+                               static_cast<unsigned>(sizeof(Desc)));  // NOLINT
+  Desc *raw_desc = static_cast<Desc *>(shm_allocator_->Alloc(size));
+  if (!raw_desc) {
+    // Not enough memory to get the param desc.
+    return false;
+  }
+  helper_->GetParamDesc(desc->id, size,
+                        shm_id_,
+                        shm_allocator_->GetOffset(raw_desc));
+
+  // Finish to get the results.
+  helper_->Finish();
+
+  // We could have failed if the param ID is invalid.
+  if (helper_->GetParseError() != parse_error::kParseNoError) {
+    shm_allocator_->Free(raw_desc);
+    return false;
+  }
+
+  if (raw_desc->size > size) {
+    // We had not allocated enough memory the first time (e.g. if the
+    // EffectParamDesc didn't come from CreateEffectParameters, so the user had
+    // no way of knowing what size was needed for the strings), so re-allocate
+    // and try again.
+    size = raw_desc->size;
+    desc->cmd_desc_size = size;
+    shm_allocator_->Free(raw_desc);
+    raw_desc = static_cast<Desc *>(shm_allocator_->Alloc(size));
+    if (!raw_desc) {
+      // Not enough memory to get the param desc.
+      return false;
+    }
+    helper_->GetParamDesc(desc->id, size,
+                          shm_id_,
+                          shm_allocator_->GetOffset(raw_desc));
+    // Finish to get the results.
+    helper_->Finish();
+    DCHECK_EQ(helper_->GetParseError(), parse_error::kParseNoError);
+    DCHECK_EQ(raw_desc->size, size);
+  }
+
+  // The strings live inside the Desc structure at the reported offsets,
+  // NUL-terminated; the *_size fields include the terminator, hence the
+  // "- 1" when constructing the Strings below.
+  const char *raw_desc_string = reinterpret_cast<char *>(raw_desc);
+  if (raw_desc->name_offset) {
+    DCHECK_LE(raw_desc->name_offset + raw_desc->name_size, raw_desc->size);
+    DCHECK_GT(raw_desc->name_size, 0U);
+    DCHECK_EQ(raw_desc_string[raw_desc->name_offset + raw_desc->name_size - 1],
+              0);
+    desc->name = String(raw_desc_string + raw_desc->name_offset,
+                        raw_desc->name_size - 1);
+  } else {
+    desc->name.clear();
+  }
+  if (raw_desc->semantic_offset) {
+    DCHECK_LE(raw_desc->semantic_offset + raw_desc->semantic_size,
+              raw_desc->size);
+    DCHECK_GT(raw_desc->semantic_size, 0U);
+    DCHECK_EQ(raw_desc_string[raw_desc->semantic_offset +
+                              raw_desc->semantic_size - 1],
+              0);
+    desc->semantic = String(raw_desc_string + raw_desc->semantic_offset,
+                            raw_desc->semantic_size - 1);
+  } else {
+    desc->semantic.clear();
+  }
+  shm_allocator_->Free(raw_desc);
+  return true;
+}
+
+// Destroys the param resource of every description and returns the resource
+// IDs to the allocator. The descriptions themselves are left untouched.
+void EffectHelper::DestroyEffectParameters(
+    const std::vector<EffectParamDesc> &descs) {
+  for (unsigned int i = 0; i < descs.size(); ++i) {
+    const EffectParamDesc &desc = descs[i];
+    helper_->DestroyParam(desc.id);
+    param_id_allocator_->FreeID(desc.id);
+  }
+}
+
+// Retrieves the semantic and semantic index of every vertex stream input of
+// the effect, batching the GetStreamDesc commands the same way
+// CreateEffectParameters batches GetParamDesc.
+// NOTE(review): unlike CreateEffectParameters, this neither DCHECKs descs
+// nor calls descs->clear() before resize() -- confirm that is intended.
+bool EffectHelper::GetEffectStreams(ResourceId effect_id,
+                                    std::vector<EffectStreamDesc> *descs) {
+  using effect_stream::Desc;
+  DCHECK_NE(effect_id, kInvalidResource);
+
+  // Get the stream count.
+  Uint32 *retval = shm_allocator_->AllocTyped<Uint32>(1);
+  helper_->GetStreamCount(effect_id, sizeof(*retval),
+                          shm_id_,
+                          shm_allocator_->GetOffset(retval));
+  // Finish has to be called to get the result.
+  helper_->Finish();
+
+  // We could have failed if the effect_id is invalid.
+  if (helper_->GetParseError() != parse_error::kParseNoError) {
+    shm_allocator_->Free(retval);
+    return false;
+  }
+  unsigned int stream_count = *retval;
+  shm_allocator_->Free(retval);
+  unsigned int max_buffer_size = shm_allocator_->GetLargestFreeOrPendingSize();
+  if (max_buffer_size < sizeof(Desc)) {  // NOLINT
+    // Not enough memory to get at least 1 stream desc.
+    return false;
+  }
+  descs->resize(stream_count);
+
+  // Read stream descriptions in batches. We use as much shared memory as
+  // possible so that we only call Finish as little as possible.
+  unsigned int max_stream_per_batch =
+      std::min(static_cast<unsigned>(stream_count),
+               static_cast<unsigned>(max_buffer_size / sizeof(Desc)));  // NOLINT
+  Desc *raw_descs = shm_allocator_->AllocTyped<Desc>(max_stream_per_batch);
+  DCHECK(raw_descs);
+  for (unsigned int i = 0; i < stream_count; i += max_stream_per_batch) {
+    unsigned int count = std::min(stream_count - i, max_stream_per_batch);
+    // Queue one GetStreamDesc command per stream in this batch.
+    for (unsigned int j = 0 ; j < count; ++j) {
+      Desc *raw_desc = raw_descs + j;
+      helper_->GetStreamDesc(effect_id, i + j, sizeof(*raw_desc),
+                             shm_id_,
+                             shm_allocator_->GetOffset(raw_desc));
+    }
+    // Finish to get the results.
+    helper_->Finish();
+    DCHECK_EQ(helper_->GetParseError(), parse_error::kParseNoError);
+    // Copy the results out of shared memory.
+    for (unsigned int j = 0 ; j < count; ++j) {
+      EffectStreamDesc *desc = &((*descs)[i + j]);
+      Desc *raw_desc = raw_descs + j;
+      desc->semantic = static_cast<vertex_struct::Semantic>(raw_desc->semantic);
+      desc->semantic_index = raw_desc->semantic_index;
+    }
+  }
+  shm_allocator_->Free(raw_descs);
+  return true;
+}
+} // namespace command_buffer
diff --git a/o3d/command_buffer/client/effect_helper.h b/o3d/command_buffer/client/effect_helper.h
new file mode 100644
index 0000000..a20375c
--- /dev/null
+++ b/o3d/command_buffer/client/effect_helper.h
@@ -0,0 +1,156 @@
+/*
+ * Copyright 2009, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+
+// This file defines the EffectHelper class.
+
+#ifndef GPU_COMMAND_BUFFER_CLIENT_CROSS_EFFECT_HELPER_H_
+#define GPU_COMMAND_BUFFER_CLIENT_CROSS_EFFECT_HELPER_H_
+
+#include <vector>
+#include "command_buffer/common/resource.h"
+#include "command_buffer/client/o3d_cmd_helper.h"
+
+namespace command_buffer {
+
+class FencedAllocatorWrapper;
+class IdAllocator;
+class CommandBufferHelper;
+
+// A helper class to find parameters in an effect.
+class EffectHelper {
+ public:
+  // A more usable version of effect_param::Desc
+  struct EffectParamDesc {
+    ResourceId id;                     // The resource ID for the param.
+    String name;                       // The name of the param.
+    String semantic;                   // The semantic of the param.
+    effect_param::DataType data_type;  // The data type of a param.
+    unsigned int data_size;            // The size of the data for a param.
+    int num_elements;                  // The number of array entries if the
+                                       // parameter is an array, 0 otherwise.
+    unsigned int cmd_desc_size;        // The size of the effect_param::Desc
+                                       // structure (counting strings) for a
+                                       // param.
+  };
+  // Description of one vertex stream input of an effect.
+  struct EffectStreamDesc {
+    vertex_struct::Semantic semantic;  // The semantic enum type.
+    unsigned int semantic_index;       // The semantic index.
+  };
+
+  // Constructs the helper. All pointers must be non-NULL and must outlive
+  // this object (they are not owned); shm_id identifies the shared memory
+  // region managed by shm_allocator.
+  EffectHelper(O3DCmdHelper *helper,
+               FencedAllocatorWrapper *shm_allocator,
+               unsigned int shm_id,
+               IdAllocator *param_id_allocator)
+      : helper_(helper),
+        shm_allocator_(shm_allocator),
+        shm_id_(shm_id),
+        param_id_allocator_(param_id_allocator) {
+    DCHECK(helper);
+    DCHECK(shm_allocator);
+    DCHECK(param_id_allocator);
+  }
+
+  // Creates all the parameters in an effect and gets their descriptions. The
+  // strings will not be retrieved, so name and semantic will be empty. The
+  // cmd_desc_size field will be set to the proper size to be able to get the
+  // strings with a single command within GetParamStrings, so it should be left
+  // alone.
+  //
+  // The ResourceIDs will be allocated in the param_id_allocator.
+  // Temporary buffers will be allocated in the shm_allocator, but they will be
+  // freed before the function returns (possibly pending a token). At least
+  // sizeof(effect_param::Desc) must be available for this function to succeed.
+  // This function will call Finish(), hence will block.
+  //
+  // Parameters:
+  //   effect_id: the ResourceId of the effect.
+  //   descs: A pointer to a vector containing the returned descriptions.
+  //     The pointed vector will be cleared.
+  // Returns:
+  //   true if successful. Reasons for failure are:
+  //   - invalid effect_id,
+  //   - not enough memory in the shm_allocator_.
+  bool CreateEffectParameters(ResourceId effect_id,
+                              std::vector<EffectParamDesc> *descs);
+
+  // Gets the strings for a desc. This will fill in the values for the name and
+  // semantic fields.
+  // Temporary buffers will be allocated in the shm_allocator, but they will be
+  // freed before the function returns (possibly pending a token). At least
+  // desc.cmd_desc_size (as returned by CreateEffectParameters) must be
+  // available for this function to succeed.
+  // This function will call Finish(), hence will block.
+  //
+  // Parameters:
+  //   desc: a pointer to the description for a parameter. The id field should
+  //     be set to the ResourceId of the parameter.
+  // Returns:
+  //   true if successful. Reasons for failure are:
+  //   - invalid parameter ResourceId,
+  //   - not enough memory in the shm_allocator_.
+  bool GetParamStrings(EffectParamDesc *desc);
+
+  // Destroys all parameter resources referenced by the descriptions. The
+  // ResourceId will be freed from the param_id_allocator.
+  // Parameters:
+  //   descs: the vector of descriptions containing the ResourceIDs of the
+  //     parameters to destroy.
+  void DestroyEffectParameters(const std::vector<EffectParamDesc> &descs);
+
+  // Gets all the input stream semantics and semantic indices in an
+  // array. These will be retrieved as many as possible at a time. At least
+  // sizeof(effect_param::Desc) must be available for this function to succeed.
+  // This function will call Finish(), hence will block.
+  //
+  // Parameters:
+  //   effect_id: the ResourceId of the effect.
+  //   descs: A pointer to a vector containing the returned descriptions.
+  //     The pointed vector will be cleared.
+  // Returns:
+  //   true if successful. Reasons for failure are:
+  //   - invalid effect_id,
+  //   - not enough memory in the shm_allocator_.
+  bool GetEffectStreams(ResourceId effect_id,
+                        std::vector<EffectStreamDesc> *descs);
+
+ private:
+  O3DCmdHelper *helper_;                   // Issues commands (not owned).
+  FencedAllocatorWrapper *shm_allocator_;  // Transfer buffer (not owned).
+  unsigned int shm_id_;                    // ID of the shared memory region.
+  IdAllocator *param_id_allocator_;        // Allocates param IDs (not owned).
+
+  DISALLOW_COPY_AND_ASSIGN(EffectHelper);
+};
+
+} // namespace command_buffer
+
+#endif // GPU_COMMAND_BUFFER_CLIENT_CROSS_EFFECT_HELPER_H_
diff --git a/o3d/command_buffer/client/fenced_allocator.cc b/o3d/command_buffer/client/fenced_allocator.cc
new file mode 100644
index 0000000..990d013
--- /dev/null
+++ b/o3d/command_buffer/client/fenced_allocator.cc
@@ -0,0 +1,214 @@
+/*
+ * Copyright 2009, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+
+// This file contains the implementation of the FencedAllocator class.
+
+#include "command_buffer/client/fenced_allocator.h"
+#include <algorithm>
+#include "command_buffer/client/cmd_buffer_helper.h"
+
+namespace command_buffer {
+
+#ifndef COMPILER_MSVC
+// Non-MSVC compilers require an out-of-line definition for the static
+// in-class-initialized constant.
+const FencedAllocator::Offset FencedAllocator::kInvalidOffset;
+#endif
+
+FencedAllocator::~FencedAllocator() {
+  // Free blocks pending tokens.
+  for (unsigned int i = 0; i < blocks_.size(); ++i) {
+    if (blocks_[i].state == FREE_PENDING_TOKEN) {
+      // Freeing collapses neighbouring blocks and invalidates indices, so
+      // resume the scan from the index returned for the collapsed block.
+      i = WaitForTokenAndFreeBlock(i);
+    }
+  }
+  // Everything must have been freed: a single FREE block spans the buffer.
+  DCHECK_EQ(blocks_.size(), 1u);
+  DCHECK_EQ(blocks_[0].state, FREE);
+}
+
+// Looks for a non-allocated block that is big enough. Search in the FREE
+// blocks first (for direct usage), first-fit, then in the FREE_PENDING_TOKEN
+// blocks, waiting for them. The current implementation isn't smart about
+// optimizing what to wait for, just looks inside the block in order (first-fit
+// as well).
+FencedAllocator::Offset FencedAllocator::Alloc(unsigned int size) {
+  // Similarly to malloc, an allocation of 0 allocates at least 1 byte, to
+  // return different pointers every time.
+  if (size == 0) size = 1;
+
+  // Try first to allocate in a free block.
+  for (unsigned int i = 0; i < blocks_.size(); ++i) {
+    Block &block = blocks_[i];
+    if (block.state == FREE && block.size >= size) {
+      return AllocInBlock(i, size);
+    }
+  }
+
+  // No free block is available. Look for blocks pending tokens, and wait for
+  // them to be re-usable.
+  for (unsigned int i = 0; i < blocks_.size(); ++i) {
+    if (blocks_[i].state != FREE_PENDING_TOKEN)
+      continue;
+    // Freeing may collapse the block with FREE neighbours; continue from the
+    // collapsed block's index.
+    i = WaitForTokenAndFreeBlock(i);
+    if (blocks_[i].size >= size)
+      return AllocInBlock(i, size);
+  }
+  // Nothing large enough, even after waiting on every pending block.
+  return kInvalidOffset;
+}
+
+// Looks for the corresponding block, mark it FREE, and collapse it if
+// necessary.
+void FencedAllocator::Free(FencedAllocator::Offset offset) {
+  BlockIndex index = GetBlockByOffset(offset);
+  DCHECK_NE(blocks_[index].state, FREE);  // Catches double frees.
+  blocks_[index].state = FREE;
+  CollapseFreeBlock(index);
+}
+
+// Looks for the corresponding block, mark it FREE_PENDING_TOKEN.
+void FencedAllocator::FreePendingToken(FencedAllocator::Offset offset,
+                                       unsigned int token) {
+  BlockIndex index = GetBlockByOffset(offset);
+  Block &block = blocks_[index];
+  // No collapse here: the block only becomes reusable (and collapsible) once
+  // the token has passed, in WaitForTokenAndFreeBlock.
+  block.state = FREE_PENDING_TOKEN;
+  block.token = token;
+}
+
+// Gets the max of the size of the blocks marked as free.
+// Adjacent FREE blocks are always collapsed (see CollapseFreeBlock), so a
+// single block's size is the largest allocation that won't wait.
+unsigned int FencedAllocator::GetLargestFreeSize() {
+  unsigned int max_size = 0;
+  for (unsigned int i = 0; i < blocks_.size(); ++i) {
+    Block &block = blocks_[i];
+    if (block.state == FREE)
+      max_size = std::max(max_size, block.size);
+  }
+  return max_size;
+}
+
+// Gets the size of the largest segment of blocks that are either FREE or
+// FREE_PENDING_TOKEN.
+unsigned int FencedAllocator::GetLargestFreeOrPendingSize() {
+  unsigned int max_size = 0;
+  unsigned int current_size = 0;
+  // Blocks are stored in offset order, so contiguous non-IN_USE blocks form
+  // a run; sum each run and keep the maximum. An IN_USE block ends a run.
+  for (unsigned int i = 0; i < blocks_.size(); ++i) {
+    Block &block = blocks_[i];
+    if (block.state == IN_USE) {
+      max_size = std::max(max_size, current_size);
+      current_size = 0;
+    } else {
+      DCHECK(block.state == FREE || block.state == FREE_PENDING_TOKEN);
+      current_size += block.size;
+    }
+  }
+  // Don't forget the trailing run.
+  return std::max(max_size, current_size);
+}
+
+// Makes sure that:
+// - there is at least one block.
+// - there are no contiguous FREE blocks (they should have been collapsed).
+// - the successive offsets match the block sizes, and they are in order.
+bool FencedAllocator::CheckConsistency() {
+  if (blocks_.size() < 1) return false;
+  for (unsigned int i = 0; i < blocks_.size() - 1; ++i) {
+    Block &current = blocks_[i];
+    Block &next = blocks_[i + 1];
+    // This test is NOT included in the next one, because offset is unsigned.
+    if (next.offset <= current.offset)
+      return false;
+    // Blocks must tile the buffer with no gaps or overlaps.
+    if (next.offset != current.offset + current.size)
+      return false;
+    // Adjacent FREE blocks should have been collapsed into one.
+    if (current.state == FREE && next.state == FREE)
+      return false;
+  }
+  return true;
+}
+
+// Collapse the block to the next one, then to the previous one. Provided the
+// structure is consistent, those are the only blocks eligible for collapse.
+FencedAllocator::BlockIndex FencedAllocator::CollapseFreeBlock(
+    BlockIndex index) {
+  // Absorb the following block if it is FREE.
+  if (index + 1 < blocks_.size()) {
+    Block &next = blocks_[index + 1];
+    if (next.state == FREE) {
+      blocks_[index].size += next.size;
+      blocks_.erase(blocks_.begin() + index + 1);
+    }
+  }
+  // Merge into the preceding block if it is FREE; the collapsed block then
+  // lives at index - 1.
+  if (index > 0) {
+    Block &prev = blocks_[index - 1];
+    if (prev.state == FREE) {
+      prev.size += blocks_[index].size;
+      blocks_.erase(blocks_.begin() + index);
+      --index;
+    }
+  }
+  return index;
+}
+
+// Waits for the block's token, then mark the block as free, then collapse it.
+FencedAllocator::BlockIndex FencedAllocator::WaitForTokenAndFreeBlock(
+    BlockIndex index) {
+  Block &block = blocks_[index];
+  DCHECK_EQ(block.state, FREE_PENDING_TOKEN);
+  // Blocks until the command buffer has processed the token.
+  helper_->WaitForToken(block.token);
+  block.state = FREE;
+  return CollapseFreeBlock(index);
+}
+
+// If the block is exactly the requested size, simply mark it IN_USE, otherwise
+// split it and mark the first one (of the requested size) IN_USE.
+FencedAllocator::Offset FencedAllocator::AllocInBlock(BlockIndex index,
+                                                      unsigned int size) {
+  Block &block = blocks_[index];
+  DCHECK_GE(block.size, size);
+  DCHECK_EQ(block.state, FREE);
+  Offset offset = block.offset;
+  if (block.size == size) {
+    // Exact fit: no split needed.
+    block.state = IN_USE;
+    return offset;
+  }
+  // Split: the remainder becomes a new FREE block right after this one.
+  Block newblock = { FREE, offset + size, block.size - size, kUnusedToken};
+  block.state = IN_USE;
+  block.size = size;
+  // This is the last thing being done, because it may invalidate the `block`
+  // reference (vector insertion can reallocate).
+  blocks_.insert(blocks_.begin() + index + 1, newblock);
+  return offset;
+}
+
+// The blocks are in offset order, so we can do a binary search.
+FencedAllocator::BlockIndex FencedAllocator::GetBlockByOffset(Offset offset) {
+  // Only the offset field of the template block matters to OffsetCmp.
+  Block templ = { IN_USE, offset, 0, kUnusedToken };
+  Container::iterator it = std::lower_bound(blocks_.begin(), blocks_.end(),
+                                            templ, OffsetCmp());
+  // The offset must be the start of an existing block.
+  DCHECK(it != blocks_.end() && it->offset == offset);
+  return it-blocks_.begin();
+}
+
+} // namespace command_buffer
diff --git a/o3d/command_buffer/client/fenced_allocator.h b/o3d/command_buffer/client/fenced_allocator.h
new file mode 100644
index 0000000..96c3a8a
--- /dev/null
+++ b/o3d/command_buffer/client/fenced_allocator.h
@@ -0,0 +1,266 @@
+/*
+ * Copyright 2009, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+
+// This file contains the definition of the FencedAllocator class.
+
+#ifndef GPU_COMMAND_BUFFER_CLIENT_CROSS_FENCED_ALLOCATOR_H_
+#define GPU_COMMAND_BUFFER_CLIENT_CROSS_FENCED_ALLOCATOR_H_
+
+#include <vector>
+#include "base/basictypes.h"
+#include "command_buffer/common/logging.h"
+
+namespace command_buffer {
+class CommandBufferHelper;
+
+// FencedAllocator provides a mechanism to manage allocations within a fixed
+// block of memory (storing the book-keeping externally). Furthermore this
+// class allows to free data "pending" the passage of a command buffer token,
+// that is, the memory won't be reused until the command buffer has processed
+// that token.
+//
+// NOTE: Although this class is intended to be used in the command buffer
+// environment which is multi-process, this class isn't "thread safe", because
+// it isn't meant to be shared across modules. It is thread-compatible though:
+// concurrent use from several threads is fine provided the caller supplies
+// external synchronization.
+class FencedAllocator {
+ public:
+  typedef unsigned int Offset;
+  // Invalid offset, returned by Alloc in case of failure.
+  static const Offset kInvalidOffset = 0xffffffffU;
+
+  // Creates a FencedAllocator. Note that the size of the buffer is passed, but
+  // not its base address: everything is handled as offsets into the buffer.
+  FencedAllocator(unsigned int size,
+                  CommandBufferHelper *helper)
+      : helper_(helper) {
+    // Start with a single FREE block spanning the whole buffer.
+    Block block = { FREE, 0, size, kUnusedToken };
+    blocks_.push_back(block);
+  }
+
+  // Waits on any pending tokens; everything must have been freed by then.
+  ~FencedAllocator();
+
+  // Allocates a block of memory. If the buffer is out of directly available
+  // memory, this function may wait until memory that was freed "pending a
+  // token" can be re-used.
+  //
+  // Parameters:
+  //   size: the size of the memory block to allocate.
+  //
+  // Returns:
+  //   the offset of the allocated memory block, or kInvalidOffset if out of
+  //   memory.
+  Offset Alloc(unsigned int size);
+
+  // Frees a block of memory.
+  //
+  // Parameters:
+  //   offset: the offset of the memory block to free.
+  void Free(Offset offset);
+
+  // Frees a block of memory, pending the passage of a token. That memory won't
+  // be re-allocated until the token has passed through the command stream.
+  //
+  // Parameters:
+  //   offset: the offset of the memory block to free.
+  //   token: the token value to wait for before re-using the memory.
+  void FreePendingToken(Offset offset, unsigned int token);
+
+  // Gets the size of the largest free block that is available without waiting.
+  unsigned int GetLargestFreeSize();
+
+  // Gets the size of the largest free block that can be allocated if the
+  // caller can wait. Allocating a block of this size will succeed, but may
+  // block.
+  unsigned int GetLargestFreeOrPendingSize();
+
+  // Checks for consistency inside the book-keeping structures. Used for
+  // testing.
+  bool CheckConsistency();
+
+ private:
+  // Status of a block of memory, for book-keeping.
+  enum State {
+    IN_USE,              // Currently allocated.
+    FREE,                // Available for allocation.
+    FREE_PENDING_TOKEN   // Freed, unusable until its token has passed.
+  };
+
+  // Book-keeping structure that describes a block of memory.
+  struct Block {
+    State state;
+    Offset offset;
+    unsigned int size;
+    unsigned int token;  // token to wait for in the FREE_PENDING_TOKEN case.
+  };
+
+  // Comparison functor for memory block sorting.
+  class OffsetCmp {
+   public:
+    bool operator() (const Block &left, const Block &right) {
+      return left.offset < right.offset;
+    }
+  };
+
+  typedef std::vector<Block> Container;
+  typedef unsigned int BlockIndex;
+
+  static const unsigned int kUnusedToken = 0;
+
+  // Gets the index of a memory block, given its offset.
+  BlockIndex GetBlockByOffset(Offset offset);
+
+  // Collapse a free block with its neighbours if they are free. Returns the
+  // index of the collapsed block.
+  // NOTE: this will invalidate block indices.
+  BlockIndex CollapseFreeBlock(BlockIndex index);
+
+  // Waits for a FREE_PENDING_TOKEN block to be usable, and free it. Returns
+  // the new index of that block (since it may have been collapsed).
+  // NOTE: this will invalidate block indices.
+  BlockIndex WaitForTokenAndFreeBlock(BlockIndex index);
+
+  // Allocates a block of memory inside a given block, splitting it in two
+  // (unless that block is of the exact requested size).
+  // NOTE: this will invalidate block indices.
+  // Returns the offset of the allocated block (NOTE: this is different from
+  // the other functions that return a block index).
+  Offset AllocInBlock(BlockIndex index, unsigned int size);
+
+  command_buffer::CommandBufferHelper *helper_;  // Not owned.
+  Container blocks_;  // Sorted by offset; tiles the whole buffer.
+
+  DISALLOW_IMPLICIT_CONSTRUCTORS(FencedAllocator);
+};
+
+// This class functions just like FencedAllocator, but its API uses pointers
+// instead of offsets.
+class FencedAllocatorWrapper {
+ public:
+  // base is the client-side address of the buffer the wrapped allocator
+  // manages; offsets are translated relative to it. Not owned.
+  FencedAllocatorWrapper(unsigned int size,
+                         CommandBufferHelper *helper,
+                         void *base)
+      : allocator_(size, helper),
+        base_(base) { }
+
+  // Allocates a block of memory. If the buffer is out of directly available
+  // memory, this function may wait until memory that was freed "pending a
+  // token" can be re-used.
+  //
+  // Parameters:
+  //   size: the size of the memory block to allocate.
+  //
+  // Returns:
+  //   the pointer to the allocated memory block, or NULL if out of
+  //   memory.
+  void *Alloc(unsigned int size) {
+    FencedAllocator::Offset offset = allocator_.Alloc(size);
+    return GetPointer(offset);
+  }
+
+  // Allocates a block of memory. If the buffer is out of directly available
+  // memory, this function may wait until memory that was freed "pending a
+  // token" can be re-used.
+  // This is a type-safe version of Alloc, returning a typed pointer.
+  //
+  // Parameters:
+  //   count: the number of elements to allocate.
+  //
+  // Returns:
+  //   the pointer to the allocated memory block, or NULL if out of
+  //   memory.
+  template <typename T> T *AllocTyped(unsigned int count) {
+    return static_cast<T *>(Alloc(count * sizeof(T)));
+  }
+
+  // Frees a block of memory.
+  //
+  // Parameters:
+  //   pointer: the pointer to the memory block to free.
+  void Free(void *pointer) {
+    DCHECK(pointer);
+    allocator_.Free(GetOffset(pointer));
+  }
+
+  // Frees a block of memory, pending the passage of a token. That memory won't
+  // be re-allocated until the token has passed through the command stream.
+  //
+  // Parameters:
+  //   pointer: the pointer to the memory block to free.
+  //   token: the token value to wait for before re-using the memory.
+  void FreePendingToken(void *pointer, unsigned int token) {
+    DCHECK(pointer);
+    allocator_.FreePendingToken(GetOffset(pointer), token);
+  }
+
+  // Gets a pointer to a memory block given the base memory and the offset.
+  // It translates FencedAllocator::kInvalidOffset to NULL.
+  void *GetPointer(FencedAllocator::Offset offset) {
+    return (offset == FencedAllocator::kInvalidOffset) ?
+        NULL : static_cast<char *>(base_) + offset;
+  }
+
+  // Gets the offset to a memory block given the base memory and the address.
+  // It translates NULL to FencedAllocator::kInvalidOffset.
+  FencedAllocator::Offset GetOffset(void *pointer) {
+    return pointer ? static_cast<char *>(pointer) - static_cast<char *>(base_) :
+        FencedAllocator::kInvalidOffset;
+  }
+
+  // Gets the size of the largest free block that is available without waiting.
+  unsigned int GetLargestFreeSize() {
+    return allocator_.GetLargestFreeSize();
+  }
+
+  // Gets the size of the largest free block that can be allocated if the
+  // caller can wait.
+  unsigned int GetLargestFreeOrPendingSize() {
+    return allocator_.GetLargestFreeOrPendingSize();
+  }
+
+  // Checks for consistency inside the book-keeping structures. Used for
+  // testing.
+  bool CheckConsistency() {
+    return allocator_.CheckConsistency();
+  }
+
+  // Accessor to the wrapped offset-based allocator.
+  FencedAllocator &allocator() { return allocator_; }
+
+ private:
+  FencedAllocator allocator_;
+  void *base_;  // Client-side base address of the buffer. Not owned.
+  DISALLOW_IMPLICIT_CONSTRUCTORS(FencedAllocatorWrapper);
+};
+
+} // namespace command_buffer
+
+#endif // GPU_COMMAND_BUFFER_CLIENT_CROSS_FENCED_ALLOCATOR_H_
diff --git a/o3d/command_buffer/client/fenced_allocator_test.cc b/o3d/command_buffer/client/fenced_allocator_test.cc
new file mode 100644
index 0000000..aa59be1
--- /dev/null
+++ b/o3d/command_buffer/client/fenced_allocator_test.cc
@@ -0,0 +1,496 @@
+/*
+ * Copyright 2009, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+
+// This file contains the tests for the FencedAllocator class.
+
+#include "tests/common/win/testing_common.h"
+#include "base/message_loop.h"
+#include "command_buffer/client/cmd_buffer_helper.h"
+#include "command_buffer/client/fenced_allocator.h"
+#include "command_buffer/service/cmd_buffer_engine.h"
+#include "command_buffer/service/mocks.h"
+#include "gpu_plugin/command_buffer.h"
+#include "gpu_plugin/gpu_processor.h"
+#include "gpu_plugin/np_utils/np_object_pointer.h"
+
+namespace command_buffer {
+
+using gpu_plugin::CommandBuffer;
+using gpu_plugin::GPUProcessor;
+using gpu_plugin::NPCreateObject;
+using gpu_plugin::NPObjectPointer;
+using testing::Return;
+using testing::Mock;
+using testing::Truly;
+using testing::Sequence;
+using testing::DoAll;
+using testing::Invoke;
+using testing::_;
+
+class BaseFencedAllocatorTest : public testing::Test {
+ protected:
+ static const unsigned int kBufferSize = 1024;
+
+ virtual void SetUp() {
+ api_mock_.reset(new AsyncAPIMock);
+ // ignore noops in the mock - we don't want to inspect the internals of the
+ // helper.
+ EXPECT_CALL(*api_mock_, DoCommand(cmd::kNoop, 0, _))
+ .WillRepeatedly(Return(parse_error::kParseNoError));
+ // Forward the SetToken calls to the engine
+ EXPECT_CALL(*api_mock_.get(), DoCommand(cmd::kSetToken, 1, _))
+ .WillRepeatedly(DoAll(Invoke(api_mock_.get(), &AsyncAPIMock::SetToken),
+ Return(parse_error::kParseNoError)));
+
+ ::base::SharedMemory* ring_buffer = new ::base::SharedMemory;
+ ring_buffer->Create(std::wstring(), false, false, 1024);
+ ring_buffer->Map(1024);
+
+ command_buffer_ = NPCreateObject<CommandBuffer>(NULL);
+ command_buffer_->Initialize(ring_buffer);
+
+ parser_ = new command_buffer::CommandParser(ring_buffer->memory(),
+ kBufferSize,
+ 0,
+ kBufferSize,
+ 0,
+ api_mock_.get());
+
+ scoped_refptr<GPUProcessor> gpu_processor(new GPUProcessor(
+ NULL, command_buffer_.Get(), NULL, NULL, parser_, INT_MAX));
+ command_buffer_->SetPutOffsetChangeCallback(NewCallback(
+ gpu_processor.get(), &GPUProcessor::ProcessCommands));
+
+ api_mock_->set_engine(gpu_processor.get());
+
+ helper_.reset(new CommandBufferHelper(NULL, command_buffer_));
+ helper_->Initialize();
+ }
+
+ virtual void TearDown() {
+ helper_.release();
+ }
+
+ scoped_ptr<AsyncAPIMock> api_mock_;
+ NPObjectPointer<CommandBuffer> command_buffer_;
+ command_buffer::CommandParser* parser_;
+ scoped_ptr<CommandBufferHelper> helper_;
+};
+
#ifndef COMPILER_MSVC
// Out-of-class definition required when the in-class static const is
// odr-used; MSVC does not accept/need this redundant definition.
const unsigned int BaseFencedAllocatorTest::kBufferSize;
#endif
+
+// Test fixture for FencedAllocator test - Creates a FencedAllocator, using a
+// CommandBufferHelper with a mock AsyncAPIInterface for its interface (calling
+// it directly, not through the RPC mechanism), making sure Noops are ignored
+// and SetToken are properly forwarded to the engine.
+class FencedAllocatorTest : public BaseFencedAllocatorTest {
+ protected:
+ virtual void SetUp() {
+ BaseFencedAllocatorTest::SetUp();
+ allocator_.reset(new FencedAllocator(kBufferSize, helper_.get()));
+ }
+
+ virtual void TearDown() {
+ // If the GPUProcessor posts any tasks, this forces them to run.
+ MessageLoop::current()->RunAllPending();
+
+ EXPECT_TRUE(allocator_->CheckConsistency());
+ allocator_.release();
+
+ BaseFencedAllocatorTest::TearDown();
+ }
+
+ scoped_ptr<FencedAllocator> allocator_;
+};
+
+// Checks basic alloc and free.
+TEST_F(FencedAllocatorTest, TestBasic) {
+ allocator_->CheckConsistency();
+
+ const unsigned int kSize = 16;
+ FencedAllocator::Offset offset = allocator_->Alloc(kSize);
+ EXPECT_NE(FencedAllocator::kInvalidOffset, offset);
+ EXPECT_GE(kBufferSize, offset+kSize);
+ EXPECT_TRUE(allocator_->CheckConsistency());
+
+ allocator_->Free(offset);
+ EXPECT_TRUE(allocator_->CheckConsistency());
+}
+
// Checks out-of-memory condition.
TEST_F(FencedAllocatorTest, TestOutOfMemory) {
  EXPECT_TRUE(allocator_->CheckConsistency());

  const unsigned int kSize = 16;
  const unsigned int kAllocCount = kBufferSize / kSize;
  // The test assumes kSize evenly divides the buffer so it can be filled
  // exactly.
  CHECK(kAllocCount * kSize == kBufferSize);

  // Allocate several buffers to fill in the memory.
  FencedAllocator::Offset offsets[kAllocCount];
  for (unsigned int i = 0; i < kAllocCount; ++i) {
    offsets[i] = allocator_->Alloc(kSize);
    EXPECT_NE(FencedAllocator::kInvalidOffset, offsets[i]);
    EXPECT_GE(kBufferSize, offsets[i]+kSize);
    EXPECT_TRUE(allocator_->CheckConsistency());
  }

  // This allocation should fail: the buffer is full.
  FencedAllocator::Offset offset_failed = allocator_->Alloc(kSize);
  EXPECT_EQ(FencedAllocator::kInvalidOffset, offset_failed);
  EXPECT_TRUE(allocator_->CheckConsistency());

  // Free one successful allocation, reallocate with half the size; the
  // smaller block must fit into the freed slot.
  allocator_->Free(offsets[0]);
  EXPECT_TRUE(allocator_->CheckConsistency());
  offsets[0] = allocator_->Alloc(kSize/2);
  EXPECT_NE(FencedAllocator::kInvalidOffset, offsets[0]);
  EXPECT_GE(kBufferSize, offsets[0]+kSize);
  EXPECT_TRUE(allocator_->CheckConsistency());

  // This allocation should fail as well: only kSize/2 bytes remain free.
  offset_failed = allocator_->Alloc(kSize);
  EXPECT_EQ(FencedAllocator::kInvalidOffset, offset_failed);
  EXPECT_TRUE(allocator_->CheckConsistency());

  // Free up everything.
  for (unsigned int i = 0; i < kAllocCount; ++i) {
    allocator_->Free(offsets[i]);
    EXPECT_TRUE(allocator_->CheckConsistency());
  }
}
+
// Checks the free-pending-token mechanism.
TEST_F(FencedAllocatorTest, TestFreePendingToken) {
  EXPECT_TRUE(allocator_->CheckConsistency());

  const unsigned int kSize = 16;
  const unsigned int kAllocCount = kBufferSize / kSize;
  // The test assumes kSize evenly divides the buffer.
  CHECK(kAllocCount * kSize == kBufferSize);

  // Allocate several buffers to fill in the memory.
  FencedAllocator::Offset offsets[kAllocCount];
  for (unsigned int i = 0; i < kAllocCount; ++i) {
    offsets[i] = allocator_->Alloc(kSize);
    EXPECT_NE(FencedAllocator::kInvalidOffset, offsets[i]);
    EXPECT_GE(kBufferSize, offsets[i]+kSize);
    EXPECT_TRUE(allocator_->CheckConsistency());
  }

  // This allocation should fail: the buffer is full.
  FencedAllocator::Offset offset_failed = allocator_->Alloc(kSize);
  EXPECT_EQ(FencedAllocator::kInvalidOffset, offset_failed);
  EXPECT_TRUE(allocator_->CheckConsistency());

  // Free one successful allocation, pending fence.
  int32 token = helper_.get()->InsertToken();
  allocator_->FreePendingToken(offsets[0], token);
  EXPECT_TRUE(allocator_->CheckConsistency());

  // The way we hooked up the helper and engine, it won't process commands
  // until it has to wait for something. Which means the token shouldn't have
  // passed yet at this point.
  EXPECT_GT(token, command_buffer_->GetToken());

  // This allocation will need to reclaim the space freed above, so that should
  // process the commands until the token is passed.
  offsets[0] = allocator_->Alloc(kSize);
  EXPECT_NE(FencedAllocator::kInvalidOffset, offsets[0]);
  EXPECT_GE(kBufferSize, offsets[0]+kSize);
  EXPECT_TRUE(allocator_->CheckConsistency());
  // Check that the token has indeed passed.
  EXPECT_LE(token, command_buffer_->GetToken());

  // Free up everything.
  for (unsigned int i = 0; i < kAllocCount; ++i) {
    allocator_->Free(offsets[i]);
    EXPECT_TRUE(allocator_->CheckConsistency());
  }
}
+
// Tests GetLargestFreeSize
TEST_F(FencedAllocatorTest, TestGetLargestFreeSize) {
  EXPECT_TRUE(allocator_->CheckConsistency());
  // An empty allocator reports the whole buffer as its largest free block.
  EXPECT_EQ(kBufferSize, allocator_->GetLargestFreeSize());

  FencedAllocator::Offset offset = allocator_->Alloc(kBufferSize);
  ASSERT_NE(FencedAllocator::kInvalidOffset, offset);
  EXPECT_EQ(0u, allocator_->GetLargestFreeSize());
  allocator_->Free(offset);
  EXPECT_EQ(kBufferSize, allocator_->GetLargestFreeSize());

  const unsigned int kSize = 16;
  offset = allocator_->Alloc(kSize);
  ASSERT_NE(FencedAllocator::kInvalidOffset, offset);
  // The following checks that the buffer is allocated "smartly" - which is
  // dependent on the implementation. But both first-fit or best-fit would
  // ensure that.
  EXPECT_EQ(kBufferSize - kSize, allocator_->GetLargestFreeSize());

  // Allocate 2 more buffers (now 3), and then free the first two. This is to
  // ensure a hole. Note that this is dependent on the first-fit current
  // implementation.
  FencedAllocator::Offset offset1 = allocator_->Alloc(kSize);
  ASSERT_NE(FencedAllocator::kInvalidOffset, offset1);
  FencedAllocator::Offset offset2 = allocator_->Alloc(kSize);
  ASSERT_NE(FencedAllocator::kInvalidOffset, offset2);
  allocator_->Free(offset);
  allocator_->Free(offset1);
  // Largest free block is the tail, not the 2*kSize hole before offset2.
  EXPECT_EQ(kBufferSize - 3 * kSize, allocator_->GetLargestFreeSize());

  // Fill the tail; the hole is now the largest free block.
  offset = allocator_->Alloc(kBufferSize - 3 * kSize);
  ASSERT_NE(FencedAllocator::kInvalidOffset, offset);
  EXPECT_EQ(2 * kSize, allocator_->GetLargestFreeSize());

  offset1 = allocator_->Alloc(2 * kSize);
  ASSERT_NE(FencedAllocator::kInvalidOffset, offset1);
  EXPECT_EQ(0u, allocator_->GetLargestFreeSize());

  allocator_->Free(offset);
  allocator_->Free(offset1);
  allocator_->Free(offset2);
}
+
// Tests GetLargestFreeOrPendingSize
TEST_F(FencedAllocatorTest, TestGetLargestFreeOrPendingSize) {
  EXPECT_TRUE(allocator_->CheckConsistency());
  EXPECT_EQ(kBufferSize, allocator_->GetLargestFreeOrPendingSize());

  FencedAllocator::Offset offset = allocator_->Alloc(kBufferSize);
  ASSERT_NE(FencedAllocator::kInvalidOffset, offset);
  EXPECT_EQ(0u, allocator_->GetLargestFreeOrPendingSize());
  allocator_->Free(offset);
  EXPECT_EQ(kBufferSize, allocator_->GetLargestFreeOrPendingSize());

  const unsigned int kSize = 16;
  offset = allocator_->Alloc(kSize);
  ASSERT_NE(FencedAllocator::kInvalidOffset, offset);
  // The following checks that the buffer is allocated "smartly" - which is
  // dependent on the implementation. But both first-fit or best-fit would
  // ensure that.
  EXPECT_EQ(kBufferSize - kSize, allocator_->GetLargestFreeOrPendingSize());

  // Allocate 2 more buffers (now 3), and then free the first two. This is to
  // ensure a hole. Note that this is dependent on the first-fit current
  // implementation.
  FencedAllocator::Offset offset1 = allocator_->Alloc(kSize);
  ASSERT_NE(FencedAllocator::kInvalidOffset, offset1);
  FencedAllocator::Offset offset2 = allocator_->Alloc(kSize);
  ASSERT_NE(FencedAllocator::kInvalidOffset, offset2);
  allocator_->Free(offset);
  allocator_->Free(offset1);
  EXPECT_EQ(kBufferSize - 3 * kSize,
            allocator_->GetLargestFreeOrPendingSize());

  // Free the last one, pending a token.
  int32 token = helper_.get()->InsertToken();
  allocator_->FreePendingToken(offset2, token);

  // Now all the buffers have been freed...
  EXPECT_EQ(kBufferSize, allocator_->GetLargestFreeOrPendingSize());
  // .. but one is still waiting for the token.
  EXPECT_EQ(kBufferSize - 3 * kSize,
            allocator_->GetLargestFreeSize());

  // The way we hooked up the helper and engine, it won't process commands
  // until it has to wait for something. Which means the token shouldn't have
  // passed yet at this point.
  EXPECT_GT(token, command_buffer_->GetToken());
  // This allocation will need to reclaim the space freed above, so that should
  // process the commands until the token is passed, but it will succeed.
  offset = allocator_->Alloc(kBufferSize);
  ASSERT_NE(FencedAllocator::kInvalidOffset, offset);
  // Check that the token has indeed passed.
  EXPECT_LE(token, command_buffer_->GetToken());
  allocator_->Free(offset);

  // Everything now has been freed...
  EXPECT_EQ(kBufferSize, allocator_->GetLargestFreeOrPendingSize());
  // ... for real.
  EXPECT_EQ(kBufferSize, allocator_->GetLargestFreeSize());
}
+
+// Test fixture for FencedAllocatorWrapper test - Creates a
+// FencedAllocatorWrapper, using a CommandBufferHelper with a mock
+// AsyncAPIInterface for its interface (calling it directly, not through the
+// RPC mechanism), making sure Noops are ignored and SetToken are properly
+// forwarded to the engine.
+class FencedAllocatorWrapperTest : public BaseFencedAllocatorTest {
+ protected:
+ virtual void SetUp() {
+ BaseFencedAllocatorTest::SetUp();
+
+ // Though allocating this buffer isn't strictly necessary, it makes
+ // allocations point to valid addresses, so they could be used for
+ // something.
+ buffer_.reset(new char[kBufferSize]);
+ allocator_.reset(new FencedAllocatorWrapper(kBufferSize, helper_.get(),
+ buffer_.get()));
+ }
+
+ virtual void TearDown() {
+ // If the GPUProcessor posts any tasks, this forces them to run.
+ MessageLoop::current()->RunAllPending();
+
+ EXPECT_TRUE(allocator_->CheckConsistency());
+ allocator_.release();
+ buffer_.release();
+
+ BaseFencedAllocatorTest::TearDown();
+ }
+
+ scoped_ptr<FencedAllocatorWrapper> allocator_;
+ scoped_array<char> buffer_;
+};
+
+// Checks basic alloc and free.
+TEST_F(FencedAllocatorWrapperTest, TestBasic) {
+ allocator_->CheckConsistency();
+
+ const unsigned int kSize = 16;
+ void *pointer = allocator_->Alloc(kSize);
+ ASSERT_TRUE(pointer);
+ EXPECT_LE(buffer_.get(), static_cast<char *>(pointer));
+ EXPECT_GE(kBufferSize, static_cast<char *>(pointer) - buffer_.get() + kSize);
+ EXPECT_TRUE(allocator_->CheckConsistency());
+
+ allocator_->Free(pointer);
+ EXPECT_TRUE(allocator_->CheckConsistency());
+
+ char *pointer_char = allocator_->AllocTyped<char>(kSize);
+ ASSERT_TRUE(pointer_char);
+ EXPECT_LE(buffer_.get(), pointer_char);
+ EXPECT_GE(buffer_.get() + kBufferSize, pointer_char + kSize);
+ allocator_->Free(pointer_char);
+ EXPECT_TRUE(allocator_->CheckConsistency());
+
+ unsigned int *pointer_uint = allocator_->AllocTyped<unsigned int>(kSize);
+ ASSERT_TRUE(pointer_uint);
+ EXPECT_LE(buffer_.get(), reinterpret_cast<char *>(pointer_uint));
+ EXPECT_GE(buffer_.get() + kBufferSize,
+ reinterpret_cast<char *>(pointer_uint + kSize));
+
+ // Check that it did allocate kSize * sizeof(unsigned int). We can't tell
+ // directly, except from the remaining size.
+ EXPECT_EQ(kBufferSize - kSize * sizeof(*pointer_uint),
+ allocator_->GetLargestFreeSize());
+ allocator_->Free(pointer_uint);
+}
+
+// Checks out-of-memory condition.
+TEST_F(FencedAllocatorWrapperTest, TestOutOfMemory) {
+ allocator_->CheckConsistency();
+
+ const unsigned int kSize = 16;
+ const unsigned int kAllocCount = kBufferSize / kSize;
+ CHECK(kAllocCount * kSize == kBufferSize);
+
+ // Allocate several buffers to fill in the memory.
+ void *pointers[kAllocCount];
+ for (unsigned int i = 0; i < kAllocCount; ++i) {
+ pointers[i] = allocator_->Alloc(kSize);
+ EXPECT_TRUE(pointers[i]);
+ EXPECT_TRUE(allocator_->CheckConsistency());
+ }
+
+ // This allocation should fail.
+ void *pointer_failed = allocator_->Alloc(kSize);
+ EXPECT_FALSE(pointer_failed);
+ EXPECT_TRUE(allocator_->CheckConsistency());
+
+ // Free one successful allocation, reallocate with half the size
+ allocator_->Free(pointers[0]);
+ EXPECT_TRUE(allocator_->CheckConsistency());
+ pointers[0] = allocator_->Alloc(kSize/2);
+ EXPECT_TRUE(pointers[0]);
+ EXPECT_TRUE(allocator_->CheckConsistency());
+
+ // This allocation should fail as well.
+ pointer_failed = allocator_->Alloc(kSize);
+ EXPECT_FALSE(pointer_failed);
+ EXPECT_TRUE(allocator_->CheckConsistency());
+
+ // Free up everything.
+ for (unsigned int i = 0; i < kAllocCount; ++i) {
+ allocator_->Free(pointers[i]);
+ EXPECT_TRUE(allocator_->CheckConsistency());
+ }
+}
+
+// Checks the free-pending-token mechanism.
+TEST_F(FencedAllocatorWrapperTest, TestFreePendingToken) {
+ allocator_->CheckConsistency();
+
+ const unsigned int kSize = 16;
+ const unsigned int kAllocCount = kBufferSize / kSize;
+ CHECK(kAllocCount * kSize == kBufferSize);
+
+ // Allocate several buffers to fill in the memory.
+ void *pointers[kAllocCount];
+ for (unsigned int i = 0; i < kAllocCount; ++i) {
+ pointers[i] = allocator_->Alloc(kSize);
+ EXPECT_TRUE(pointers[i]);
+ EXPECT_TRUE(allocator_->CheckConsistency());
+ }
+
+ // This allocation should fail.
+ void *pointer_failed = allocator_->Alloc(kSize);
+ EXPECT_FALSE(pointer_failed);
+ EXPECT_TRUE(allocator_->CheckConsistency());
+
+ // Free one successful allocation, pending fence.
+ int32 token = helper_.get()->InsertToken();
+ allocator_->FreePendingToken(pointers[0], token);
+ EXPECT_TRUE(allocator_->CheckConsistency());
+
+ // The way we hooked up the helper and engine, it won't process commands
+ // until it has to wait for something. Which means the token shouldn't have
+ // passed yet at this point.
+ EXPECT_GT(token, command_buffer_->GetToken());
+
+ // This allocation will need to reclaim the space freed above, so that should
+ // process the commands until the token is passed.
+ pointers[0] = allocator_->Alloc(kSize);
+ EXPECT_TRUE(pointers[0]);
+ EXPECT_TRUE(allocator_->CheckConsistency());
+ // Check that the token has indeed passed.
+ EXPECT_LE(token, command_buffer_->GetToken());
+
+ // Free up everything.
+ for (unsigned int i = 0; i < kAllocCount; ++i) {
+ allocator_->Free(pointers[i]);
+ EXPECT_TRUE(allocator_->CheckConsistency());
+ }
+}
+
+} // namespace command_buffer
diff --git a/o3d/command_buffer/client/id_allocator.cc b/o3d/command_buffer/client/id_allocator.cc
new file mode 100644
index 0000000..fc3250d
--- /dev/null
+++ b/o3d/command_buffer/client/id_allocator.cc
@@ -0,0 +1,85 @@
+/*
+ * Copyright 2009, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+
+// This file contains the implementation of IdAllocator.
+
+#include "command_buffer/client/id_allocator.h"
+
+namespace command_buffer {
+
+IdAllocator::IdAllocator() : bitmap_(1) { bitmap_[0] = 0; }
+
// Number of bits in one bitmap word (i.e. 32 for a Uint32).
static const unsigned int kBitsPerUint32 = sizeof(Uint32) * 8;  // NOLINT
+
+// Looks for the first non-full entry, and return the first free bit in that
+// entry. If all the entries are full, it will return the first bit of an entry
+// that would be appended, but doesn't actually append that entry to the vector.
+unsigned int IdAllocator::FindFirstFree() const {
+ size_t size = bitmap_.size();
+ for (unsigned int i = 0; i < size; ++i) {
+ Uint32 value = bitmap_[i];
+ if (value != 0xffffffffU) {
+ for (unsigned int j = 0; j < kBitsPerUint32; ++j) {
+ if (!(value & (1 << j))) return i * kBitsPerUint32 + j;
+ }
+ DLOG(FATAL) << "Code should not reach here.";
+ }
+ }
+ return size*kBitsPerUint32;
+}
+
+// Sets the correct bit in the proper entry, resizing the vector if needed.
+void IdAllocator::SetBit(unsigned int bit, bool value) {
+ size_t size = bitmap_.size();
+ if (bit >= size * kBitsPerUint32) {
+ size_t newsize = bit / kBitsPerUint32 + 1;
+ bitmap_.resize(newsize);
+ for (size_t i = size; i < newsize; ++i) bitmap_[i] = 0;
+ }
+ Uint32 mask = 1U << (bit % kBitsPerUint32);
+ if (value) {
+ bitmap_[bit / kBitsPerUint32] |= mask;
+ } else {
+ bitmap_[bit / kBitsPerUint32] &= ~mask;
+ }
+}
+
+// Gets the bit from the proper entry. This doesn't resize the vector, just
+// returns false if the bit is beyond the last entry.
+bool IdAllocator::GetBit(unsigned int bit) const {
+ size_t size = bitmap_.size();
+ if (bit / kBitsPerUint32 >= size) return false;
+ Uint32 mask = 1U << (bit % kBitsPerUint32);
+ return (bitmap_[bit / kBitsPerUint32] & mask) != 0;
+}
+
+} // namespace command_buffer
diff --git a/o3d/command_buffer/client/id_allocator.h b/o3d/command_buffer/client/id_allocator.h
new file mode 100644
index 0000000..819d13e
--- /dev/null
+++ b/o3d/command_buffer/client/id_allocator.h
@@ -0,0 +1,78 @@
+/*
+ * Copyright 2009, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+
+// This file contains the definition of the IdAllocator class.
+
+#ifndef GPU_COMMAND_BUFFER_CLIENT_CROSS_ID_ALLOCATOR_H_
+#define GPU_COMMAND_BUFFER_CLIENT_CROSS_ID_ALLOCATOR_H_
+
+#include <vector>
+#include "base/basictypes.h"
+#include "command_buffer/common/types.h"
+#include "command_buffer/common/resource.h"
+
+namespace command_buffer {
+
+// A class to manage the allocation of resource IDs. It uses a bitfield stored
+// into a vector of unsigned ints.
+class IdAllocator {
+ public:
+ IdAllocator();
+
+ // Allocates a new resource ID.
+ command_buffer::ResourceId AllocateID() {
+ unsigned int bit = FindFirstFree();
+ SetBit(bit, true);
+ return bit;
+ }
+
+ // Frees a resource ID.
+ void FreeID(command_buffer::ResourceId id) {
+ SetBit(id, false);
+ }
+
+ // Checks whether or not a resource ID is in use.
+ bool InUse(command_buffer::ResourceId id) {
+ return GetBit(id);
+ }
+ private:
+ void SetBit(unsigned int bit, bool value);
+ bool GetBit(unsigned int bit) const;
+ unsigned int FindFirstFree() const;
+
+ std::vector<Uint32> bitmap_;
+ DISALLOW_COPY_AND_ASSIGN(IdAllocator);
+};
+
+} // namespace command_buffer
+
+#endif // GPU_COMMAND_BUFFER_CLIENT_CROSS_ID_ALLOCATOR_H_
diff --git a/o3d/command_buffer/client/id_allocator_test.cc b/o3d/command_buffer/client/id_allocator_test.cc
new file mode 100644
index 0000000..b344ab5
--- /dev/null
+++ b/o3d/command_buffer/client/id_allocator_test.cc
@@ -0,0 +1,112 @@
+/*
+ * Copyright 2009, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+
+// This file has the unit tests for the IdAllocator class.
+
+#include "tests/common/win/testing_common.h"
+#include "command_buffer/client/id_allocator.h"
+
+namespace command_buffer {
+
+using command_buffer::ResourceId;
+
+class IdAllocatorTest : public testing::Test {
+ protected:
+ virtual void SetUp() {}
+ virtual void TearDown() {}
+
+ IdAllocator* id_allocator() { return &id_allocator_; }
+
+ private:
+ IdAllocator id_allocator_;
+};
+
// Checks basic functionality: AllocateID, FreeID, InUse.
TEST_F(IdAllocatorTest, TestBasic) {
  IdAllocator *allocator = id_allocator();
  // Check that resource 0 is not in use
  EXPECT_FALSE(allocator->InUse(0));

  // Allocate an ID, check that it's in use.
  ResourceId id1 = allocator->AllocateID();
  EXPECT_TRUE(allocator->InUse(id1));

  // Allocate another ID, check that it's in use, and different from the first
  // one.
  ResourceId id2 = allocator->AllocateID();
  EXPECT_TRUE(allocator->InUse(id2));
  EXPECT_NE(id1, id2);

  // Free one of the IDs, check that it's not in use any more.
  allocator->FreeID(id1);
  EXPECT_FALSE(allocator->InUse(id1));

  // Frees the other ID, check that it's not in use any more.
  allocator->FreeID(id2);
  EXPECT_FALSE(allocator->InUse(id2));
}
+
// Checks that the resource IDs are allocated conservatively, and re-used after
// being freed.
TEST_F(IdAllocatorTest, TestAdvanced) {
  IdAllocator *allocator = id_allocator();

  // Allocate a significant number of resources.
  const unsigned int kNumResources = 100;
  ResourceId ids[kNumResources];
  for (unsigned int i = 0; i < kNumResources; ++i) {
    ids[i] = allocator->AllocateID();
    EXPECT_TRUE(allocator->InUse(ids[i]));
  }

  // Check that the allocation is conservative with resource IDs, that is that
  // the resource IDs don't go over kNumResources - so that the service doesn't
  // have to allocate too many internal structures when the resources are used.
  for (unsigned int i = 0; i < kNumResources; ++i) {
    EXPECT_GT(kNumResources, ids[i]);
  }

  // Check that the next resources are still free.
  for (unsigned int i = 0; i < kNumResources; ++i) {
    EXPECT_FALSE(allocator->InUse(kNumResources + i));
  }

  // Free an ID in the middle, then check that a new allocation re-uses it.
  ResourceId id1 = ids[kNumResources / 2];
  allocator->FreeID(id1);
  EXPECT_FALSE(allocator->InUse(id1));
  ResourceId id2 = allocator->AllocateID();
  EXPECT_TRUE(allocator->InUse(id2));
  EXPECT_EQ(id1, id2);
}
+
+} // namespace command_buffer
diff --git a/o3d/command_buffer/client/o3d_cmd_helper.cc b/o3d/command_buffer/client/o3d_cmd_helper.cc
new file mode 100644
index 0000000..cb9ffe1
--- /dev/null
+++ b/o3d/command_buffer/client/o3d_cmd_helper.cc
@@ -0,0 +1,42 @@
+/*
+ * Copyright 2009, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+
+// This file contains the implementation of the o3d buffer helper class.
+
+#include "command_buffer/client/o3d_cmd_helper.h"
+
+namespace command_buffer {
+
// Currently this is a placeholder.
+
+} // namespace command_buffer
+
diff --git a/o3d/command_buffer/client/o3d_cmd_helper.h b/o3d/command_buffer/client/o3d_cmd_helper.h
new file mode 100644
index 0000000..4f4aed2
--- /dev/null
+++ b/o3d/command_buffer/client/o3d_cmd_helper.h
@@ -0,0 +1,636 @@
+/*
+ * Copyright 2009, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+
+// This file contains the o3d command buffer helper class.
+
+#ifndef GPU_COMMAND_BUFFER_CLIENT_CROSS_O3D_CMD_HELPER_H_
+#define GPU_COMMAND_BUFFER_CLIENT_CROSS_O3D_CMD_HELPER_H_
+
+#include "command_buffer/common/logging.h"
+#include "command_buffer/common/constants.h"
+#include "command_buffer/client/cmd_buffer_helper.h"
+#include "command_buffer/common/o3d_cmd_format.h"
+#include "o3d/gpu_plugin/np_utils/np_object_pointer.h"
+
+namespace command_buffer {
+
+// A helper for O3D command buffers.
+class O3DCmdHelper : public CommandBufferHelper {
+ public:
+ // Forwards the plugin instance and command buffer object to the base
+ // CommandBufferHelper, which owns the actual buffer management.
+ O3DCmdHelper(
+ NPP npp,
+ const gpu_plugin::NPObjectPointer<gpu_plugin::CommandBuffer>&
+ command_buffer)
+ : CommandBufferHelper(npp, command_buffer) {
+ }
+ virtual ~O3DCmdHelper() {
+ }
+
+ // ------------------ Individual commands ----------------------
+ //
+ // Each method below reserves space for exactly one command in the command
+ // buffer and fills it in through the command struct's Init() method.
+ // Three argument patterns recur:
+ // - *Immediate variants copy |size| bytes of |data| inline into the
+ // command buffer (GetImmediateCmdSpace reserves the extra space).
+ // - Non-immediate data-transfer commands reference an out-of-band
+ // (shared_memory_id, shared_memory_offset) region instead.
+ // - Get* commands also take a shared memory region; presumably the
+ // service writes the requested result there — confirm against the
+ // service-side command handlers.
+
+ // ---- Frame control ----
+
+ void BeginFrame() {
+ o3d::BeginFrame& cmd = GetCmdSpace<o3d::BeginFrame>();
+ cmd.Init();
+ }
+
+
+ void EndFrame() {
+ o3d::EndFrame& cmd = GetCmdSpace<o3d::EndFrame>();
+ cmd.Init();
+ }
+
+ // Clears the buffers selected by the |buffers| bit mask to the given
+ // color / depth / stencil values.
+ void Clear(
+ uint32 buffers,
+ float red, float green, float blue, float alpha,
+ float depth, uint32 stencil) {
+ o3d::Clear& cmd = GetCmdSpace<o3d::Clear>();
+ cmd.Init(buffers, red, green, blue, alpha, depth, stencil);
+ }
+
+ void SetViewport(
+ uint32 left,
+ uint32 top,
+ uint32 width,
+ uint32 height,
+ float z_min,
+ float z_max) {
+ o3d::SetViewport& cmd = GetCmdSpace<o3d::SetViewport>();
+ cmd.Init(left, top, width, height, z_min, z_max);
+ }
+
+ // ---- Vertex buffers ----
+
+ void CreateVertexBuffer(
+ ResourceId vertex_buffer_id, uint32 size, vertex_buffer::Flags flags) {
+ o3d::CreateVertexBuffer& cmd = GetCmdSpace<o3d::CreateVertexBuffer>();
+ cmd.Init(vertex_buffer_id, size, flags);
+ }
+
+ void DestroyVertexBuffer(ResourceId vertex_buffer_id) {
+ o3d::DestroyVertexBuffer& cmd = GetCmdSpace<o3d::DestroyVertexBuffer>();
+ cmd.Init(vertex_buffer_id);
+ }
+
+ // Uploads |size| bytes of |data| into the buffer at |offset|, carrying the
+ // bytes inline in the command buffer.
+ void SetVertexBufferDataImmediate(
+ ResourceId vertex_buffer_id, uint32 offset,
+ const void* data, uint32 size) {
+ o3d::SetVertexBufferDataImmediate& cmd =
+ GetImmediateCmdSpace<o3d::SetVertexBufferDataImmediate>(size);
+ cmd.Init(vertex_buffer_id, offset, data, size);
+ }
+
+ void SetVertexBufferData(
+ ResourceId vertex_buffer_id, uint32 offset, uint32 size,
+ uint32 shared_memory_id, uint32 shared_memory_offset) {
+ o3d::SetVertexBufferData& cmd =
+ GetCmdSpace<o3d::SetVertexBufferData>();
+ cmd.Init(vertex_buffer_id, offset, size,
+ shared_memory_id, shared_memory_offset);
+ }
+
+ void GetVertexBufferData(
+ ResourceId vertex_buffer_id, uint32 offset, uint32 size,
+ uint32 shared_memory_id, uint32 shared_memory_offset) {
+ o3d::GetVertexBufferData& cmd =
+ GetCmdSpace<o3d::GetVertexBufferData>();
+ cmd.Init(vertex_buffer_id, offset, size,
+ shared_memory_id, shared_memory_offset);
+ }
+
+ // ---- Index buffers ----
+
+ void CreateIndexBuffer(
+ ResourceId index_buffer_id, uint32 size, index_buffer::Flags flags) {
+ o3d::CreateIndexBuffer& cmd =
+ GetCmdSpace<o3d::CreateIndexBuffer>();
+ cmd.Init(index_buffer_id, size, flags);
+ }
+
+ void DestroyIndexBuffer(ResourceId index_buffer_id) {
+ o3d::DestroyIndexBuffer& cmd = GetCmdSpace<o3d::DestroyIndexBuffer>();
+ cmd.Init(index_buffer_id);
+ }
+
+ void SetIndexBufferDataImmediate(
+ ResourceId index_buffer_id, uint32 offset,
+ const void* data, uint32 size) {
+ o3d::SetIndexBufferDataImmediate& cmd =
+ GetImmediateCmdSpace<o3d::SetIndexBufferDataImmediate>(size);
+ cmd.Init(index_buffer_id, offset, data, size);
+ }
+
+ void SetIndexBufferData(
+ ResourceId index_buffer_id, uint32 offset, uint32 size,
+ uint32 shared_memory_id, uint32 shared_memory_offset) {
+ o3d::SetIndexBufferData& cmd = GetCmdSpace<o3d::SetIndexBufferData>();
+ cmd.Init(index_buffer_id, offset, size,
+ shared_memory_id, shared_memory_offset);
+ }
+
+ void GetIndexBufferData(
+ ResourceId index_buffer_id, uint32 offset, uint32 size,
+ uint32 shared_memory_id, uint32 shared_memory_offset) {
+ o3d::GetIndexBufferData& cmd = GetCmdSpace<o3d::GetIndexBufferData>();
+ cmd.Init(index_buffer_id, offset, size,
+ shared_memory_id, shared_memory_offset);
+ }
+
+ // ---- Vertex structs (input layouts) ----
+
+ // Creates a vertex struct with |input_count| input slots, to be filled in
+ // by SetVertexInput below.
+ void CreateVertexStruct(ResourceId vertex_struct_id, uint32 input_count) {
+ o3d::CreateVertexStruct& cmd = GetCmdSpace<o3d::CreateVertexStruct>();
+ cmd.Init(vertex_struct_id, input_count);
+ }
+
+ void DestroyVertexStruct(ResourceId vertex_struct_id) {
+ o3d::DestroyVertexStruct& cmd = GetCmdSpace<o3d::DestroyVertexStruct>();
+ cmd.Init(vertex_struct_id);
+ }
+
+ // Describes one input of a vertex struct: which vertex buffer it reads
+ // from, at what offset and stride, and the semantic/type it is bound to.
+ void SetVertexInput(
+ ResourceId vertex_struct_id,
+ uint32 input_index,
+ ResourceId vertex_buffer_id,
+ uint32 offset,
+ vertex_struct::Semantic semantic,
+ uint32 semantic_index,
+ vertex_struct::Type type,
+ uint32 stride) {
+ o3d::SetVertexInput& cmd = GetCmdSpace<o3d::SetVertexInput>();
+ cmd.Init(
+ vertex_struct_id,
+ input_index,
+ vertex_buffer_id,
+ offset,
+ semantic,
+ semantic_index,
+ type,
+ stride);
+ }
+
+ void SetVertexStruct(ResourceId vertex_struct_id) {
+ o3d::SetVertexStruct& cmd = GetCmdSpace<o3d::SetVertexStruct>();
+ cmd.Init(vertex_struct_id);
+ }
+
+ // ---- Draw calls ----
+
+ void Draw(o3d::PrimitiveType primitive_type, uint32 first, uint32 count) {
+ o3d::Draw& cmd = GetCmdSpace<o3d::Draw>();
+ cmd.Init(primitive_type, first, count);
+ }
+
+ // NOTE(review): min_index/max_index look like bounds on the index values
+ // referenced by the draw — confirm exact semantics in the service decoder.
+ void DrawIndexed(
+ o3d::PrimitiveType primitive_type,
+ ResourceId index_buffer_id,
+ uint32 first,
+ uint32 count,
+ uint32 min_index,
+ uint32 max_index) {
+ o3d::DrawIndexed& cmd = GetCmdSpace<o3d::DrawIndexed>();
+ cmd.Init(
+ primitive_type,
+ index_buffer_id,
+ first,
+ count,
+ min_index,
+ max_index);
+ }
+
+ // ---- Effects and parameters ----
+
+ void CreateEffect(
+ ResourceId effect_id, uint32 size,
+ uint32 shared_memory_id, uint32 shared_memory_offset) {
+ o3d::CreateEffect& cmd = GetCmdSpace<o3d::CreateEffect>();
+ cmd.Init(effect_id, size, shared_memory_id, shared_memory_offset);
+ }
+
+ void CreateEffectImmediate(
+ ResourceId effect_id, uint32 size, const void* data) {
+ o3d::CreateEffectImmediate& cmd =
+ GetImmediateCmdSpace<o3d::CreateEffectImmediate>(size);
+ cmd.Init(effect_id, size, data);
+ }
+
+ void DestroyEffect(ResourceId effect_id) {
+ o3d::DestroyEffect& cmd = GetCmdSpace<o3d::DestroyEffect>();
+ cmd.Init(effect_id);
+ }
+
+ void SetEffect(ResourceId effect_id) {
+ o3d::SetEffect& cmd = GetCmdSpace<o3d::SetEffect>();
+ cmd.Init(effect_id);
+ }
+
+ void GetParamCount(
+ ResourceId effect_id, uint32 size,
+ uint32 shared_memory_id, uint32 shared_memory_offset) {
+ o3d::GetParamCount& cmd = GetCmdSpace<o3d::GetParamCount>();
+ cmd.Init(effect_id, size, shared_memory_id, shared_memory_offset);
+ }
+
+ // Creates a param for |effect_id| addressed by its |index| (as opposed to
+ // CreateParamByName* below, which address it by name).
+ void CreateParam(ResourceId param_id, ResourceId effect_id, uint32 index) {
+ o3d::CreateParam& cmd = GetCmdSpace<o3d::CreateParam>();
+ cmd.Init(param_id, effect_id, index);
+ }
+
+ void CreateParamByName(
+ ResourceId param_id, ResourceId effect_id, uint32 size,
+ uint32 shared_memory_id, uint32 shared_memory_offset) {
+ o3d::CreateParamByName& cmd = GetCmdSpace<o3d::CreateParamByName>();
+ cmd.Init(param_id, effect_id, size, shared_memory_id, shared_memory_offset);
+ }
+
+ void CreateParamByNameImmediate(
+ ResourceId param_id, ResourceId effect_id,
+ uint32 size, const void* data) {
+ o3d::CreateParamByNameImmediate& cmd =
+ GetImmediateCmdSpace<o3d::CreateParamByNameImmediate>(size);
+ cmd.Init(param_id, effect_id, size, data);
+ }
+
+ void DestroyParam(ResourceId param_id) {
+ o3d::DestroyParam& cmd = GetCmdSpace<o3d::DestroyParam>();
+ cmd.Init(param_id);
+ }
+
+ void SetParamData(
+ ResourceId param_id, uint32 size,
+ uint32 shared_memory_id, uint32 shared_memory_offset) {
+ o3d::SetParamData& cmd = GetCmdSpace<o3d::SetParamData>();
+ cmd.Init(param_id, size, shared_memory_id, shared_memory_offset);
+ }
+
+ void SetParamDataImmediate(
+ ResourceId param_id, uint32 size, const void* data) {
+ o3d::SetParamDataImmediate& cmd =
+ GetImmediateCmdSpace<o3d::SetParamDataImmediate>(size);
+ cmd.Init(param_id, size, data);
+ }
+
+ void GetParamDesc(
+ ResourceId param_id, uint32 size,
+ uint32 shared_memory_id, uint32 shared_memory_offset) {
+ o3d::GetParamDesc& cmd = GetCmdSpace<o3d::GetParamDesc>();
+ cmd.Init(param_id, size, shared_memory_id, shared_memory_offset);
+ }
+
+ void GetStreamCount(
+ ResourceId effect_id, uint32 size,
+ uint32 shared_memory_id, uint32 shared_memory_offset) {
+ o3d::GetStreamCount& cmd = GetCmdSpace<o3d::GetStreamCount>();
+ cmd.Init(effect_id, size, shared_memory_id, shared_memory_offset);
+ }
+
+ void GetStreamDesc(
+ ResourceId effect_id, uint32 index, uint32 size,
+ uint32 shared_memory_id, uint32 shared_memory_offset) {
+ o3d::GetStreamDesc& cmd = GetCmdSpace<o3d::GetStreamDesc>();
+ cmd.Init(effect_id, index, size, shared_memory_id, shared_memory_offset);
+ }
+
+ // ---- Textures ----
+
+ void DestroyTexture(ResourceId texture_id) {
+ o3d::DestroyTexture& cmd = GetCmdSpace<o3d::DestroyTexture>();
+ cmd.Init(texture_id);
+ }
+
+ // NOTE(review): |levels| is presumably the number of mipmap levels and
+ // |enable_render_surfaces| whether the texture can back a render surface —
+ // confirm against the texture resource documentation.
+ void CreateTexture2d(
+ ResourceId texture_id,
+ uint32 width, uint32 height,
+ uint32 levels, texture::Format format,
+ bool enable_render_surfaces) {
+ o3d::CreateTexture2d& cmd = GetCmdSpace<o3d::CreateTexture2d>();
+ cmd.Init(texture_id,
+ width, height, levels, format,
+ enable_render_surfaces);
+ }
+
+ void CreateTexture3d(
+ ResourceId texture_id,
+ uint32 width, uint32 height, uint32 depth,
+ uint32 levels, texture::Format format,
+ bool enable_render_surfaces) {
+ o3d::CreateTexture3d& cmd = GetCmdSpace<o3d::CreateTexture3d>();
+ cmd.Init(texture_id,
+ width, height, depth,
+ levels, format,
+ enable_render_surfaces);
+ }
+
+ void CreateTextureCube(
+ ResourceId texture_id,
+ uint32 edge_length, uint32 levels, texture::Format format,
+ bool enable_render_surfaces) {
+ o3d::CreateTextureCube& cmd = GetCmdSpace<o3d::CreateTextureCube>();
+ cmd.Init(texture_id,
+ edge_length, levels, format,
+ enable_render_surfaces);
+ }
+
+ // Uploads a (x, y, z)-origin, width x height x depth box of texel data for
+ // one |level|/|face| of the texture from shared memory; |row_pitch| and
+ // |slice_pitch| describe the source layout.
+ void SetTextureData(
+ ResourceId texture_id,
+ uint32 x,
+ uint32 y,
+ uint32 z,
+ uint32 width,
+ uint32 height,
+ uint32 depth,
+ uint32 level,
+ texture::Face face,
+ uint32 row_pitch,
+ uint32 slice_pitch,
+ uint32 size,
+ uint32 shared_memory_id,
+ uint32 shared_memory_offset) {
+ o3d::SetTextureData& cmd = GetCmdSpace<o3d::SetTextureData>();
+ cmd.Init(
+ texture_id,
+ x,
+ y,
+ z,
+ width,
+ height,
+ depth,
+ level,
+ face,
+ row_pitch,
+ slice_pitch,
+ size,
+ shared_memory_id,
+ shared_memory_offset);
+ }
+
+ // Same as SetTextureData, but with the texel bytes carried inline in the
+ // command buffer.
+ void SetTextureDataImmediate(
+ ResourceId texture_id,
+ uint32 x,
+ uint32 y,
+ uint32 z,
+ uint32 width,
+ uint32 height,
+ uint32 depth,
+ uint32 level,
+ texture::Face face,
+ uint32 row_pitch,
+ uint32 slice_pitch,
+ uint32 size,
+ const void* data) {
+ o3d::SetTextureDataImmediate& cmd =
+ GetImmediateCmdSpace<o3d::SetTextureDataImmediate>(size);
+ cmd.Init(
+ texture_id,
+ x,
+ y,
+ z,
+ width,
+ height,
+ depth,
+ level,
+ face,
+ row_pitch,
+ slice_pitch,
+ size,
+ data);
+ }
+
+ void GetTextureData(
+ ResourceId texture_id,
+ uint32 x,
+ uint32 y,
+ uint32 z,
+ uint32 width,
+ uint32 height,
+ uint32 depth,
+ uint32 level,
+ texture::Face face,
+ uint32 row_pitch,
+ uint32 slice_pitch,
+ uint32 size,
+ uint32 shared_memory_id,
+ uint32 shared_memory_offset) {
+ o3d::GetTextureData& cmd = GetCmdSpace<o3d::GetTextureData>();
+ cmd.Init(
+ texture_id,
+ x,
+ y,
+ z,
+ width,
+ height,
+ depth,
+ level,
+ face,
+ row_pitch,
+ slice_pitch,
+ size,
+ shared_memory_id,
+ shared_memory_offset);
+ }
+
+ // ---- Samplers ----
+
+ void CreateSampler(ResourceId sampler_id) {
+ o3d::CreateSampler& cmd = GetCmdSpace<o3d::CreateSampler>();
+ cmd.Init(sampler_id);
+ }
+
+ void DestroySampler(ResourceId sampler_id) {
+ o3d::DestroySampler& cmd = GetCmdSpace<o3d::DestroySampler>();
+ cmd.Init(sampler_id);
+ }
+
+ void SetSamplerStates(
+ ResourceId sampler_id,
+ sampler::AddressingMode address_u_value,
+ sampler::AddressingMode address_v_value,
+ sampler::AddressingMode address_w_value,
+ sampler::FilteringMode mag_filter_value,
+ sampler::FilteringMode min_filter_value,
+ sampler::FilteringMode mip_filter_value,
+ uint8 max_anisotropy) {
+ o3d::SetSamplerStates& cmd = GetCmdSpace<o3d::SetSamplerStates>();
+ cmd.Init(
+ sampler_id,
+ address_u_value,
+ address_v_value,
+ address_w_value,
+ mag_filter_value,
+ min_filter_value,
+ mip_filter_value,
+ max_anisotropy);
+ }
+
+ void SetSamplerBorderColor(
+ ResourceId sampler_id,
+ float red, float green, float blue, float alpha) {
+ o3d::SetSamplerBorderColor& cmd =
+ GetCmdSpace<o3d::SetSamplerBorderColor>();
+ cmd.Init(sampler_id, red, green, blue, alpha);
+ }
+
+ void SetSamplerTexture(ResourceId sampler_id, ResourceId texture_id) {
+ o3d::SetSamplerTexture& cmd = GetCmdSpace<o3d::SetSamplerTexture>();
+ cmd.Init(sampler_id, texture_id);
+ }
+
+ // ---- Render states ----
+
+ void SetScissor(
+ uint32 x,
+ uint32 y,
+ uint32 width,
+ uint32 height,
+ bool enable) {
+ o3d::SetScissor& cmd = GetCmdSpace<o3d::SetScissor>();
+ cmd.Init(
+ x,
+ y,
+ width,
+ height,
+ enable);
+ }
+
+ void SetPolygonOffset(float slope_factor, float units) {
+ o3d::SetPolygonOffset& cmd = GetCmdSpace<o3d::SetPolygonOffset>();
+ cmd.Init(slope_factor, units);
+ }
+
+ void SetPointLineRaster(
+ bool line_smooth_enable, bool point_sprite_enable, float point_size) {
+ o3d::SetPointLineRaster& cmd = GetCmdSpace<o3d::SetPointLineRaster>();
+ cmd.Init(line_smooth_enable, point_sprite_enable, point_size);
+ }
+
+ void SetPolygonRaster(o3d::PolygonMode fill_mode,
+ o3d::FaceCullMode cull_mode) {
+ o3d::SetPolygonRaster& cmd = GetCmdSpace<o3d::SetPolygonRaster>();
+ cmd.Init(fill_mode, cull_mode);
+ }
+
+ void SetAlphaTest(o3d::Comparison func, bool enable, float value) {
+ o3d::SetAlphaTest& cmd = GetCmdSpace<o3d::SetAlphaTest>();
+ cmd.Init(func, enable, value);
+ }
+
+ void SetDepthTest(o3d::Comparison func, bool write_enable, bool enable) {
+ o3d::SetDepthTest& cmd = GetCmdSpace<o3d::SetDepthTest>();
+ cmd.Init(func, write_enable, enable);
+ }
+
+ // NOTE(review): when |separate_ccw| is set the ccw_* func/ops presumably
+ // apply to counter-clockwise faces (two-sided stencil); otherwise the cw_*
+ // set applies to both — confirm against the service decoder.
+ void SetStencilTest(
+ uint8 write_mask,
+ uint8 compare_mask,
+ uint8 reference_value,
+ bool separate_ccw,
+ bool enable,
+ o3d::Comparison cw_func,
+ o3d::StencilOp cw_pass_op,
+ o3d::StencilOp cw_fail_op,
+ o3d::StencilOp cw_z_fail_op,
+ o3d::Comparison ccw_func,
+ o3d::StencilOp ccw_pass_op,
+ o3d::StencilOp ccw_fail_op,
+ o3d::StencilOp ccw_z_fail_op) {
+ o3d::SetStencilTest& cmd = GetCmdSpace<o3d::SetStencilTest>();
+ cmd.Init(
+ write_mask,
+ compare_mask,
+ reference_value,
+ separate_ccw,
+ enable,
+ cw_func,
+ cw_pass_op,
+ cw_fail_op,
+ cw_z_fail_op,
+ ccw_func,
+ ccw_pass_op,
+ ccw_fail_op,
+ ccw_z_fail_op);
+ }
+
+ void SetColorWrite(uint8 mask, bool dither_enable) {
+ o3d::SetColorWrite& cmd = GetCmdSpace<o3d::SetColorWrite>();
+ cmd.Init(mask, dither_enable);
+ }
+
+ // NOTE(review): when |separate_alpha| is set the alpha_* func/eq presumably
+ // control alpha blending separately from color — confirm.
+ void SetBlending(
+ o3d::BlendFunc color_src_func,
+ o3d::BlendFunc color_dst_func,
+ o3d::BlendEq color_eq,
+ o3d::BlendFunc alpha_src_func,
+ o3d::BlendFunc alpha_dst_func,
+ o3d::BlendEq alpha_eq,
+ bool separate_alpha,
+ bool enable) {
+ o3d::SetBlending& cmd = GetCmdSpace<o3d::SetBlending>();
+ cmd.Init(
+ color_src_func,
+ color_dst_func,
+ color_eq,
+ alpha_src_func,
+ alpha_dst_func,
+ alpha_eq,
+ separate_alpha,
+ enable);
+ }
+
+ void SetBlendingColor(float red, float green, float blue, float alpha) {
+ o3d::SetBlendingColor& cmd = GetCmdSpace<o3d::SetBlendingColor>();
+ cmd.Init(red, green, blue, alpha);
+ }
+
+ // ---- Render / depth surfaces ----
+
+ // NOTE(review): |level| and |side| presumably select a mip level and cube
+ // face of |texture_id| to render into — confirm.
+ void CreateRenderSurface(
+ ResourceId render_surface_id, ResourceId texture_id,
+ uint32 width, uint32 height,
+ uint32 level, uint32 side) {
+ o3d::CreateRenderSurface& cmd = GetCmdSpace<o3d::CreateRenderSurface>();
+ cmd.Init(render_surface_id, texture_id, width, height, level, side);
+ }
+
+ void DestroyRenderSurface(ResourceId render_surface_id) {
+ o3d::DestroyRenderSurface& cmd =
+ GetCmdSpace<o3d::DestroyRenderSurface>();
+ cmd.Init(render_surface_id);
+ }
+
+ void CreateDepthSurface(
+ ResourceId depth_surface_id, uint32 width, uint32 height) {
+ o3d::CreateDepthSurface& cmd = GetCmdSpace<o3d::CreateDepthSurface>();
+ cmd.Init(depth_surface_id, width, height);
+ }
+
+ void DestroyDepthSurface(ResourceId depth_surface_id) {
+ o3d::DestroyDepthSurface& cmd = GetCmdSpace<o3d::DestroyDepthSurface>();
+ cmd.Init(depth_surface_id);
+ }
+
+ void SetRenderSurface(
+ ResourceId render_surface_id, ResourceId depth_surface_id) {
+ o3d::SetRenderSurface& cmd = GetCmdSpace<o3d::SetRenderSurface>();
+ cmd.Init(render_surface_id, depth_surface_id);
+ }
+
+ // Restores rendering to the default back (and depth) buffers.
+ void SetBackSurfaces() {
+ o3d::SetBackSurfaces& cmd = GetCmdSpace<o3d::SetBackSurfaces>();
+ cmd.Init();
+ }
+};
+
+} // namespace command_buffer
+
+#endif // GPU_COMMAND_BUFFER_CLIENT_CROSS_O3D_CMD_HELPER_H_
+