summaryrefslogtreecommitdiffstats
path: root/gpu
diff options
context:
space:
mode:
authorgman@chromium.org <gman@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>2010-04-28 19:44:59 +0000
committergman@chromium.org <gman@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>2010-04-28 19:44:59 +0000
commitf6a5698b887a63d74575cedcda9e099e6373153f (patch)
tree53f1b25de71d3f8342c508c08911ba900470b946 /gpu
parentec11be60eaf6e832219328ea18656c558dec3040 (diff)
downloadchromium_src-f6a5698b887a63d74575cedcda9e099e6373153f.zip
chromium_src-f6a5698b887a63d74575cedcda9e099e6373153f.tar.gz
chromium_src-f6a5698b887a63d74575cedcda9e099e6373153f.tar.bz2
Changes the GLES2Implementation to use a RingBuffer
to manage the transfer buffer. This is significantly faster than the FencedAllocator for our purposes. TEST=some unit tests BUG=none Review URL: http://codereview.chromium.org/1796002 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@45844 0039d316-1c4b-4281-b951-d872f2087c98
Diffstat (limited to 'gpu')
-rw-r--r--gpu/command_buffer/client/gles2_implementation.cc17
-rw-r--r--gpu/command_buffer/client/gles2_implementation.h23
-rw-r--r--gpu/command_buffer/client/ring_buffer.cc103
-rw-r--r--gpu/command_buffer/client/ring_buffer.h192
-rw-r--r--gpu/command_buffer/client/ring_buffer_test.cc256
-rw-r--r--gpu/gpu.gyp3
6 files changed, 576 insertions, 18 deletions
diff --git a/gpu/command_buffer/client/gles2_implementation.cc b/gpu/command_buffer/client/gles2_implementation.cc
index 724de0b..2536af5 100644
--- a/gpu/command_buffer/client/gles2_implementation.cc
+++ b/gpu/command_buffer/client/gles2_implementation.cc
@@ -334,7 +334,11 @@ GLES2Implementation::GLES2Implementation(
int32 transfer_buffer_id)
: util_(0), // TODO(gman): Get real number of compressed texture formats.
helper_(helper),
- transfer_buffer_(transfer_buffer_size, helper, transfer_buffer),
+ transfer_buffer_(
+ kStartingOffset,
+ transfer_buffer_size - kStartingOffset,
+ helper,
+ static_cast<char*>(transfer_buffer) + kStartingOffset),
transfer_buffer_id_(transfer_buffer_id),
pack_alignment_(4),
unpack_alignment_(4),
@@ -344,8 +348,8 @@ GLES2Implementation::GLES2Implementation(
#endif
error_bits_(0) {
// Allocate space for simple GL results.
- result_buffer_ = transfer_buffer_.Alloc(kMaxSizeOfSimpleResult);
- result_shm_offset_ = transfer_buffer_.GetOffset(result_buffer_);
+ result_buffer_ = transfer_buffer;
+ result_shm_offset_ = 0;
#if defined(GLES2_SUPPORT_CLIENT_SIDE_BUFFERS)
GLint max_vertex_attribs;
@@ -366,7 +370,6 @@ GLES2Implementation::GLES2Implementation(
GLES2Implementation::~GLES2Implementation() {
GLuint buffers[] = { kClientSideArrayId, kClientSideElementArrayId, };
DeleteBuffers(arraysize(buffers), &buffers[0]);
- transfer_buffer_.Free(result_buffer_);
}
void GLES2Implementation::MakeIds(
@@ -440,7 +443,7 @@ void GLES2Implementation::GetBucketContents(uint32 bucket_id,
transfer_buffer_id_, transfer_buffer_.GetOffset(buffer));
WaitForCmd();
memcpy(&(*data)[offset], buffer, part_size);
- transfer_buffer_.Free(buffer);
+ transfer_buffer_.FreePendingToken(buffer, helper_->InsertToken());
offset += part_size;
size -= part_size;
}
@@ -1140,7 +1143,7 @@ void GLES2Implementation::ReadPixels(
dest += padded_row_size;
src += padded_row_size;
}
- transfer_buffer_.Free(buffer);
+ transfer_buffer_.FreePendingToken(buffer, helper_->InsertToken());
yoffset += num_rows;
height -= num_rows;
}
@@ -1170,7 +1173,7 @@ void GLES2Implementation::ReadPixels(
return;
}
memcpy(row_dest, buffer, part_size);
- transfer_buffer_.Free(buffer);
+ transfer_buffer_.FreePendingToken(buffer, helper_->InsertToken());
row_dest += part_size;
temp_xoffset += num_pixels;
temp_width -= num_pixels;
diff --git a/gpu/command_buffer/client/gles2_implementation.h b/gpu/command_buffer/client/gles2_implementation.h
index 2c37cba..7d52485 100644
--- a/gpu/command_buffer/client/gles2_implementation.h
+++ b/gpu/command_buffer/client/gles2_implementation.h
@@ -12,7 +12,7 @@
#include "../common/scoped_ptr.h"
#include "../client/gles2_cmd_helper.h"
#include "../client/id_allocator.h"
-#include "../client/fenced_allocator.h"
+#include "../client/ring_buffer.h"
#define GLES2_SUPPORT_CLIENT_SIDE_BUFFERS 1
@@ -139,25 +139,26 @@ class GLES2Implementation {
}
private:
- // Wraps FencedAllocatorWrapper to provide aligned allocations.
- class AlignedFencedAllocator : public FencedAllocatorWrapper {
+ // Wraps RingBufferWrapper to provide aligned allocations.
+ class AlignedRingBuffer : public RingBufferWrapper {
public:
- AlignedFencedAllocator(unsigned int size,
- CommandBufferHelper *helper,
- void *base)
- : FencedAllocatorWrapper(size, helper, base) {
+ AlignedRingBuffer(RingBuffer::Offset base_offset,
+ unsigned int size,
+ CommandBufferHelper *helper,
+ void *base)
+ : RingBufferWrapper(base_offset, size, helper, base) {
}
static unsigned int RoundToAlignment(unsigned int size) {
return (size + kAlignment - 1) & ~(kAlignment - 1);
}
- // Overrriden from FencedAllocatorWrapper
+  // Overridden from RingBufferWrapper
void *Alloc(unsigned int size) {
- return FencedAllocatorWrapper::Alloc(RoundToAlignment(size));
+ return RingBufferWrapper::Alloc(RoundToAlignment(size));
}
- // Overrriden from FencedAllocatorWrapper
+  // Overridden from RingBufferWrapper
template <typename T> T *AllocTyped(unsigned int count) {
return static_cast<T *>(Alloc(count * sizeof(T)));
}
@@ -230,7 +231,7 @@ class GLES2Implementation {
IdAllocator renderbuffer_id_allocator_;
IdAllocator program_and_shader_id_allocator_;
IdAllocator texture_id_allocator_;
- AlignedFencedAllocator transfer_buffer_;
+ AlignedRingBuffer transfer_buffer_;
int transfer_buffer_id_;
void* result_buffer_;
uint32 result_shm_offset_;
diff --git a/gpu/command_buffer/client/ring_buffer.cc b/gpu/command_buffer/client/ring_buffer.cc
new file mode 100644
index 0000000..678110a
--- /dev/null
+++ b/gpu/command_buffer/client/ring_buffer.cc
@@ -0,0 +1,103 @@
+// Copyright (c) 2010 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file contains the implementation of the RingBuffer class.
+
+#include "../client/ring_buffer.h"
+#include <algorithm>
+#include "../client/cmd_buffer_helper.h"
+
+namespace gpu {
+
+RingBuffer::RingBuffer(
+ Offset base_offset, unsigned int size, CommandBufferHelper* helper)
+ : helper_(helper),
+ base_offset_(base_offset),
+ size_(size),
+ free_offset_(0),
+ in_use_offset_(0) {
+}
+
+RingBuffer::~RingBuffer() {
+ // Free blocks pending tokens.
+ while (!blocks_.empty()) {
+ FreeOldestBlock();
+ }
+}
+
+void RingBuffer::FreeOldestBlock() {
+ DCHECK(!blocks_.empty()) << "no free blocks";
+ Block& block = blocks_.front();
+ DCHECK(block.valid) << "attempt to allocate more than maximum memory";
+ helper_->WaitForToken(block.token);
+ in_use_offset_ += block.size;
+ if (in_use_offset_ == size_) {
+ in_use_offset_ = 0;
+ }
+ // If they match then the entire buffer is free.
+ if (in_use_offset_ == free_offset_) {
+ in_use_offset_ = 0;
+ free_offset_ = 0;
+ }
+ blocks_.pop_back();
+}
+
+RingBuffer::Offset RingBuffer::Alloc(unsigned int size) {
+ DCHECK_LE(size, size_) << "attempt to allocate more than maximum memory";
+ // Similarly to malloc, an allocation of 0 allocates at least 1 byte, to
+ // return different pointers every time.
+ if (size == 0) size = 1;
+
+ // Wait until there is enough room.
+ while (size > GetLargestFreeSizeNoWaiting()) {
+ FreeOldestBlock();
+ }
+
+ Offset offset = free_offset_;
+ blocks_.push_back(Block(offset, size));
+ free_offset_ += size;
+ if (free_offset_ == size_) {
+ free_offset_ = 0;
+ }
+ return offset + base_offset_;
+}
+
+void RingBuffer::FreePendingToken(RingBuffer::Offset offset,
+ unsigned int token) {
+ offset -= base_offset_;
+ DCHECK(!blocks_.empty()) << "no allocations to free";
+ for (Container::reverse_iterator it = blocks_.rbegin();
+ it != blocks_.rend();
+ ++it) {
+ Block& block = *it;
+ if (block.offset == offset) {
+ DCHECK(!block.valid) << "block that corresponds to offset already freed";
+ block.token = token;
+ block.valid = true;
+ return;
+ }
+ }
+ NOTREACHED() << "attempt to free non-existant block";
+}
+
+unsigned int RingBuffer::GetLargestFreeSizeNoWaiting() {
+ if (free_offset_ == in_use_offset_) {
+ if (blocks_.empty()) {
+ // The entire buffer is free.
+ DCHECK_EQ(free_offset_, 0u);
+ return size_;
+ } else {
+ // The entire buffer is in use.
+ return 0;
+ }
+ } else if (free_offset_ > in_use_offset_) {
+ // It's free from free_offset_ to size_
+ return size_ - free_offset_;
+ } else {
+ // It's free from free_offset_ -> in_use_offset_;
+ return in_use_offset_ - free_offset_;
+ }
+}
+
+} // namespace gpu
diff --git a/gpu/command_buffer/client/ring_buffer.h b/gpu/command_buffer/client/ring_buffer.h
new file mode 100644
index 0000000..0b55661
--- /dev/null
+++ b/gpu/command_buffer/client/ring_buffer.h
@@ -0,0 +1,192 @@
+// Copyright (c) 2010 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file contains the definition of the RingBuffer class.
+
+#ifndef GPU_COMMAND_BUFFER_CLIENT_RING_BUFFER_H_
+#define GPU_COMMAND_BUFFER_CLIENT_RING_BUFFER_H_
+
+#include <deque>
+#include "../common/logging.h"
+#include "../common/types.h"
+
+namespace gpu {
+class CommandBufferHelper;
+
+// RingBuffer manages a piece of memory as a ring buffer. Memory is allocated
+// with Alloc and then freed pending a token with FreePendingToken. Old
+// allocations must not be kept past new allocations.
+class RingBuffer {
+ public:
+ typedef unsigned int Offset;
+
+ // Creates a RingBuffer.
+ // Parameters:
+ // base_offset: The offset of the start of the buffer.
+ // size: The size of the buffer in bytes.
+ // helper: A CommandBufferHelper for dealing with tokens.
+ RingBuffer(
+ Offset base_offset, unsigned int size, CommandBufferHelper* helper);
+
+ ~RingBuffer();
+
+ // Allocates a block of memory. If the buffer is out of directly available
+ // memory, this function may wait until memory that was freed "pending a
+ // token" can be re-used.
+ //
+ // Parameters:
+ // size: the size of the memory block to allocate.
+ //
+ // Returns:
+ // the offset of the allocated memory block.
+ Offset Alloc(unsigned int size);
+
+ // Frees a block of memory, pending the passage of a token. That memory won't
+ // be re-allocated until the token has passed through the command stream.
+ //
+ // Parameters:
+ // offset: the offset of the memory block to free.
+ // token: the token value to wait for before re-using the memory.
+ void FreePendingToken(Offset offset, unsigned int token);
+
+ // Gets the size of the largest free block that is available without waiting.
+ unsigned int GetLargestFreeSizeNoWaiting();
+
+ // Gets the size of the largest free block that can be allocated if the
+ // caller can wait. Allocating a block of this size will succeed, but may
+ // block.
+ unsigned int GetLargestFreeOrPendingSize() {
+ return size_;
+ }
+
+ private:
+  // Book-keeping structure that describes a block of memory.
+ struct Block {
+ Block(Offset _offset, unsigned int _size)
+ : offset(_offset),
+ size(_size),
+ token(0),
+ valid(false) {
+ }
+ Offset offset;
+ unsigned int size;
+ unsigned int token; // token to wait for.
+ bool valid; // whether or not token has been set.
+ };
+
+ typedef std::deque<Block> Container;
+ typedef unsigned int BlockIndex;
+
+ void FreeOldestBlock();
+
+ CommandBufferHelper* helper_;
+
+ // Used blocks are added to the end, blocks are freed from the beginning.
+ Container blocks_;
+
+ // The base offset of the ring buffer.
+ Offset base_offset_;
+
+ // The size of the ring buffer.
+ Offset size_;
+
+ // Offset of first free byte.
+ Offset free_offset_;
+
+  // Offset of first used byte.
+  // Range between in_use_offset_ and free_offset_ is in use.
+ Offset in_use_offset_;
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(RingBuffer);
+};
+
+// This class functions just like RingBuffer, but its API uses pointers
+// instead of offsets.
+class RingBufferWrapper {
+ public:
+ // Parameters:
+ // base_offset: The offset to the start of the buffer
+ // size: The size of the buffer in bytes.
+ // helper: A CommandBufferHelper for dealing with tokens.
+ // base: The physical address that corresponds to base_offset.
+ RingBufferWrapper(RingBuffer::Offset base_offset,
+ unsigned int size,
+ CommandBufferHelper* helper,
+ void* base)
+ : allocator_(base_offset, size, helper),
+ base_(static_cast<int8*>(base) - base_offset) {
+ }
+
+ // Allocates a block of memory. If the buffer is out of directly available
+ // memory, this function may wait until memory that was freed "pending a
+ // token" can be re-used.
+ //
+ // Parameters:
+ // size: the size of the memory block to allocate.
+ //
+ // Returns:
+ // the pointer to the allocated memory block, or NULL if out of
+ // memory.
+ void *Alloc(unsigned int size) {
+ RingBuffer::Offset offset = allocator_.Alloc(size);
+ return GetPointer(offset);
+ }
+
+ // Allocates a block of memory. If the buffer is out of directly available
+ // memory, this function may wait until memory that was freed "pending a
+ // token" can be re-used.
+ // This is a type-safe version of Alloc, returning a typed pointer.
+ //
+ // Parameters:
+ // count: the number of elements to allocate.
+ //
+ // Returns:
+ // the pointer to the allocated memory block, or NULL if out of
+ // memory.
+ template <typename T> T *AllocTyped(unsigned int count) {
+ return static_cast<T *>(Alloc(count * sizeof(T)));
+ }
+
+ // Frees a block of memory, pending the passage of a token. That memory won't
+ // be re-allocated until the token has passed through the command stream.
+ //
+ // Parameters:
+ // pointer: the pointer to the memory block to free.
+ // token: the token value to wait for before re-using the memory.
+ void FreePendingToken(void *pointer, unsigned int token) {
+ DCHECK(pointer);
+ allocator_.FreePendingToken(GetOffset(pointer), token);
+ }
+
+ // Gets a pointer to a memory block given the base memory and the offset.
+ void *GetPointer(RingBuffer::Offset offset) {
+ return static_cast<int8*>(base_) + offset;
+ }
+
+ // Gets the offset to a memory block given the base memory and the address.
+ RingBuffer::Offset GetOffset(void *pointer) {
+ return static_cast<int8*>(pointer) - static_cast<int8*>(base_);
+ }
+
+ // Gets the size of the largest free block that is available without waiting.
+ unsigned int GetLargestFreeSizeNoWaiting() {
+ return allocator_.GetLargestFreeSizeNoWaiting();
+ }
+
+ // Gets the size of the largest free block that can be allocated if the
+ // caller can wait.
+ unsigned int GetLargestFreeOrPendingSize() {
+ return allocator_.GetLargestFreeOrPendingSize();
+ }
+
+ private:
+ RingBuffer allocator_;
+ void *base_;
+ RingBuffer::Offset base_offset_;
+ DISALLOW_IMPLICIT_CONSTRUCTORS(RingBufferWrapper);
+};
+
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_CLIENT_RING_BUFFER_H_
diff --git a/gpu/command_buffer/client/ring_buffer_test.cc b/gpu/command_buffer/client/ring_buffer_test.cc
new file mode 100644
index 0000000..a6ec71a
--- /dev/null
+++ b/gpu/command_buffer/client/ring_buffer_test.cc
@@ -0,0 +1,256 @@
+// Copyright (c) 2010 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file contains the tests for the RingBuffer class.
+
+#include "gpu/command_buffer/client/ring_buffer.h"
+#include "base/at_exit.h"
+#include "base/callback.h"
+#include "base/message_loop.h"
+#include "base/scoped_nsautorelease_pool.h"
+#include "gpu/command_buffer/client/cmd_buffer_helper.h"
+#include "gpu/command_buffer/service/cmd_buffer_engine.h"
+#include "gpu/command_buffer/service/mocks.h"
+#include "gpu/command_buffer/service/command_buffer_service.h"
+#include "gpu/command_buffer/service/gpu_processor.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace gpu {
+
+using testing::Return;
+using testing::Mock;
+using testing::Truly;
+using testing::Sequence;
+using testing::DoAll;
+using testing::Invoke;
+using testing::_;
+
+class BaseRingBufferTest : public testing::Test {
+ protected:
+ static const unsigned int kBaseOffset = 128;
+ static const unsigned int kBufferSize = 1024;
+
+ virtual void SetUp() {
+ api_mock_.reset(new AsyncAPIMock);
+ // ignore noops in the mock - we don't want to inspect the internals of the
+ // helper.
+ EXPECT_CALL(*api_mock_, DoCommand(cmd::kNoop, 0, _))
+ .WillRepeatedly(Return(error::kNoError));
+ // Forward the SetToken calls to the engine
+ EXPECT_CALL(*api_mock_.get(), DoCommand(cmd::kSetToken, 1, _))
+ .WillRepeatedly(DoAll(Invoke(api_mock_.get(), &AsyncAPIMock::SetToken),
+ Return(error::kNoError)));
+
+ command_buffer_.reset(new CommandBufferService);
+ command_buffer_->Initialize(kBufferSize / sizeof(CommandBufferEntry));
+ Buffer ring_buffer = command_buffer_->GetRingBuffer();
+
+ parser_ = new CommandParser(ring_buffer.ptr,
+ ring_buffer.size,
+ 0,
+ ring_buffer.size,
+ 0,
+ api_mock_.get());
+
+ gpu_processor_.reset(new GPUProcessor(
+ command_buffer_.get(), NULL, parser_, INT_MAX));
+ command_buffer_->SetPutOffsetChangeCallback(NewCallback(
+ gpu_processor_.get(), &GPUProcessor::ProcessCommands));
+
+ api_mock_->set_engine(gpu_processor_.get());
+
+ helper_.reset(new CommandBufferHelper(command_buffer_.get()));
+ helper_->Initialize();
+ }
+
+ int32 GetToken() {
+ return command_buffer_->GetState().token;
+ }
+
+ virtual void TearDown() {
+ helper_.release();
+ }
+
+ base::ScopedNSAutoreleasePool autorelease_pool_;
+ base::AtExitManager at_exit_manager_;
+ MessageLoop message_loop_;
+ scoped_ptr<AsyncAPIMock> api_mock_;
+ scoped_ptr<CommandBufferService> command_buffer_;
+ scoped_ptr<GPUProcessor> gpu_processor_;
+ CommandParser* parser_;
+ scoped_ptr<CommandBufferHelper> helper_;
+};
+
+#ifndef COMPILER_MSVC
+const unsigned int BaseRingBufferTest::kBaseOffset;
+const unsigned int BaseRingBufferTest::kBufferSize;
+#endif
+
+// Test fixture for RingBuffer test - Creates a RingBuffer, using a
+// CommandBufferHelper with a mock AsyncAPIInterface for its interface (calling
+// it directly, not through the RPC mechanism), making sure Noops are ignored
+// and SetToken are properly forwarded to the engine.
+class RingBufferTest : public BaseRingBufferTest {
+ protected:
+ virtual void SetUp() {
+ BaseRingBufferTest::SetUp();
+ allocator_.reset(new RingBuffer(kBaseOffset, kBufferSize, helper_.get()));
+ }
+
+ virtual void TearDown() {
+ // If the GPUProcessor posts any tasks, this forces them to run.
+ MessageLoop::current()->RunAllPending();
+
+ allocator_.release();
+
+ BaseRingBufferTest::TearDown();
+ }
+
+ scoped_ptr<RingBuffer> allocator_;
+};
+
+// Checks basic alloc and free.
+TEST_F(RingBufferTest, TestBasic) {
+ const unsigned int kSize = 16;
+ EXPECT_EQ(kBufferSize, allocator_->GetLargestFreeOrPendingSize());
+ EXPECT_EQ(kBufferSize, allocator_->GetLargestFreeSizeNoWaiting());
+ RingBuffer::Offset offset = allocator_->Alloc(kSize);
+ EXPECT_GE(kBufferSize, offset - kBaseOffset + kSize);
+ EXPECT_EQ(kBufferSize, allocator_->GetLargestFreeOrPendingSize());
+ EXPECT_EQ(kBufferSize - kSize, allocator_->GetLargestFreeSizeNoWaiting());
+ int32 token = helper_.get()->InsertToken();
+ allocator_->FreePendingToken(offset, token);
+}
+
+// Checks the free-pending-token mechanism.
+TEST_F(RingBufferTest, TestFreePendingToken) {
+ const unsigned int kSize = 16;
+ const unsigned int kAllocCount = kBufferSize / kSize;
+ CHECK(kAllocCount * kSize == kBufferSize);
+
+ // Allocate several buffers to fill in the memory.
+ int32 tokens[kAllocCount];
+ for (unsigned int ii = 0; ii < kAllocCount; ++ii) {
+ RingBuffer::Offset offset = allocator_->Alloc(kSize);
+ EXPECT_GE(kBufferSize, offset - kBaseOffset + kSize);
+ tokens[ii] = helper_.get()->InsertToken();
+ allocator_->FreePendingToken(offset, tokens[ii]);
+ }
+
+ EXPECT_EQ(kBufferSize - (kSize * kAllocCount),
+ allocator_->GetLargestFreeSizeNoWaiting());
+
+ // This allocation will need to reclaim the space freed above, so that should
+ // process the commands until a token is passed.
+ RingBuffer::Offset offset1 = allocator_->Alloc(kSize);
+ EXPECT_EQ(kBaseOffset, offset1);
+
+ // Check that the token has indeed passed.
+ EXPECT_LE(tokens[0], GetToken());
+
+ allocator_->FreePendingToken(offset1, helper_.get()->InsertToken());
+}
+
+// Tests GetLargestFreeSizeNoWaiting
+TEST_F(RingBufferTest, TestGetLargestFreeSizeNoWaiting) {
+ EXPECT_EQ(kBufferSize, allocator_->GetLargestFreeSizeNoWaiting());
+
+ RingBuffer::Offset offset = allocator_->Alloc(kBufferSize);
+ EXPECT_EQ(0u, allocator_->GetLargestFreeSizeNoWaiting());
+ allocator_->FreePendingToken(offset, helper_.get()->InsertToken());
+}
+
+// Test fixture for RingBufferWrapper test - Creates a
+// RingBufferWrapper, using a CommandBufferHelper with a mock
+// AsyncAPIInterface for its interface (calling it directly, not through the
+// RPC mechanism), making sure Noops are ignored and SetToken are properly
+// forwarded to the engine.
+class RingBufferWrapperTest : public BaseRingBufferTest {
+ protected:
+ virtual void SetUp() {
+ BaseRingBufferTest::SetUp();
+
+ // Though allocating this buffer isn't strictly necessary, it makes
+ // allocations point to valid addresses, so they could be used for
+ // something.
+ buffer_.reset(new int8[kBufferSize + kBaseOffset]);
+ buffer_start_ = buffer_.get() + kBaseOffset;
+ allocator_.reset(new RingBufferWrapper(
+ kBaseOffset, kBufferSize, helper_.get(), buffer_start_));
+ }
+
+ virtual void TearDown() {
+ // If the GPUProcessor posts any tasks, this forces them to run.
+ MessageLoop::current()->RunAllPending();
+
+ allocator_.release();
+ buffer_.release();
+
+ BaseRingBufferTest::TearDown();
+ }
+
+ scoped_ptr<RingBufferWrapper> allocator_;
+ scoped_array<int8> buffer_;
+ int8* buffer_start_;
+};
+
+// Checks basic alloc and free.
+TEST_F(RingBufferWrapperTest, TestBasic) {
+ const unsigned int kSize = 16;
+ void* pointer = allocator_->Alloc(kSize);
+ ASSERT_TRUE(pointer);
+ EXPECT_LE(buffer_start_, static_cast<int8*>(pointer));
+ EXPECT_GE(kBufferSize, static_cast<int8*>(pointer) - buffer_start_ + kSize);
+
+ allocator_->FreePendingToken(pointer, helper_.get()->InsertToken());
+
+ int8* pointer_int8 = allocator_->AllocTyped<int8>(kSize);
+ ASSERT_TRUE(pointer_int8);
+ EXPECT_LE(buffer_start_, pointer_int8);
+ EXPECT_GE(buffer_start_ + kBufferSize, pointer_int8 + kSize);
+ allocator_->FreePendingToken(pointer_int8, helper_.get()->InsertToken());
+
+ unsigned int* pointer_uint = allocator_->AllocTyped<unsigned int>(kSize);
+ ASSERT_TRUE(pointer_uint);
+ EXPECT_LE(buffer_start_, reinterpret_cast<int8*>(pointer_uint));
+ EXPECT_GE(buffer_start_ + kBufferSize,
+ reinterpret_cast<int8* >(pointer_uint + kSize));
+
+ // Check that it did allocate kSize * sizeof(unsigned int). We can't tell
+ // directly, except from the remaining size.
+ EXPECT_EQ(kBufferSize - kSize - kSize - kSize * sizeof(*pointer_uint),
+ allocator_->GetLargestFreeSizeNoWaiting());
+ allocator_->FreePendingToken(pointer_uint, helper_.get()->InsertToken());
+}
+
+// Checks the free-pending-token mechanism.
+TEST_F(RingBufferWrapperTest, TestFreePendingToken) {
+ const unsigned int kSize = 16;
+ const unsigned int kAllocCount = kBufferSize / kSize;
+ CHECK(kAllocCount * kSize == kBufferSize);
+
+ // Allocate several buffers to fill in the memory.
+ int32 tokens[kAllocCount];
+ for (unsigned int ii = 0; ii < kAllocCount; ++ii) {
+ void* pointer = allocator_->Alloc(kSize);
+ EXPECT_TRUE(pointer != NULL);
+ tokens[ii] = helper_.get()->InsertToken();
+ allocator_->FreePendingToken(pointer, helper_.get()->InsertToken());
+ }
+
+ EXPECT_EQ(kBufferSize - (kSize * kAllocCount),
+ allocator_->GetLargestFreeSizeNoWaiting());
+
+ // This allocation will need to reclaim the space freed above, so that should
+ // process the commands until the token is passed.
+ void* pointer1 = allocator_->Alloc(kSize);
+ EXPECT_EQ(buffer_start_, static_cast<int8*>(pointer1));
+
+ // Check that the token has indeed passed.
+ EXPECT_LE(tokens[0], GetToken());
+
+ allocator_->FreePendingToken(pointer1, helper_.get()->InsertToken());
+}
+
+} // namespace gpu
diff --git a/gpu/gpu.gyp b/gpu/gpu.gyp
index 01f2c4a..d8adaba 100644
--- a/gpu/gpu.gyp
+++ b/gpu/gpu.gyp
@@ -233,6 +233,8 @@
'command_buffer/client/fenced_allocator.h',
'command_buffer/client/id_allocator.cc',
'command_buffer/client/id_allocator.h',
+ 'command_buffer/client/ring_buffer.cc',
+ 'command_buffer/client/ring_buffer.h',
],
},
{
@@ -340,6 +342,7 @@
'command_buffer/client/fenced_allocator_test.cc',
'command_buffer/client/gles2_implementation_unittest.cc',
'command_buffer/client/id_allocator_test.cc',
+ 'command_buffer/client/ring_buffer_test.cc',
'command_buffer/common/bitfield_helpers_test.cc',
'command_buffer/common/gles2_cmd_format_test.cc',
'command_buffer/common/gles2_cmd_format_test_autogen.h',