author    kaanb@chromium.org <kaanb@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>    2013-08-21 10:51:51 +0000
committer kaanb@chromium.org <kaanb@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>    2013-08-21 10:51:51 +0000
commit    f9b9cce60297157908bf7cf904abc0f1f8f80102 (patch)
tree      34c671452771d80321a91a2d04c820a22ace6845 /gpu
parent    16a35795aa612dab60605d015ecb5810936b3ccb (diff)
Enforce a memory limit on MappedMemoryManager
BUG=272591

Review URL: https://chromiumcodereview.appspot.com/23130004

git-svn-id: svn://svn.chromium.org/chrome/trunk/src@218693 0039d316-1c4b-4281-b951-d872f2087c98
Diffstat (limited to 'gpu')
-rw-r--r--  gpu/command_buffer/client/buffer_tracker_unittest.cc       |  3
-rw-r--r--  gpu/command_buffer/client/fenced_allocator.cc              | 17
-rw-r--r--  gpu/command_buffer/client/fenced_allocator.h               |  6
-rw-r--r--  gpu/command_buffer/client/gl_in_process_context.cc         |  3
-rw-r--r--  gpu/command_buffer/client/gles2_implementation.cc          | 13
-rw-r--r--  gpu/command_buffer/client/gles2_implementation.h           |  9
-rw-r--r--  gpu/command_buffer/client/gles2_implementation_unittest.cc |  3
-rw-r--r--  gpu/command_buffer/client/mapped_memory.cc                 | 55
-rw-r--r--  gpu/command_buffer/client/mapped_memory.h                  | 28
-rw-r--r--  gpu/command_buffer/client/mapped_memory_unittest.cc        | 80
-rw-r--r--  gpu/command_buffer/client/query_tracker_unittest.cc        |  6
-rw-r--r--  gpu/command_buffer/tests/gl_manager.cc                     |  4
-rw-r--r--  gpu/gles2_conform_support/egl/display.cc                   |  3
13 files changed, 188 insertions, 42 deletions
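
At a high level, the patch adds an unused-memory limit that is threaded from GLES2Implementation::Initialize() down into MappedMemoryManager. A minimal caller-side sketch, assuming an already-initialized CommandBufferHelper* named helper (the 2 MB cap below is illustrative, not a value taken from this change):

  #include "gpu/command_buffer/client/mapped_memory.h"

  // Hypothetical cap on free-but-unreclaimed mapped memory.
  const size_t kUnusedMemoryReclaimLimit = 2 * 1024 * 1024;
  gpu::MappedMemoryManager limited(helper, kUnusedMemoryReclaimLimit);

  // Passing kNoLimit (0) preserves the previous unlimited behavior, which is
  // what the call sites updated in this change do.
  gpu::MappedMemoryManager unlimited(helper, gpu::MappedMemoryManager::kNoLimit);
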
diff --git a/gpu/command_buffer/client/buffer_tracker_unittest.cc b/gpu/command_buffer/client/buffer_tracker_unittest.cc
index 86b69f0..51f4f94 100644
--- a/gpu/command_buffer/client/buffer_tracker_unittest.cc
+++ b/gpu/command_buffer/client/buffer_tracker_unittest.cc
@@ -51,7 +51,8 @@ class BufferTrackerTest : public testing::Test {
command_buffer_.reset(new MockClientCommandBufferImpl());
helper_.reset(new GLES2CmdHelper(command_buffer_.get()));
helper_->Initialize(kCommandBufferSizeBytes);
- mapped_memory_.reset(new MappedMemoryManager(helper_.get()));
+ mapped_memory_.reset(new MappedMemoryManager(
+ helper_.get(), MappedMemoryManager::kNoLimit));
buffer_tracker_.reset(new BufferTracker(mapped_memory_.get()));
}
diff --git a/gpu/command_buffer/client/fenced_allocator.cc b/gpu/command_buffer/client/fenced_allocator.cc
index 6eb9ab3..02e891f 100644
--- a/gpu/command_buffer/client/fenced_allocator.cc
+++ b/gpu/command_buffer/client/fenced_allocator.cc
@@ -5,7 +5,9 @@
// This file contains the implementation of the FencedAllocator class.
#include "gpu/command_buffer/client/fenced_allocator.h"
+
#include <algorithm>
+
#include "gpu/command_buffer/client/cmd_buffer_helper.h"
namespace gpu {
@@ -33,7 +35,8 @@ const FencedAllocator::Offset FencedAllocator::kInvalidOffset;
FencedAllocator::FencedAllocator(unsigned int size,
CommandBufferHelper *helper)
- : helper_(helper) {
+ : helper_(helper),
+ bytes_in_use_(0) {
Block block = { FREE, 0, RoundDown(size), kUnusedToken };
blocks_.push_back(block);
}
@@ -90,7 +93,12 @@ FencedAllocator::Offset FencedAllocator::Alloc(unsigned int size) {
void FencedAllocator::Free(FencedAllocator::Offset offset) {
BlockIndex index = GetBlockByOffset(offset);
GPU_DCHECK_NE(blocks_[index].state, FREE);
- blocks_[index].state = FREE;
+ Block &block = blocks_[index];
+
+ if (block.state == IN_USE)
+ bytes_in_use_ -= block.size;
+
+ block.state = FREE;
CollapseFreeBlock(index);
}
@@ -99,6 +107,8 @@ void FencedAllocator::FreePendingToken(
FencedAllocator::Offset offset, int32 token) {
BlockIndex index = GetBlockByOffset(offset);
Block &block = blocks_[index];
+ if (block.state == IN_USE)
+ bytes_in_use_ -= block.size;
block.state = FREE_PENDING_TOKEN;
block.token = token;
}
@@ -153,6 +163,8 @@ bool FencedAllocator::CheckConsistency() {
return true;
}
+// Returns false if all blocks are actually FREE, in which
+// case they would be coalesced into one block, true otherwise.
bool FencedAllocator::InUse() {
return blocks_.size() != 1 || blocks_[0].state != FREE;
}
@@ -211,6 +223,7 @@ FencedAllocator::Offset FencedAllocator::AllocInBlock(BlockIndex index,
GPU_DCHECK_GE(block.size, size);
GPU_DCHECK_EQ(block.state, FREE);
Offset offset = block.offset;
+ bytes_in_use_ += size;
if (block.size == size) {
block.state = IN_USE;
return offset;
diff --git a/gpu/command_buffer/client/fenced_allocator.h b/gpu/command_buffer/client/fenced_allocator.h
index 90288d9..71e6178 100644
--- a/gpu/command_buffer/client/fenced_allocator.h
+++ b/gpu/command_buffer/client/fenced_allocator.h
@@ -83,6 +83,9 @@ class GPU_EXPORT FencedAllocator {
// True if any memory is allocated.
bool InUse();
+ // Returns the number of bytes of memory currently in the IN_USE state.
+ size_t bytes_in_use() const { return bytes_in_use_; }
+
private:
// Status of a block of memory, for book-keeping.
enum State {
@@ -134,6 +137,7 @@ class GPU_EXPORT FencedAllocator {
CommandBufferHelper *helper_;
Container blocks_;
+ size_t bytes_in_use_;
DISALLOW_IMPLICIT_CONSTRUCTORS(FencedAllocator);
};
@@ -243,6 +247,8 @@ class FencedAllocatorWrapper {
FencedAllocator &allocator() { return allocator_; }
+ size_t bytes_in_use() const { return allocator_.bytes_in_use(); }
+
private:
FencedAllocator allocator_;
void* base_;
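
The FencedAllocator changes above are bookkeeping only: bytes_in_use_ grows in AllocInBlock() and shrinks in Free() and FreePendingToken(), so only IN_USE blocks are counted and FREE_PENDING_TOKEN blocks drop out of the total. A rough sketch of that behavior, assuming a CommandBufferHelper* named helper is already set up:

  FencedAllocator allocator(1 << 20, helper);             // 1 MB arena, one FREE block
  FencedAllocator::Offset block = allocator.Alloc(4096);  // bytes_in_use() == 4096
  allocator.FreePendingToken(block, helper->InsertToken());
  // bytes_in_use() == 0 again, even though the block is FREE_PENDING_TOKEN and
  // cannot be reused until its token has passed.
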
diff --git a/gpu/command_buffer/client/gl_in_process_context.cc b/gpu/command_buffer/client/gl_in_process_context.cc
index f778047..f97099a 100644
--- a/gpu/command_buffer/client/gl_in_process_context.cc
+++ b/gpu/command_buffer/client/gl_in_process_context.cc
@@ -260,7 +260,8 @@ bool GLInProcessContextImpl::Initialize(
if (!gles2_implementation_->Initialize(
kStartTransferBufferSize,
kMinTransferBufferSize,
- kMaxTransferBufferSize)) {
+ kMaxTransferBufferSize,
+ gles2::GLES2Implementation::kNoLimit)) {
return false;
}
diff --git a/gpu/command_buffer/client/gles2_implementation.cc b/gpu/command_buffer/client/gles2_implementation.cc
index 5a223b4..0fde7d8 100644
--- a/gpu/command_buffer/client/gles2_implementation.cc
+++ b/gpu/command_buffer/client/gles2_implementation.cc
@@ -17,7 +17,6 @@
#include <GLES2/gl2extchromium.h>
#include "gpu/command_buffer/client/buffer_tracker.h"
#include "gpu/command_buffer/client/gpu_memory_buffer_tracker.h"
-#include "gpu/command_buffer/client/mapped_memory.h"
#include "gpu/command_buffer/client/program_info_manager.h"
#include "gpu/command_buffer/client/query_tracker.h"
#include "gpu/command_buffer/client/transfer_buffer.h"
@@ -133,7 +132,8 @@ GLES2Implementation::GLES2Implementation(
bool GLES2Implementation::Initialize(
unsigned int starting_transfer_buffer_size,
unsigned int min_transfer_buffer_size,
- unsigned int max_transfer_buffer_size) {
+ unsigned int max_transfer_buffer_size,
+ unsigned int mapped_memory_limit) {
GPU_DCHECK_GE(starting_transfer_buffer_size, min_transfer_buffer_size);
GPU_DCHECK_LE(starting_transfer_buffer_size, max_transfer_buffer_size);
GPU_DCHECK_GE(min_transfer_buffer_size, kStartingOffset);
@@ -148,8 +148,8 @@ bool GLES2Implementation::Initialize(
return false;
}
- mapped_memory_.reset(new MappedMemoryManager(helper_));
- SetSharedMemoryChunkSizeMultiple(1024 * 1024 * 2);
+ mapped_memory_.reset(new MappedMemoryManager(helper_, mapped_memory_limit));
+ mapped_memory_->set_chunk_size_multiple(2 * 1024 * 1024);
if (!QueryAndCacheStaticState())
return false;
@@ -289,11 +289,6 @@ uint32 GLES2Implementation::GetResultShmOffset() {
return transfer_buffer_->GetResultOffset();
}
-void GLES2Implementation::SetSharedMemoryChunkSizeMultiple(
- unsigned int multiple) {
- mapped_memory_->set_chunk_size_multiple(multiple);
-}
-
void GLES2Implementation::FreeUnusedSharedMemory() {
mapped_memory_->FreeUnused();
}
diff --git a/gpu/command_buffer/client/gles2_implementation.h b/gpu/command_buffer/client/gles2_implementation.h
index bacb582..9f8a7a2 100644
--- a/gpu/command_buffer/client/gles2_implementation.h
+++ b/gpu/command_buffer/client/gles2_implementation.h
@@ -20,6 +20,7 @@
#include "gpu/command_buffer/client/gles2_cmd_helper.h"
#include "gpu/command_buffer/client/gles2_interface.h"
#include "gpu/command_buffer/client/gpu_memory_buffer_tracker.h"
+#include "gpu/command_buffer/client/mapped_memory.h"
#include "gpu/command_buffer/client/query_tracker.h"
#include "gpu/command_buffer/client/ref_counted.h"
#include "gpu/command_buffer/client/ring_buffer.h"
@@ -98,7 +99,6 @@ struct GLUniformDefinitionCHROMIUM;
namespace gpu {
class GpuControl;
-class MappedMemoryManager;
class ScopedTransferBufferPtr;
class TransferBufferInterface;
@@ -115,6 +115,9 @@ class VertexArrayObjectManager;
// shared memory and synchronization issues.
class GLES2_IMPL_EXPORT GLES2Implementation : public GLES2Interface {
public:
+ enum MappedMemoryLimit {
+ kNoLimit = MappedMemoryManager::kNoLimit,
+ };
class ErrorMessageCallback {
public:
virtual ~ErrorMessageCallback() { }
@@ -184,7 +187,8 @@ class GLES2_IMPL_EXPORT GLES2Implementation : public GLES2Interface {
bool Initialize(
unsigned int starting_transfer_buffer_size,
unsigned int min_transfer_buffer_size,
- unsigned int max_transfer_buffer_size);
+ unsigned int max_transfer_buffer_size,
+ unsigned int mapped_memory_limit);
// The GLES2CmdHelper being used by this GLES2Implementation. You can use
// this to issue cmds at a lower level for certain kinds of optimization.
@@ -215,7 +219,6 @@ class GLES2_IMPL_EXPORT GLES2Implementation : public GLES2Interface {
GLuint program, GLuint index, GLsizei bufsize, GLsizei* length,
GLint* size, GLenum* type, char* name);
- void SetSharedMemoryChunkSizeMultiple(unsigned int multiple);
void FreeUnusedSharedMemory();
void FreeEverything();
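
Every call site touched by this change passes kNoLimit through the new fourth Initialize() argument; an embedder that wanted to cap client-side mapped memory would pass a byte count instead. A hypothetical example (the buffer sizes and the 4 MB cap are illustrative, not values from this patch):

  if (!gles2_implementation_->Initialize(
          1 * 1024 * 1024,      // starting transfer buffer size
          256 * 1024,           // min transfer buffer size
          16 * 1024 * 1024,     // max transfer buffer size
          4 * 1024 * 1024)) {   // mapped_memory_limit
    return false;
  }
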
diff --git a/gpu/command_buffer/client/gles2_implementation_unittest.cc b/gpu/command_buffer/client/gles2_implementation_unittest.cc
index 0289547..84fa749 100644
--- a/gpu/command_buffer/client/gles2_implementation_unittest.cc
+++ b/gpu/command_buffer/client/gles2_implementation_unittest.cc
@@ -407,7 +407,8 @@ class GLES2ImplementationTest : public testing::Test {
ASSERT_TRUE(gl_->Initialize(
kTransferBufferSize,
kTransferBufferSize,
- kTransferBufferSize));
+ kTransferBufferSize,
+ GLES2Implementation::kNoLimit));
}
EXPECT_CALL(*command_buffer(), OnFlush())
diff --git a/gpu/command_buffer/client/mapped_memory.cc b/gpu/command_buffer/client/mapped_memory.cc
index 82829d4..c367e69 100644
--- a/gpu/command_buffer/client/mapped_memory.cc
+++ b/gpu/command_buffer/client/mapped_memory.cc
@@ -7,6 +7,7 @@
#include <algorithm>
#include <functional>
+#include "base/debug/trace_event.h"
#include "gpu/command_buffer/client/cmd_buffer_helper.h"
namespace gpu {
@@ -18,9 +19,12 @@ MemoryChunk::MemoryChunk(
allocator_(shm.size, helper, shm.ptr) {
}
-MappedMemoryManager::MappedMemoryManager(CommandBufferHelper* helper)
+MappedMemoryManager::MappedMemoryManager(CommandBufferHelper* helper,
+ size_t unused_memory_reclaim_limit)
: chunk_size_multiple_(1),
- helper_(helper) {
+ helper_(helper),
+ allocated_memory_(0),
+ max_free_bytes_(unused_memory_reclaim_limit) {
}
MappedMemoryManager::~MappedMemoryManager() {
@@ -36,16 +40,38 @@ void* MappedMemoryManager::Alloc(
unsigned int size, int32* shm_id, unsigned int* shm_offset) {
GPU_DCHECK(shm_id);
GPU_DCHECK(shm_offset);
- // See if any of the chucks can satisfy this request.
- for (size_t ii = 0; ii < chunks_.size(); ++ii) {
- MemoryChunk* chunk = chunks_[ii];
- chunk->FreeUnused();
- if (chunk->GetLargestFreeSizeWithoutWaiting() >= size) {
- void* mem = chunk->Alloc(size);
- GPU_DCHECK(mem);
- *shm_id = chunk->shm_id();
- *shm_offset = chunk->GetOffset(mem);
- return mem;
+ if (size <= allocated_memory_) {
+ size_t total_bytes_in_use = 0;
+ // See if any of the chunks can satisfy this request.
+ for (size_t ii = 0; ii < chunks_.size(); ++ii) {
+ MemoryChunk* chunk = chunks_[ii];
+ chunk->FreeUnused();
+ total_bytes_in_use += chunk->bytes_in_use();
+ if (chunk->GetLargestFreeSizeWithoutWaiting() >= size) {
+ void* mem = chunk->Alloc(size);
+ GPU_DCHECK(mem);
+ *shm_id = chunk->shm_id();
+ *shm_offset = chunk->GetOffset(mem);
+ return mem;
+ }
+ }
+
+ // If there is a memory limit being enforced and total free
+ // memory (allocated_memory_ - total_bytes_in_use) is larger than
+ // the limit, try waiting.
+ if (max_free_bytes_ != kNoLimit &&
+ (allocated_memory_ - total_bytes_in_use) >= max_free_bytes_) {
+ TRACE_EVENT0("gpu", "MappedMemoryManager::Alloc::wait");
+ for (size_t ii = 0; ii < chunks_.size(); ++ii) {
+ MemoryChunk* chunk = chunks_[ii];
+ if (chunk->GetLargestFreeSizeWithWaiting() >= size) {
+ void* mem = chunk->Alloc(size);
+ GPU_DCHECK(mem);
+ *shm_id = chunk->shm_id();
+ *shm_offset = chunk->GetOffset(mem);
+ return mem;
+ }
+ }
}
}
@@ -59,6 +85,7 @@ void* MappedMemoryManager::Alloc(
if (id < 0)
return NULL;
MemoryChunk* mc = new MemoryChunk(id, shm, helper_);
+ allocated_memory_ += mc->GetSize();
chunks_.push_back(mc);
void* mem = mc->Alloc(size);
GPU_DCHECK(mem);
@@ -97,6 +124,7 @@ void MappedMemoryManager::FreeUnused() {
chunk->FreeUnused();
if (!chunk->InUse()) {
cmd_buf->DestroyTransferBuffer(chunk->shm_id());
+ allocated_memory_ -= chunk->GetSize();
iter = chunks_.erase(iter);
} else {
++iter;
@@ -105,6 +133,3 @@ void MappedMemoryManager::FreeUnused() {
}
} // namespace gpu
-
-
-
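
Taken together, the rewritten MappedMemoryManager::Alloc() tries three things in order: satisfy the request from an existing chunk without waiting; if that fails and the free-but-unreclaimed memory (allocated_memory_ - total_bytes_in_use) has reached max_free_bytes_, wait on pending tokens and retry the existing chunks; otherwise allocate a new chunk and grow allocated_memory_. The MemoryLimitWithReuse test added later in this patch exercises the middle case; a condensed sketch of the same behavior, with a test-style CommandBufferHelper* named helper assumed:

  MappedMemoryManager manager(helper, 1024 /* unused_memory_reclaim_limit */);
  int32 id = -1;
  unsigned int offset = 0;
  void* first = manager.Alloc(1024, &id, &offset);      // creates a 1 KB chunk
  manager.FreePendingToken(first, helper->InsertToken());
  // Free (unreclaimed) memory now equals the 1 KB limit, so this call waits on
  // the token and reuses the same chunk instead of allocating a second one;
  // allocated_memory() stays at 1024.
  void* reused = manager.Alloc(1024, &id, &offset);
  GPU_DCHECK(reused);
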
diff --git a/gpu/command_buffer/client/mapped_memory.h b/gpu/command_buffer/client/mapped_memory.h
index ec70c43..db73ca7 100644
--- a/gpu/command_buffer/client/mapped_memory.h
+++ b/gpu/command_buffer/client/mapped_memory.h
@@ -79,7 +79,7 @@ class GPU_EXPORT MemoryChunk {
allocator_.FreePendingToken(pointer, token);
}
- // Frees any blocks who's tokens have passed.
+ // Frees any blocks whose tokens have passed.
void FreeUnused() {
allocator_.FreeUnused();
}
@@ -90,11 +90,15 @@ class GPU_EXPORT MemoryChunk {
pointer < reinterpret_cast<const int8*>(shm_.ptr) + shm_.size;
}
- // Returns true of any memory in this chuck is in use.
+ // Returns true if any memory in this chunk is in use.
bool InUse() {
return allocator_.InUse();
}
+ size_t bytes_in_use() const {
+ return allocator_.bytes_in_use();
+ }
+
private:
int32 shm_id_;
gpu::Buffer shm_;
@@ -103,10 +107,17 @@ class GPU_EXPORT MemoryChunk {
DISALLOW_COPY_AND_ASSIGN(MemoryChunk);
};
-// Manages MemoryChucks.
+// Manages MemoryChunks.
class GPU_EXPORT MappedMemoryManager {
public:
- explicit MappedMemoryManager(CommandBufferHelper* helper);
+ enum MemoryLimit {
+ kNoLimit = 0,
+ };
+
+ // |unused_memory_reclaim_limit|: When exceeded, this causes pending memory
+ // to be reclaimed before allocating more memory.
+ MappedMemoryManager(CommandBufferHelper* helper,
+ size_t unused_memory_reclaim_limit);
~MappedMemoryManager();
@@ -146,10 +157,15 @@ class GPU_EXPORT MappedMemoryManager {
void FreeUnused();
// Used for testing
- size_t num_chunks() {
+ size_t num_chunks() const {
return chunks_.size();
}
+ // Used for testing
+ size_t allocated_memory() const {
+ return allocated_memory_;
+ }
+
private:
typedef ScopedVector<MemoryChunk> MemoryChunkVector;
@@ -157,6 +173,8 @@ class GPU_EXPORT MappedMemoryManager {
unsigned int chunk_size_multiple_;
CommandBufferHelper* helper_;
MemoryChunkVector chunks_;
+ size_t allocated_memory_;
+ size_t max_free_bytes_;
DISALLOW_COPY_AND_ASSIGN(MappedMemoryManager);
};
diff --git a/gpu/command_buffer/client/mapped_memory_unittest.cc b/gpu/command_buffer/client/mapped_memory_unittest.cc
index 99e6e8d..90d1ce7 100644
--- a/gpu/command_buffer/client/mapped_memory_unittest.cc
+++ b/gpu/command_buffer/client/mapped_memory_unittest.cc
@@ -149,7 +149,8 @@ class MappedMemoryManagerTest : public MappedMemoryTestBase {
protected:
virtual void SetUp() {
MappedMemoryTestBase::SetUp();
- manager_.reset(new MappedMemoryManager(helper_.get()));
+ manager_.reset(new MappedMemoryManager(
+ helper_.get(), MappedMemoryManager::kNoLimit));
}
virtual void TearDown() {
@@ -306,4 +307,81 @@ TEST_F(MappedMemoryManagerTest, ChunkSizeMultiple) {
EXPECT_EQ(0u, offset3);
}
+TEST_F(MappedMemoryManagerTest, UnusedMemoryLimit) {
+ const unsigned int kChunkSize = 2048;
+ // Reset the manager with a memory limit.
+ manager_.reset(new MappedMemoryManager(helper_.get(), kChunkSize));
+ manager_->set_chunk_size_multiple(kChunkSize);
+
+ // Allocate one chunk worth of memory.
+ int32 id1 = -1;
+ unsigned int offset1 = 0xFFFFFFFFU;
+ void* mem1 = manager_->Alloc(kChunkSize, &id1, &offset1);
+ ASSERT_TRUE(mem1);
+ EXPECT_NE(-1, id1);
+ EXPECT_EQ(0u, offset1);
+
+ // Allocate another full chunk worth of memory.
+ // A new chunk will be needed since the first one is entirely in use.
+ int32 id2 = -1;
+ unsigned int offset2 = 0xFFFFFFFFU;
+ void* mem2 = manager_->Alloc(kChunkSize, &id2, &offset2);
+ ASSERT_TRUE(mem2);
+ EXPECT_NE(-1, id2);
+ EXPECT_EQ(0u, offset2);
+
+ // Expect two chunks to be allocated, exceeding the limit,
+ // since all memory is in use.
+ EXPECT_EQ(2 * kChunkSize, manager_->allocated_memory());
+}
+
+TEST_F(MappedMemoryManagerTest, MemoryLimitWithReuse) {
+ const unsigned int kSize = 1024;
+ // Reset the manager with a memory limit.
+ manager_.reset(new MappedMemoryManager(helper_.get(), kSize));
+ const unsigned int kChunkSize = 2 * 1024;
+ manager_->set_chunk_size_multiple(kChunkSize);
+
+ // Allocate half a chunk worth of memory.
+ int32 id1 = -1;
+ unsigned int offset1 = 0xFFFFFFFFU;
+ void* mem1 = manager_->Alloc(kSize, &id1, &offset1);
+ ASSERT_TRUE(mem1);
+ EXPECT_NE(-1, id1);
+ EXPECT_EQ(0u, offset1);
+
+ // Allocate half a chunk worth of memory again.
+ // The same chunk will be used.
+ int32 id2 = -1;
+ unsigned int offset2 = 0xFFFFFFFFU;
+ void* mem2 = manager_->Alloc(kSize, &id2, &offset2);
+ ASSERT_TRUE(mem2);
+ EXPECT_NE(-1, id2);
+ EXPECT_EQ(kSize, offset2);
+
+ // Free one successful allocation, pending fence.
+ int32 token = helper_.get()->InsertToken();
+ manager_->FreePendingToken(mem2, token);
+
+ // The way we hooked up the helper and engine, it won't process commands
+ // until it has to wait for something. Which means the token shouldn't have
+ // passed yet at this point.
+ EXPECT_GT(token, GetToken());
+
+ // Since we didn't call helper_.finish() the token did not pass.
+ // We won't be able to claim the free memory without waiting and
+ // as we've already met the memory limit we'll have to wait
+ // on the token.
+ int32 id3 = -1;
+ unsigned int offset3 = 0xFFFFFFFFU;
+ void* mem3 = manager_->Alloc(kSize, &id3, &offset3);
+ ASSERT_TRUE(mem3);
+ EXPECT_NE(-1, id3);
+ // It will reuse the space from the second allocation just freed.
+ EXPECT_EQ(kSize, offset3);
+
+ // Expect one chunk to be allocated
+ EXPECT_EQ(1 * kChunkSize, manager_->allocated_memory());
+}
+
} // namespace gpu
diff --git a/gpu/command_buffer/client/query_tracker_unittest.cc b/gpu/command_buffer/client/query_tracker_unittest.cc
index 2dad335..800c479 100644
--- a/gpu/command_buffer/client/query_tracker_unittest.cc
+++ b/gpu/command_buffer/client/query_tracker_unittest.cc
@@ -28,7 +28,8 @@ class QuerySyncManagerTest : public testing::Test {
command_buffer_.reset(new MockClientCommandBuffer());
helper_.reset(new GLES2CmdHelper(command_buffer_.get()));
helper_->Initialize(kCommandBufferSizeBytes);
- mapped_memory_.reset(new MappedMemoryManager(helper_.get()));
+ mapped_memory_.reset(new MappedMemoryManager(
+ helper_.get(), MappedMemoryManager::kNoLimit));
sync_manager_.reset(new QuerySyncManager(mapped_memory_.get()));
}
@@ -81,7 +82,8 @@ class QueryTrackerTest : public testing::Test {
command_buffer_.reset(new MockClientCommandBuffer());
helper_.reset(new GLES2CmdHelper(command_buffer_.get()));
helper_->Initialize(kCommandBufferSizeBytes);
- mapped_memory_.reset(new MappedMemoryManager(helper_.get()));
+ mapped_memory_.reset(new MappedMemoryManager(
+ helper_.get(), MappedMemoryManager::kNoLimit));
query_tracker_.reset(new QueryTracker(mapped_memory_.get()));
}
diff --git a/gpu/command_buffer/tests/gl_manager.cc b/gpu/command_buffer/tests/gl_manager.cc
index 2c48164..82a01f0 100644
--- a/gpu/command_buffer/tests/gl_manager.cc
+++ b/gpu/command_buffer/tests/gl_manager.cc
@@ -198,7 +198,9 @@ void GLManager::Initialize(const GLManager::Options& options) {
ASSERT_TRUE(gles2_implementation_->Initialize(
kStartTransferBufferSize,
kMinTransferBufferSize,
- kMaxTransferBufferSize)) << "Could not init GLES2Implementation";
+ kMaxTransferBufferSize,
+ gpu::gles2::GLES2Implementation::kNoLimit))
+ << "Could not init GLES2Implementation";
MakeCurrent();
}
diff --git a/gpu/gles2_conform_support/egl/display.cc b/gpu/gles2_conform_support/egl/display.cc
index 66934bb..7de9e36 100644
--- a/gpu/gles2_conform_support/egl/display.cc
+++ b/gpu/gles2_conform_support/egl/display.cc
@@ -234,7 +234,8 @@ EGLContext Display::CreateContext(EGLConfig config,
if (!context_->Initialize(
kTransferBufferSize,
kTransferBufferSize / 2,
- kTransferBufferSize * 2)) {
+ kTransferBufferSize * 2,
+ gpu::gles2::GLES2Implementation::kNoLimit)) {
return EGL_NO_CONTEXT;
}