summaryrefslogtreecommitdiffstats
path: root/gpu/command_buffer/common
diff options
context:
space:
mode:
authorgman@chromium.org <gman@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>2010-05-03 19:14:10 +0000
committergman@chromium.org <gman@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>2010-05-03 19:14:10 +0000
commit066849e369dae48bf61ae0cf70c9e9acaf9f1045 (patch)
tree479aebcba9d2f1d3b054dc3ea64baa9a7c753f15 /gpu/command_buffer/common
parent0411509f65aae2b1ba684bf87343a14253246de0 (diff)
downloadchromium_src-066849e369dae48bf61ae0cf70c9e9acaf9f1045.zip
chromium_src-066849e369dae48bf61ae0cf70c9e9acaf9f1045.tar.gz
chromium_src-066849e369dae48bf61ae0cf70c9e9acaf9f1045.tar.bz2
Adds support for shared resources.
It's not clear how to test this easily; it seems like an integration test is needed at some point. I did run the conformance tests with share_resources set to true and it ran without crashing. TEST=unit tests BUG=none Review URL: http://codereview.chromium.org/1817002 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@46264 0039d316-1c4b-4281-b951-d872f2087c98
Diffstat (limited to 'gpu/command_buffer/common')
-rw-r--r--gpu/command_buffer/common/gles2_cmd_format.h20
-rw-r--r--gpu/command_buffer/common/gles2_cmd_format_autogen.h158
-rw-r--r--gpu/command_buffer/common/gles2_cmd_format_test_autogen.h59
-rw-r--r--gpu/command_buffer/common/gles2_cmd_id_test_autogen.h6
-rw-r--r--gpu/command_buffer/common/gles2_cmd_ids_autogen.h3
-rw-r--r--gpu/command_buffer/common/id_allocator.cc35
-rw-r--r--gpu/command_buffer/common/id_allocator.h66
-rw-r--r--gpu/command_buffer/common/id_allocator_test.cc98
8 files changed, 445 insertions, 0 deletions
diff --git a/gpu/command_buffer/common/gles2_cmd_format.h b/gpu/command_buffer/common/gles2_cmd_format.h
index 6935380..2be1e36 100644
--- a/gpu/command_buffer/common/gles2_cmd_format.h
+++ b/gpu/command_buffer/common/gles2_cmd_format.h
@@ -39,6 +39,26 @@ namespace gles2 {
#pragma pack(push, 1)
+namespace id_namespaces {
+
+// These are used when contexts share resources.
+enum IdNamespaces {
+ kBuffers,
+ kFramebuffers,
+ kProgramsAndShaders,
+ kRenderbuffers,
+ kTextures,
+};
+
+// These numbers must not change
+COMPILE_ASSERT(kBuffers == 0, kBuffers_is_not_0);
+COMPILE_ASSERT(kFramebuffers == 1, kFramebuffers_is_not_1);
+COMPILE_ASSERT(kProgramsAndShaders == 2, kProgramsAndShaders_is_not_2);
+COMPILE_ASSERT(kRenderbuffers == 3, kRenderbuffers_is_not_3);
+COMPILE_ASSERT(kTextures == 4, kTextures_is_not_4);
+
+} // namespace id_namespaces
+
// Used for some glGetXXX commands that return a result through a pointer. We
// need to know if the command succeeded or not and the size of the result. If
// the command failed its result size will 0.
diff --git a/gpu/command_buffer/common/gles2_cmd_format_autogen.h b/gpu/command_buffer/common/gles2_cmd_format_autogen.h
index b6b8128..7e07dd7 100644
--- a/gpu/command_buffer/common/gles2_cmd_format_autogen.h
+++ b/gpu/command_buffer/common/gles2_cmd_format_autogen.h
@@ -8280,6 +8280,164 @@ COMPILE_ASSERT(offsetof(GetMaxValueInBuffer, result_shm_id) == 20,
COMPILE_ASSERT(offsetof(GetMaxValueInBuffer, result_shm_offset) == 24,
OffsetOf_GetMaxValueInBuffer_result_shm_offset_not_24);
+struct GenSharedIds {
+ typedef GenSharedIds ValueType;
+ static const CommandId kCmdId = kGenSharedIds;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+
+ static uint32 ComputeSize() {
+ return static_cast<uint32>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() {
+ header.SetCmd<ValueType>();
+ }
+
+ void Init(
+ GLuint _namespace_id, GLuint _id_offset, GLsizei _n, uint32 _ids_shm_id,
+ uint32 _ids_shm_offset) {
+ SetHeader();
+ namespace_id = _namespace_id;
+ id_offset = _id_offset;
+ n = _n;
+ ids_shm_id = _ids_shm_id;
+ ids_shm_offset = _ids_shm_offset;
+ }
+
+ void* Set(
+ void* cmd, GLuint _namespace_id, GLuint _id_offset, GLsizei _n,
+ uint32 _ids_shm_id, uint32 _ids_shm_offset) {
+ static_cast<ValueType*>(
+ cmd)->Init(
+ _namespace_id, _id_offset, _n, _ids_shm_id, _ids_shm_offset);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ uint32 namespace_id;
+ uint32 id_offset;
+ int32 n;
+ uint32 ids_shm_id;
+ uint32 ids_shm_offset;
+};
+
+COMPILE_ASSERT(sizeof(GenSharedIds) == 24,
+ Sizeof_GenSharedIds_is_not_24);
+COMPILE_ASSERT(offsetof(GenSharedIds, header) == 0,
+ OffsetOf_GenSharedIds_header_not_0);
+COMPILE_ASSERT(offsetof(GenSharedIds, namespace_id) == 4,
+ OffsetOf_GenSharedIds_namespace_id_not_4);
+COMPILE_ASSERT(offsetof(GenSharedIds, id_offset) == 8,
+ OffsetOf_GenSharedIds_id_offset_not_8);
+COMPILE_ASSERT(offsetof(GenSharedIds, n) == 12,
+ OffsetOf_GenSharedIds_n_not_12);
+COMPILE_ASSERT(offsetof(GenSharedIds, ids_shm_id) == 16,
+ OffsetOf_GenSharedIds_ids_shm_id_not_16);
+COMPILE_ASSERT(offsetof(GenSharedIds, ids_shm_offset) == 20,
+ OffsetOf_GenSharedIds_ids_shm_offset_not_20);
+
+struct DeleteSharedIds {
+ typedef DeleteSharedIds ValueType;
+ static const CommandId kCmdId = kDeleteSharedIds;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+
+ static uint32 ComputeSize() {
+ return static_cast<uint32>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() {
+ header.SetCmd<ValueType>();
+ }
+
+ void Init(
+ GLuint _namespace_id, GLsizei _n, uint32 _ids_shm_id,
+ uint32 _ids_shm_offset) {
+ SetHeader();
+ namespace_id = _namespace_id;
+ n = _n;
+ ids_shm_id = _ids_shm_id;
+ ids_shm_offset = _ids_shm_offset;
+ }
+
+ void* Set(
+ void* cmd, GLuint _namespace_id, GLsizei _n, uint32 _ids_shm_id,
+ uint32 _ids_shm_offset) {
+ static_cast<ValueType*>(
+ cmd)->Init(_namespace_id, _n, _ids_shm_id, _ids_shm_offset);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ uint32 namespace_id;
+ int32 n;
+ uint32 ids_shm_id;
+ uint32 ids_shm_offset;
+};
+
+COMPILE_ASSERT(sizeof(DeleteSharedIds) == 20,
+ Sizeof_DeleteSharedIds_is_not_20);
+COMPILE_ASSERT(offsetof(DeleteSharedIds, header) == 0,
+ OffsetOf_DeleteSharedIds_header_not_0);
+COMPILE_ASSERT(offsetof(DeleteSharedIds, namespace_id) == 4,
+ OffsetOf_DeleteSharedIds_namespace_id_not_4);
+COMPILE_ASSERT(offsetof(DeleteSharedIds, n) == 8,
+ OffsetOf_DeleteSharedIds_n_not_8);
+COMPILE_ASSERT(offsetof(DeleteSharedIds, ids_shm_id) == 12,
+ OffsetOf_DeleteSharedIds_ids_shm_id_not_12);
+COMPILE_ASSERT(offsetof(DeleteSharedIds, ids_shm_offset) == 16,
+ OffsetOf_DeleteSharedIds_ids_shm_offset_not_16);
+
+struct RegisterSharedIds {
+ typedef RegisterSharedIds ValueType;
+ static const CommandId kCmdId = kRegisterSharedIds;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+
+ static uint32 ComputeSize() {
+ return static_cast<uint32>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() {
+ header.SetCmd<ValueType>();
+ }
+
+ void Init(
+ GLuint _namespace_id, GLsizei _n, uint32 _ids_shm_id,
+ uint32 _ids_shm_offset) {
+ SetHeader();
+ namespace_id = _namespace_id;
+ n = _n;
+ ids_shm_id = _ids_shm_id;
+ ids_shm_offset = _ids_shm_offset;
+ }
+
+ void* Set(
+ void* cmd, GLuint _namespace_id, GLsizei _n, uint32 _ids_shm_id,
+ uint32 _ids_shm_offset) {
+ static_cast<ValueType*>(
+ cmd)->Init(_namespace_id, _n, _ids_shm_id, _ids_shm_offset);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ uint32 namespace_id;
+ int32 n;
+ uint32 ids_shm_id;
+ uint32 ids_shm_offset;
+};
+
+COMPILE_ASSERT(sizeof(RegisterSharedIds) == 20,
+ Sizeof_RegisterSharedIds_is_not_20);
+COMPILE_ASSERT(offsetof(RegisterSharedIds, header) == 0,
+ OffsetOf_RegisterSharedIds_header_not_0);
+COMPILE_ASSERT(offsetof(RegisterSharedIds, namespace_id) == 4,
+ OffsetOf_RegisterSharedIds_namespace_id_not_4);
+COMPILE_ASSERT(offsetof(RegisterSharedIds, n) == 8,
+ OffsetOf_RegisterSharedIds_n_not_8);
+COMPILE_ASSERT(offsetof(RegisterSharedIds, ids_shm_id) == 12,
+ OffsetOf_RegisterSharedIds_ids_shm_id_not_12);
+COMPILE_ASSERT(offsetof(RegisterSharedIds, ids_shm_offset) == 16,
+ OffsetOf_RegisterSharedIds_ids_shm_offset_not_16);
+
#endif // GPU_COMMAND_BUFFER_COMMON_GLES2_CMD_FORMAT_AUTOGEN_H_
diff --git a/gpu/command_buffer/common/gles2_cmd_format_test_autogen.h b/gpu/command_buffer/common/gles2_cmd_format_test_autogen.h
index d498249..4e003eb 100644
--- a/gpu/command_buffer/common/gles2_cmd_format_test_autogen.h
+++ b/gpu/command_buffer/common/gles2_cmd_format_test_autogen.h
@@ -3270,5 +3270,64 @@ TEST(GLES2FormatTest, GetMaxValueInBuffer) {
EXPECT_EQ(static_cast<uint32>(16), cmd.result_shm_offset);
}
+TEST(GLES2FormatTest, GenSharedIds) {
+ GenSharedIds cmd = { { 0 } };
+ void* next_cmd = cmd.Set(
+ &cmd,
+ static_cast<GLuint>(11),
+ static_cast<GLuint>(12),
+ static_cast<GLsizei>(13),
+ static_cast<uint32>(14),
+ static_cast<uint32>(15));
+ EXPECT_EQ(static_cast<uint32>(GenSharedIds::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<char*>(next_cmd),
+ reinterpret_cast<char*>(&cmd) + sizeof(cmd));
+ EXPECT_EQ(static_cast<GLuint>(11), cmd.namespace_id);
+ EXPECT_EQ(static_cast<GLuint>(12), cmd.id_offset);
+ EXPECT_EQ(static_cast<GLsizei>(13), cmd.n);
+ EXPECT_EQ(static_cast<uint32>(14), cmd.ids_shm_id);
+ EXPECT_EQ(static_cast<uint32>(15), cmd.ids_shm_offset);
+}
+
+TEST(GLES2FormatTest, DeleteSharedIds) {
+ DeleteSharedIds cmd = { { 0 } };
+ void* next_cmd = cmd.Set(
+ &cmd,
+ static_cast<GLuint>(11),
+ static_cast<GLsizei>(12),
+ static_cast<uint32>(13),
+ static_cast<uint32>(14));
+ EXPECT_EQ(static_cast<uint32>(DeleteSharedIds::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<char*>(next_cmd),
+ reinterpret_cast<char*>(&cmd) + sizeof(cmd));
+ EXPECT_EQ(static_cast<GLuint>(11), cmd.namespace_id);
+ EXPECT_EQ(static_cast<GLsizei>(12), cmd.n);
+ EXPECT_EQ(static_cast<uint32>(13), cmd.ids_shm_id);
+ EXPECT_EQ(static_cast<uint32>(14), cmd.ids_shm_offset);
+}
+
+TEST(GLES2FormatTest, RegisterSharedIds) {
+ RegisterSharedIds cmd = { { 0 } };
+ void* next_cmd = cmd.Set(
+ &cmd,
+ static_cast<GLuint>(11),
+ static_cast<GLsizei>(12),
+ static_cast<uint32>(13),
+ static_cast<uint32>(14));
+ EXPECT_EQ(static_cast<uint32>(RegisterSharedIds::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<char*>(next_cmd),
+ reinterpret_cast<char*>(&cmd) + sizeof(cmd));
+ EXPECT_EQ(static_cast<GLuint>(11), cmd.namespace_id);
+ EXPECT_EQ(static_cast<GLsizei>(12), cmd.n);
+ EXPECT_EQ(static_cast<uint32>(13), cmd.ids_shm_id);
+ EXPECT_EQ(static_cast<uint32>(14), cmd.ids_shm_offset);
+}
+
#endif // GPU_COMMAND_BUFFER_COMMON_GLES2_CMD_FORMAT_TEST_AUTOGEN_H_
diff --git a/gpu/command_buffer/common/gles2_cmd_id_test_autogen.h b/gpu/command_buffer/common/gles2_cmd_id_test_autogen.h
index 7092c1e..ba87b28 100644
--- a/gpu/command_buffer/common/gles2_cmd_id_test_autogen.h
+++ b/gpu/command_buffer/common/gles2_cmd_id_test_autogen.h
@@ -377,6 +377,12 @@ TEST(GLES2CommandIdTest, CommandIdsMatch) {
GLES2_SwapBuffers_kCmdId_mismatch);
COMPILE_ASSERT(GetMaxValueInBuffer::kCmdId == 438,
GLES2_GetMaxValueInBuffer_kCmdId_mismatch);
+ COMPILE_ASSERT(GenSharedIds::kCmdId == 439,
+ GLES2_GenSharedIds_kCmdId_mismatch);
+ COMPILE_ASSERT(DeleteSharedIds::kCmdId == 440,
+ GLES2_DeleteSharedIds_kCmdId_mismatch);
+ COMPILE_ASSERT(RegisterSharedIds::kCmdId == 441,
+ GLES2_RegisterSharedIds_kCmdId_mismatch);
}
#endif // GPU_COMMAND_BUFFER_COMMON_GLES2_CMD_ID_TEST_AUTOGEN_H_
diff --git a/gpu/command_buffer/common/gles2_cmd_ids_autogen.h b/gpu/command_buffer/common/gles2_cmd_ids_autogen.h
index 4ce2215..eebe3ff 100644
--- a/gpu/command_buffer/common/gles2_cmd_ids_autogen.h
+++ b/gpu/command_buffer/common/gles2_cmd_ids_autogen.h
@@ -191,6 +191,9 @@
OP(ShaderBinary) /* 436 */ \
OP(ReleaseShaderCompiler) /* 437 */ \
OP(GetMaxValueInBuffer) /* 438 */ \
+ OP(GenSharedIds) /* 439 */ \
+ OP(DeleteSharedIds) /* 440 */ \
+ OP(RegisterSharedIds) /* 441 */ \
enum CommandId {
kStartPoint = cmd::kLastCommonId, // All GLES2 commands start after this.
diff --git a/gpu/command_buffer/common/id_allocator.cc b/gpu/command_buffer/common/id_allocator.cc
new file mode 100644
index 0000000..22c55e3
--- /dev/null
+++ b/gpu/command_buffer/common/id_allocator.cc
@@ -0,0 +1,35 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file contains the implementation of IdAllocator.
+
+#include "../common/id_allocator.h"
+#include "../common/logging.h"
+
+namespace gpu {
+
+IdAllocator::IdAllocator() {
+}
+
+ResourceId IdAllocator::FindFirstFree() const {
+ ResourceId id = 1;
+ for (ResourceIdSet::const_iterator it = used_ids_.begin();
+ it != used_ids_.end(); ++it) {
+ if ((*it) != id) {
+ return id;
+ }
+ ++id;
+ }
+ return id;
+}
+
+ResourceId IdAllocator::AllocateIDAtOrAbove(ResourceId desired_id) {
+ DCHECK_LT(static_cast<ResourceId>(used_ids_.size()),
+ static_cast<ResourceId>(-1));
+ for (; InUse(desired_id); ++desired_id);
+ MarkAsUsed(desired_id);
+ return desired_id;
+}
+
+} // namespace gpu
diff --git a/gpu/command_buffer/common/id_allocator.h b/gpu/command_buffer/common/id_allocator.h
new file mode 100644
index 0000000..76edc34
--- /dev/null
+++ b/gpu/command_buffer/common/id_allocator.h
@@ -0,0 +1,66 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file contains the definition of the IdAllocator class.
+
+#ifndef GPU_COMMAND_BUFFER_CLIENT_ID_ALLOCATOR_H_
+#define GPU_COMMAND_BUFFER_CLIENT_ID_ALLOCATOR_H_
+
+#include <set>
+#include <utility>
+#include "../common/types.h"
+
+namespace gpu {
+
+// A resource ID, key to the resource maps.
+typedef uint32 ResourceId;
+// Invalid resource ID.
+static const ResourceId kInvalidResource = 0u;
+
+// A class to manage the allocation of resource IDs.
+class IdAllocator {
+ public:
+ IdAllocator();
+
+ // Allocates a new resource ID.
+ ResourceId AllocateID() {
+ ResourceId id = FindFirstFree();
+ MarkAsUsed(id);
+ return id;
+ }
+
+ // Allocates an Id starting at or above desired_id.
+ // Note: may wrap if it starts near limit.
+ ResourceId AllocateIDAtOrAbove(ResourceId desired_id);
+
+ // Marks an id as used. Returns false if id was already used.
+ bool MarkAsUsed(ResourceId id) {
+ std::pair<ResourceIdSet::iterator, bool> result = used_ids_.insert(id);
+ return result.second;
+ }
+
+ // Frees a resource ID.
+ void FreeID(ResourceId id) {
+ used_ids_.erase(id);
+ }
+
+ // Checks whether or not a resource ID is in use.
+ bool InUse(ResourceId id) const {
+ return id == kInvalidResource || used_ids_.find(id) != used_ids_.end();
+ }
+
+ private:
+ // TODO(gman): This would work much better with ranges or a hash table.
+ typedef std::set<ResourceId> ResourceIdSet;
+
+ ResourceId FindFirstFree() const;
+
+ ResourceIdSet used_ids_;
+
+ DISALLOW_COPY_AND_ASSIGN(IdAllocator);
+};
+
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_CLIENT_ID_ALLOCATOR_H_
diff --git a/gpu/command_buffer/common/id_allocator_test.cc b/gpu/command_buffer/common/id_allocator_test.cc
new file mode 100644
index 0000000..6869f33
--- /dev/null
+++ b/gpu/command_buffer/common/id_allocator_test.cc
@@ -0,0 +1,98 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file has the unit tests for the IdAllocator class.
+
+#include "gpu/command_buffer/common/id_allocator.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace gpu {
+
+class IdAllocatorTest : public testing::Test {
+ protected:
+ virtual void SetUp() {}
+ virtual void TearDown() {}
+
+ IdAllocator* id_allocator() { return &id_allocator_; }
+
+ private:
+ IdAllocator id_allocator_;
+};
+
+// Checks basic functionality: AllocateID, FreeID, InUse.
+TEST_F(IdAllocatorTest, TestBasic) {
+ IdAllocator *allocator = id_allocator();
+ // Check that resource 1 is not in use
+ EXPECT_FALSE(allocator->InUse(1));
+
+ // Allocate an ID, check that it's in use.
+ ResourceId id1 = allocator->AllocateID();
+ EXPECT_TRUE(allocator->InUse(id1));
+
+ // Allocate another ID, check that it's in use, and different from the first
+ // one.
+ ResourceId id2 = allocator->AllocateID();
+ EXPECT_TRUE(allocator->InUse(id2));
+ EXPECT_NE(id1, id2);
+
+ // Free one of the IDs, check that it's not in use any more.
+ allocator->FreeID(id1);
+ EXPECT_FALSE(allocator->InUse(id1));
+
+ // Frees the other ID, check that it's not in use any more.
+ allocator->FreeID(id2);
+ EXPECT_FALSE(allocator->InUse(id2));
+}
+
+// Checks that the resource IDs are re-used after being freed.
+TEST_F(IdAllocatorTest, TestAdvanced) {
+ IdAllocator *allocator = id_allocator();
+
+ // Allocate a significant number of resources.
+ const unsigned int kNumResources = 100;
+ ResourceId ids[kNumResources];
+ for (unsigned int i = 0; i < kNumResources; ++i) {
+ ids[i] = allocator->AllocateID();
+ EXPECT_TRUE(allocator->InUse(ids[i]));
+ }
+
+ // Check that a new allocation re-uses the resource we just freed.
+ ResourceId id1 = ids[kNumResources / 2];
+ allocator->FreeID(id1);
+ EXPECT_FALSE(allocator->InUse(id1));
+ ResourceId id2 = allocator->AllocateID();
+ EXPECT_TRUE(allocator->InUse(id2));
+ EXPECT_EQ(id1, id2);
+}
+
+// Checks that we can choose our own ids and they won't be reused.
+TEST_F(IdAllocatorTest, MarkAsUsed) {
+ IdAllocator* allocator = id_allocator();
+ ResourceId id = allocator->AllocateID();
+ allocator->FreeID(id);
+ EXPECT_FALSE(allocator->InUse(id));
+ EXPECT_TRUE(allocator->MarkAsUsed(id));
+ EXPECT_TRUE(allocator->InUse(id));
+ ResourceId id2 = allocator->AllocateID();
+ EXPECT_NE(id, id2);
+ EXPECT_TRUE(allocator->MarkAsUsed(id2 + 1));
+ ResourceId id3 = allocator->AllocateID();
+ // Checks our algorithm. If the algorithm changes this check should be
+ // changed.
+ EXPECT_EQ(id3, id2 + 2);
+}
+
+// Checks AllocateIdAtOrAbove.
+TEST_F(IdAllocatorTest, AllocateIdAtOrAbove) {
+ const ResourceId kOffset = 123456;
+ IdAllocator* allocator = id_allocator();
+ ResourceId id1 = allocator->AllocateIDAtOrAbove(kOffset);
+ EXPECT_EQ(kOffset, id1);
+ ResourceId id2 = allocator->AllocateIDAtOrAbove(kOffset);
+ EXPECT_GT(id2, kOffset);
+ ResourceId id3 = allocator->AllocateIDAtOrAbove(kOffset);
+ EXPECT_GT(id3, kOffset);
+}
+
+} // namespace gpu