-rw-r--r--  gpu/DEPS                                                    |   1
-rw-r--r--  gpu/GLES2/gles2_command_buffer.h                            |   8
-rwxr-xr-x  gpu/command_buffer/build_gles2_cmd_buffer.py                |  75
-rw-r--r--  gpu/command_buffer/client/cmd_buffer_helper.h               |   8
-rw-r--r--  gpu/command_buffer/client/fenced_allocator.cc               |  23
-rw-r--r--  gpu/command_buffer/client/fenced_allocator.h                |  16
-rw-r--r--  gpu/command_buffer/client/fenced_allocator_test.cc          |  60
-rw-r--r--  gpu/command_buffer/client/gles2_c_lib_autogen.h             |  16
-rw-r--r--  gpu/command_buffer/client/gles2_implementation.cc           | 102
-rw-r--r--  gpu/command_buffer/client/gles2_implementation.h            | 108
-rw-r--r--  gpu/command_buffer/client/gles2_implementation_autogen.h    |  11
-rw-r--r--  gpu/command_buffer/client/gles2_implementation_unittest.cc  | 187
-rw-r--r--  gpu/command_buffer/client/mapped_memory.cc                  |  75
-rw-r--r--  gpu/command_buffer/client/mapped_memory.h                   | 146
-rw-r--r--  gpu/command_buffer/client/mapped_memory_unittest.cc         | 257
-rw-r--r--  gpu/gpu.gyp                                                 |   3
16 files changed, 1059 insertions(+), 37 deletions(-)
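For orientation, below is a sketch of how a client might use the mapping entry points this change introduces. `glMapTexSubImage2D`/`glUnmapTexSubImage2D` and the `GL_WRITE_ONLY` requirement come from the diff that follows; the texture setup, helper name, and pixel data are hypothetical.

```cpp
// Sketch only: stages a row of pixels through the new client-side mapping API.
// Assumes a bound, already-defined GL_TEXTURE_2D and RGBA8 data.
void UpdateTextureRow(int width, const uint8_t* pixels) {
  // Returns a pointer into shared memory owned by the client's
  // MappedMemoryManager; only GL_WRITE_ONLY access is accepted.
  void* mem = glMapTexSubImage2D(
      GL_TEXTURE_2D, 0 /* level */, 0 /* xoffset */, 0 /* yoffset */,
      width, 1 /* height */, GL_RGBA, GL_UNSIGNED_BYTE, GL_WRITE_ONLY);
  if (!mem)
    return;  // A GL error was set (bad args or out of memory).
  memcpy(mem, pixels, width * 4);
  // Issues the TexSubImage2D command and frees the block pending a token.
  glUnmapTexSubImage2D(mem);
}
```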
diff --git a/gpu/DEPS b/gpu/DEPS
--- a/gpu/DEPS
+++ b/gpu/DEPS
@@ -5,6 +5,7 @@ include_rules = [
   "+../command_buffer",
   "+../client",
   "+../common",
+  "+../GLES2",
   "+../service",
   # For IOSurface and TransportDIB support on OS X, and X11 utilities.
diff --git a/gpu/GLES2/gles2_command_buffer.h b/gpu/GLES2/gles2_command_buffer.h
index 1a860b4..c36b995 100644
--- a/gpu/GLES2/gles2_command_buffer.h
+++ b/gpu/GLES2/gles2_command_buffer.h
@@ -15,6 +15,14 @@
 #define PEPPER3D_SKIP_GLSL_TRANSLATION \
     "pepper3d_skip_glsl_translation"
 
+// TODO(gman): move this somewhere else.
+#ifndef GL_READ_ONLY
+#define GL_READ_ONLY 0x88B8
+#endif
+#ifndef GL_WRITE_ONLY
+#define GL_WRITE_ONLY 0x88B9
+#endif
+
 #endif  // GPU_GLES2_GLES2_COMMAND_BUFFER_H_
diff --git a/gpu/command_buffer/build_gles2_cmd_buffer.py b/gpu/command_buffer/build_gles2_cmd_buffer.py
index 2a7db1f..4eee3aa 100755
--- a/gpu/command_buffer/build_gles2_cmd_buffer.py
+++ b/gpu/command_buffer/build_gles2_cmd_buffer.py
@@ -182,6 +182,10 @@ GL_APICALL void GL_APIENTRY glGenSharedIds (GLuint namespace_id, GLuint
 GL_APICALL void GL_APIENTRY glDeleteSharedIds (GLuint namespace_id, GLsizei n, const GLuint* ids);
 GL_APICALL void GL_APIENTRY glRegisterSharedIds (GLuint namespace_id, GLsizei n, const GLuint* ids);
 GL_APICALL GLboolean GL_APIENTRY glCommandBufferEnable (const char* feature);
+GL_APICALL void* GL_APIENTRY glMapBufferSubData (GLuint target, GLintptr offset, GLsizeiptr size, GLenum access);
+GL_APICALL void GL_APIENTRY glUnmapBufferSubData (const void* mem);
+GL_APICALL void* GL_APIENTRY glMapTexSubImage2D (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLsizei width, GLsizei height, GLenum format, GLenum type, GLenum access);
+GL_APICALL void GL_APIENTRY glUnmapTexSubImage2D (const void* mem);
 """
 
 # This is the list of all commands that will be generated and their Id.
@@ -971,6 +975,7 @@
 # them based on the GL function arguments.
 # a NonImmediate type is a type that stays a pointer even in
 # an immediate version of a command.
+# gen_cmd: Whether or not this function generates a command. Default = True.
 # immediate: Whether or not to generate an immediate command for the GL
 #            function. The default is if there is exactly 1 pointer argument
 #            in the GL function an immediate command is generated.
@@ -1310,6 +1315,8 @@ _FUNCTION_INFO = {
     'expectation': False,
   },
   'LinkProgram': {'decoder_func': 'DoLinkProgram'},
+  'MapBufferSubData': {'gen_cmd': False},
+  'MapTexSubImage2D': {'gen_cmd': False},
   'PixelStorei': {'type': 'Manual'},
   'RenderbufferStorage': {
     'decoder_func': 'DoRenderbufferStorage',
@@ -1415,6 +1422,8 @@ _FUNCTION_INFO = {
   'UniformMatrix2fv': {'type': 'PUTn', 'data_type': 'GLfloat', 'count': 4},
   'UniformMatrix3fv': {'type': 'PUTn', 'data_type': 'GLfloat', 'count': 9},
   'UniformMatrix4fv': {'type': 'PUTn', 'data_type': 'GLfloat', 'count': 16},
+  'UnmapBufferSubData': {'gen_cmd': False},
+  'UnmapTexSubImage2D': {'gen_cmd': False},
   'UseProgram': {'decoder_func': 'DoUseProgram', 'unit_test': False},
   'ValidateProgram': {'decoder_func': 'DoValidateProgram'},
   'VertexAttrib1f': {'decoder_func': 'DoVertexAttrib1f'},
@@ -4775,9 +4784,11 @@ class GLGenerator(object):
       f = Function(func_name, func_name, func_info, return_type, args,
                    args_for_cmds, cmd_args, init_args, num_pointer_args)
       self.original_functions.append(f)
-      self.AddFunction(f)
-      f.type_handler.AddImmediateFunction(self, f)
-      f.type_handler.AddBucketFunction(self, f)
+      gen_cmd = f.GetInfo('gen_cmd')
+      if gen_cmd == True or gen_cmd == None:
+        self.AddFunction(f)
+        f.type_handler.AddImmediateFunction(self, f)
+        f.type_handler.AddBucketFunction(self, f)
 
     self.Log("Auto Generated Functions    : %d" %
              len([f for f in self.functions if f.can_auto_generate or
@@ -4797,9 +4808,12 @@ class GLGenerator(object):
     file.Write("#define GLES2_COMMAND_LIST(OP) \\\n")
     by_id = {}
     for func in self.functions:
-      if not func.name in _CMD_ID_TABLE:
-        self.Error("Command %s not in _CMD_ID_TABLE" % func.name)
-      by_id[_CMD_ID_TABLE[func.name]] = func
+      if True:
+      #gen_cmd = func.GetInfo('gen_cmd')
+      #if gen_cmd == True or gen_cmd == None:
+        if not func.name in _CMD_ID_TABLE:
+          self.Error("Command %s not in _CMD_ID_TABLE" % func.name)
+        by_id[_CMD_ID_TABLE[func.name]] = func
     for id in sorted(by_id.keys()):
       file.Write("  %-60s /* %d */ \\\n" % ("OP(%s)" % by_id[id].name, id))
@@ -4820,7 +4834,10 @@ class GLGenerator(object):
     """Writes the command buffer format"""
     file = CHeaderWriter(filename)
     for func in self.functions:
-      func.WriteStruct(file)
+      if True:
+      #gen_cmd = func.GetInfo('gen_cmd')
+      #if gen_cmd == True or gen_cmd == None:
+        func.WriteStruct(file)
     file.Write("\n")
     file.Close()
@@ -4828,7 +4845,10 @@ class GLGenerator(object):
     """Writes the command buffer doc version of the commands"""
     file = CWriter(filename)
     for func in self.functions:
-      func.WriteDocs(file)
+      if True:
+      #gen_cmd = func.GetInfo('gen_cmd')
+      #if gen_cmd == True or gen_cmd == None:
+        func.WriteDocs(file)
     file.Write("\n")
     file.Close()
@@ -4841,7 +4861,10 @@ class GLGenerator(object):
         "\n")
     for func in self.functions:
-      func.WriteFormatTest(file)
+      if True:
+      #gen_cmd = func.GetInfo('gen_cmd')
+      #if gen_cmd == True or gen_cmd == None:
+        func.WriteFormatTest(file)
     file.Close()
@@ -4855,11 +4878,14 @@ class GLGenerator(object):
     file.Write("// Changing them will break all client programs.\n")
     file.Write("TEST(GLES2CommandIdTest, CommandIdsMatch) {\n")
     for func in self.functions:
-      if not func.name in _CMD_ID_TABLE:
-        self.Error("Command %s not in _CMD_ID_TABLE" % func.name)
-      file.Write("  COMPILE_ASSERT(%s::kCmdId == %d,\n" %
-                 (func.name, _CMD_ID_TABLE[func.name]))
-      file.Write("                 GLES2_%s_kCmdId_mismatch);\n" % func.name)
+      if True:
+      #gen_cmd = func.GetInfo('gen_cmd')
+      #if gen_cmd == True or gen_cmd == None:
+        if not func.name in _CMD_ID_TABLE:
+          self.Error("Command %s not in _CMD_ID_TABLE" % func.name)
+        file.Write("  COMPILE_ASSERT(%s::kCmdId == %d,\n" %
+                   (func.name, _CMD_ID_TABLE[func.name]))
+        file.Write("                 GLES2_%s_kCmdId_mismatch);\n" % func.name)
     file.Write("}\n")
     file.Write("\n")
@@ -4870,7 +4896,10 @@ class GLGenerator(object):
     file = CHeaderWriter(filename)
     for func in self.functions:
-      func.WriteCmdHelper(file)
+      if True:
+      #gen_cmd = func.GetInfo('gen_cmd')
+      #if gen_cmd == True or gen_cmd == None:
+        func.WriteCmdHelper(file)
     file.Close()
@@ -4881,7 +4910,10 @@ class GLGenerator(object):
         "// It is included by gles2_cmd_decoder.cc\n")
     for func in self.functions:
-      func.WriteServiceImplementation(file)
+      if True:
+      #gen_cmd = func.GetInfo('gen_cmd')
+      #if gen_cmd == True or gen_cmd == None:
+        func.WriteServiceImplementation(file)
     file.Close()
@@ -4902,10 +4934,13 @@ class GLGenerator(object):
       end = num_tests
     for idx in range(test_num, end):
       func = self.functions[idx]
-      if func.GetInfo('unit_test') == False:
-        file.Write("// TODO(gman): %s\n" % func.name)
-      else:
-        func.WriteServiceUnitTest(file)
+      if True:
+      #gen_cmd = func.GetInfo('gen_cmd')
+      #if gen_cmd == True or gen_cmd == None:
+        if func.GetInfo('unit_test') == False:
+          file.Write("// TODO(gman): %s\n" % func.name)
+        else:
+          func.WriteServiceUnitTest(file)
     file.Close()
diff --git a/gpu/command_buffer/client/cmd_buffer_helper.h b/gpu/command_buffer/client/cmd_buffer_helper.h
index 49ff97e..d174831e 100644
--- a/gpu/command_buffer/client/cmd_buffer_helper.h
+++ b/gpu/command_buffer/client/cmd_buffer_helper.h
@@ -109,6 +109,10 @@ class CommandBufferHelper {
     return *reinterpret_cast<T*>(data);
   }
 
+  int32 last_token_read() const {
+    return last_token_read_;
+  }
+
   error::Error GetError();
 
   // Common Commands
@@ -196,6 +200,10 @@ class CommandBufferHelper {
                                shared_memory_offset);
   }
 
+  CommandBuffer* command_buffer() const {
+    return command_buffer_;
+  }
+
  private:
   // Waits until get changes, updating the value of get_.
   void WaitForGetChange();
diff --git a/gpu/command_buffer/client/fenced_allocator.cc b/gpu/command_buffer/client/fenced_allocator.cc
index 57a35fde..a58046e 100644
--- a/gpu/command_buffer/client/fenced_allocator.cc
+++ b/gpu/command_buffer/client/fenced_allocator.cc
@@ -21,8 +21,9 @@ FencedAllocator::~FencedAllocator() {
       i = WaitForTokenAndFreeBlock(i);
     }
   }
-  DCHECK_EQ(blocks_.size(), 1u);
-  DCHECK_EQ(blocks_[0].state, FREE);
+  // These checks are not valid if the service has crashed or lost the context.
+  // DCHECK_EQ(blocks_.size(), 1u);
+  // DCHECK_EQ(blocks_[0].state, FREE);
 }
 
 // Looks for a non-allocated block that is big enough. Search in the FREE
@@ -65,8 +66,8 @@ void FencedAllocator::Free(FencedAllocator::Offset offset) {
 }
 
 // Looks for the corresponding block, mark it FREE_PENDING_TOKEN.
-void FencedAllocator::FreePendingToken(FencedAllocator::Offset offset,
-                                       unsigned int token) {
+void FencedAllocator::FreePendingToken(
+    FencedAllocator::Offset offset, int32 token) {
   BlockIndex index = GetBlockByOffset(offset);
   Block &block = blocks_[index];
   block.state = FREE_PENDING_TOKEN;
@@ -154,6 +155,20 @@ FencedAllocator::BlockIndex FencedAllocator::WaitForTokenAndFreeBlock(
   return CollapseFreeBlock(index);
 }
 
+// Frees any blocks pending a token for which the token has been read.
+void FencedAllocator::FreeUnused() {
+  int32 last_token_read = helper_->last_token_read();
+  for (unsigned int i = 0; i < blocks_.size();) {
+    Block& block = blocks_[i];
+    if (block.state == FREE_PENDING_TOKEN && block.token <= last_token_read) {
+      block.state = FREE;
+      i = CollapseFreeBlock(i);
+    } else {
+      ++i;
+    }
+  }
+}
+
 // If the block is exactly the requested size, simply mark it IN_USE, otherwise
 // split it and mark the first one (of the requested size) IN_USE.
 FencedAllocator::Offset FencedAllocator::AllocInBlock(BlockIndex index,
diff --git a/gpu/command_buffer/client/fenced_allocator.h b/gpu/command_buffer/client/fenced_allocator.h
index 56eb974..cbb133d 100644
--- a/gpu/command_buffer/client/fenced_allocator.h
+++ b/gpu/command_buffer/client/fenced_allocator.h
@@ -65,7 +65,10 @@ class FencedAllocator {
   // Parameters:
   //   offset: the offset of the memory block to free.
   //   token: the token value to wait for before re-using the memory.
-  void FreePendingToken(Offset offset, unsigned int token);
+  void FreePendingToken(Offset offset, int32 token);
+
+  // Frees any blocks pending a token for which the token has been read.
+  void FreeUnused();
 
   // Gets the size of the largest free block that is available without waiting.
   unsigned int GetLargestFreeSize();
@@ -92,7 +95,7 @@ class FencedAllocator {
     State state;
     Offset offset;
     unsigned int size;
-    unsigned int token;  // token to wait for in the FREE_PENDING_TOKEN case.
+    int32 token;  // token to wait for in the FREE_PENDING_TOKEN case.
   };
 
   // Comparison functor for memory block sorting.
@@ -106,7 +109,7 @@ class FencedAllocator {
   typedef std::vector<Block> Container;
   typedef unsigned int BlockIndex;
 
-  static const unsigned int kUnusedToken = 0;
+  static const int32 kUnusedToken = 0;
 
   // Gets the index of a memory block, given its offset.
   BlockIndex GetBlockByOffset(Offset offset);
@@ -189,11 +192,16 @@ class FencedAllocatorWrapper {
   // Parameters:
   //   pointer: the pointer to the memory block to free.
   //   token: the token value to wait for before re-using the memory.
-  void FreePendingToken(void *pointer, unsigned int token) {
+  void FreePendingToken(void *pointer, int32 token) {
     DCHECK(pointer);
    allocator_.FreePendingToken(GetOffset(pointer), token);
   }
 
+  // Frees any blocks pending a token for which the token has been read.
+  void FreeUnused() {
+    allocator_.FreeUnused();
+  }
+
   // Gets a pointer to a memory block given the base memory and the offset.
   // It translates FencedAllocator::kInvalidOffset to NULL.
   void *GetPointer(FencedAllocator::Offset offset) {
diff --git a/gpu/command_buffer/client/fenced_allocator_test.cc b/gpu/command_buffer/client/fenced_allocator_test.cc
index 0fa215b..46f777c 100644
--- a/gpu/command_buffer/client/fenced_allocator_test.cc
+++ b/gpu/command_buffer/client/fenced_allocator_test.cc
@@ -206,6 +206,66 @@ TEST_F(FencedAllocatorTest, TestFreePendingToken) {
   }
 }
 
+// Checks the free-pending-token mechanism using FreeUnused
+TEST_F(FencedAllocatorTest, FreeUnused) {
+  EXPECT_TRUE(allocator_->CheckConsistency());
+
+  const unsigned int kSize = 16;
+  const unsigned int kAllocCount = kBufferSize / kSize;
+  CHECK(kAllocCount * kSize == kBufferSize);
+
+  // Allocate several buffers to fill in the memory.
+  FencedAllocator::Offset offsets[kAllocCount];
+  for (unsigned int i = 0; i < kAllocCount; ++i) {
+    offsets[i] = allocator_->Alloc(kSize);
+    EXPECT_NE(FencedAllocator::kInvalidOffset, offsets[i]);
+    EXPECT_GE(kBufferSize, offsets[i] + kSize);
+    EXPECT_TRUE(allocator_->CheckConsistency());
+  }
+
+  // No memory should be available.
+  EXPECT_EQ(0u, allocator_->GetLargestFreeSize());
+
+  // Free one successful allocation, pending fence.
+  int32 token = helper_.get()->InsertToken();
+  allocator_->FreePendingToken(offsets[0], token);
+  EXPECT_TRUE(allocator_->CheckConsistency());
+
+  // Force the command buffer to process the token.
+  helper_->Finish();
+
+  // Tell the allocator to update what's available based on the current token.
+  allocator_->FreeUnused();
+
+  // Check that the new largest free size takes into account the unused block.
+  EXPECT_EQ(kSize, allocator_->GetLargestFreeSize());
+
+  // Free two more.
+  token = helper_.get()->InsertToken();
+  allocator_->FreePendingToken(offsets[1], token);
+  token = helper_.get()->InsertToken();
+  allocator_->FreePendingToken(offsets[2], token);
+  EXPECT_TRUE(allocator_->CheckConsistency());
+
+  // Check that nothing has changed.
+  EXPECT_EQ(kSize, allocator_->GetLargestFreeSize());
+
+  // Force the command buffer to process the token.
+  helper_->Finish();
+
+  // Tell the allocator to update what's available based on the current token.
+  allocator_->FreeUnused();
+
+  // Check that the new largest free size takes into account the unused blocks.
+  EXPECT_EQ(kSize * 3, allocator_->GetLargestFreeSize());
+
+  // Free up everything.
+  for (unsigned int i = 3; i < kAllocCount; ++i) {
+    allocator_->Free(offsets[i]);
+    EXPECT_TRUE(allocator_->CheckConsistency());
+  }
+}
+
 // Tests GetLargestFreeSize
 TEST_F(FencedAllocatorTest, TestGetLargestFreeSize) {
   EXPECT_TRUE(allocator_->CheckConsistency());
diff --git a/gpu/command_buffer/client/gles2_c_lib_autogen.h b/gpu/command_buffer/client/gles2_c_lib_autogen.h
index e0d388c..bae1f9e 100644
--- a/gpu/command_buffer/client/gles2_c_lib_autogen.h
+++ b/gpu/command_buffer/client/gles2_c_lib_autogen.h
@@ -522,6 +522,22 @@ void GLES2RegisterSharedIds(
 GLboolean GLES2CommandBufferEnable(const char* feature) {
   return gles2::GetGLContext()->CommandBufferEnable(feature);
 }
+void* GLES2MapBufferSubData(
+    GLuint target, GLintptr offset, GLsizeiptr size, GLenum access) {
+  return gles2::GetGLContext()->MapBufferSubData(target, offset, size, access);
+}
+void GLES2UnmapBufferSubData(const void* mem) {
+  gles2::GetGLContext()->UnmapBufferSubData(mem);
+}
+void* GLES2MapTexSubImage2D(
+    GLenum target, GLint level, GLint xoffset, GLint yoffset, GLsizei width,
+    GLsizei height, GLenum format, GLenum type, GLenum access) {
+  return gles2::GetGLContext()->MapTexSubImage2D(
+      target, level, xoffset, yoffset, width, height, format, type, access);
+}
+void GLES2UnmapTexSubImage2D(const void* mem) {
+  gles2::GetGLContext()->UnmapTexSubImage2D(mem);
+}
 
 #endif  // GPU_COMMAND_BUFFER_CLIENT_GLES2_C_LIB_AUTOGEN_H_
diff --git a/gpu/command_buffer/client/gles2_implementation.cc b/gpu/command_buffer/client/gles2_implementation.cc
index fd72ed2..52afc74 100644
--- a/gpu/command_buffer/client/gles2_implementation.cc
+++ b/gpu/command_buffer/client/gles2_implementation.cc
@@ -5,6 +5,8 @@
 // A class to emulate GLES2 over command buffers.
#include "../client/gles2_implementation.h" +#include <GLES2/gles2_command_buffer.h> +#include "../client/mapped_memory.h" #include "../common/gles2_cmd_utils.h" #include "../common/id_allocator.h" @@ -420,6 +422,8 @@ GLES2Implementation::GLES2Implementation( result_buffer_ = transfer_buffer; result_shm_offset_ = 0; + mapped_memory_.reset(new MappedMemoryManager(helper_)); + if (share_resources) { buffer_id_handler_.reset( new SharedIdHandler(this, id_namespaces::kBuffers)); @@ -1462,5 +1466,103 @@ GLboolean GLES2Implementation::CommandBufferEnable(const char* feature) { #endif // defined(GLES2_SUPPORT_CLIENT_SIDE_BUFFERS) +void* GLES2Implementation::MapBufferSubData( + GLuint target, GLintptr offset, GLsizeiptr size, GLenum access) { + // NOTE: target is NOT checked because the service will check it + // and we don't know what targets are valid. + if (access != GL_WRITE_ONLY) { + SetGLError(GL_INVALID_ENUM, "MapBufferSubData: bad access mode"); + return NULL; + } + if (offset < 0 || size < 0) { + SetGLError(GL_INVALID_VALUE, "MapBufferSubData: bad range"); + return NULL; + } + int32 shm_id; + unsigned int shm_offset; + void* mem = mapped_memory_->Alloc(size, &shm_id, &shm_offset); + if (!mem) { + SetGLError(GL_OUT_OF_MEMORY, "MapBufferSubData: out of memory"); + return NULL; + } + + std::pair<MappedBufferMap::iterator, bool> result = + mapped_buffers_.insert(std::make_pair( + mem, + MappedBuffer( + access, shm_id, mem, shm_offset, target, offset, size))); + return mem; +} + +void GLES2Implementation::UnmapBufferSubData(const void* mem) { + MappedBufferMap::iterator it = mapped_buffers_.find(mem); + if (it == mapped_buffers_.end()) { + SetGLError(GL_INVALID_VALUE, "UnmapBufferSubData: buffer not mapped"); + return; + } + const MappedBuffer& mb = it->second; + helper_->BufferSubData( + mb.target, mb.offset, mb.size, mb.shm_id, mb.shm_offset); + mapped_memory_->FreePendingToken(mb.shm_memory, helper_->InsertToken()); + mapped_buffers_.erase(it); +} + +void* GLES2Implementation::MapTexSubImage2D( + GLenum target, + GLint level, + GLint xoffset, + GLint yoffset, + GLsizei width, + GLsizei height, + GLenum format, + GLenum type, + GLenum access) { + if (access != GL_WRITE_ONLY) { + SetGLError(GL_INVALID_ENUM, "MapTexSubImage2D: bad access mode"); + return NULL; + } + // NOTE: target is NOT checked because the service will check it + // and we don't know what targets are valid. 
+  if (level < 0 || xoffset < 0 || yoffset < 0 || width < 0 || height < 0) {
+    SetGLError(GL_INVALID_VALUE, "MapTexSubImage2D: bad dimensions");
+    return NULL;
+  }
+  uint32 size;
+  if (!GLES2Util::ComputeImageDataSize(
+      width, height, format, type, unpack_alignment_, &size)) {
+    SetGLError(GL_INVALID_VALUE, "MapTexSubImage2D: image size too large");
+    return NULL;
+  }
+  int32 shm_id;
+  unsigned int shm_offset;
+  void* mem = mapped_memory_->Alloc(size, &shm_id, &shm_offset);
+  if (!mem) {
+    SetGLError(GL_OUT_OF_MEMORY, "MapTexSubImage2D: out of memory");
+    return NULL;
+  }
+
+  std::pair<MappedTextureMap::iterator, bool> result =
+      mapped_textures_.insert(std::make_pair(
+          mem,
+          MappedTexture(
+              access, shm_id, mem, shm_offset,
+              target, level, xoffset, yoffset, width, height, format, type)));
+  return mem;
+}
+
+void GLES2Implementation::UnmapTexSubImage2D(const void* mem) {
+  MappedTextureMap::iterator it = mapped_textures_.find(mem);
+  if (it == mapped_textures_.end()) {
+    SetGLError(GL_INVALID_VALUE, "UnmapTexSubImage2D: texture not mapped");
+    return;
+  }
+  const MappedTexture& mt = it->second;
+  helper_->TexSubImage2D(
+      mt.target, mt.level, mt.xoffset, mt.yoffset, mt.width, mt.height,
+      mt.format, mt.type, mt.shm_id, mt.shm_offset);
+  mapped_memory_->FreePendingToken(mt.shm_memory, helper_->InsertToken());
+  mapped_textures_.erase(it);
+}
+
 }  // namespace gles2
 }  // namespace gpu
diff --git a/gpu/command_buffer/client/gles2_implementation.h b/gpu/command_buffer/client/gles2_implementation.h
index 5875040..e66997b 100644
--- a/gpu/command_buffer/client/gles2_implementation.h
+++ b/gpu/command_buffer/client/gles2_implementation.h
@@ -19,6 +19,9 @@
 #define GLES2_SUPPORT_CLIENT_SIDE_BUFFERS 1
 
 namespace gpu {
+
+class MappedMemoryManager;
+
 namespace gles2 {
 
 class ClientSideBufferHelper;
@@ -145,15 +148,15 @@ class GLES2Implementation {
   }
 #endif
 
-  GLuint MakeTextureId() {
-    GLuint id;
-    texture_id_handler_->MakeIds(0, 1, &id);
-    return id;
-  }
+  GLuint MakeTextureId() {
+    GLuint id;
+    texture_id_handler_->MakeIds(0, 1, &id);
+    return id;
+  }
 
-  void FreeTextureId(GLuint id) {
-    texture_id_handler_->FreeIds(1, &id);
-  }
+  void FreeTextureId(GLuint id) {
+    texture_id_handler_->FreeIds(1, &id);
+  }
 
  private:
  // Wraps RingBufferWrapper to provide aligned allocations.
@@ -181,6 +184,87 @@ class GLES2Implementation {
    }
  };
 
+  // Base class for mapped resources.
+  struct MappedResource {
+    MappedResource(GLenum _access, int _shm_id, void* mem, unsigned int offset)
+        : access(_access),
+          shm_id(_shm_id),
+          shm_memory(mem),
+          shm_offset(offset) {
+    }
+
+    // access mode. Currently only GL_WRITE_ONLY is valid
+    GLenum access;
+
+    // Shared memory ID for buffer.
+    int shm_id;
+
+    // Address of shared memory
+    void* shm_memory;
+
+    // Offset of shared memory
+    unsigned int shm_offset;
+  };
+
+  // Used to track mapped textures.
+  struct MappedTexture : public MappedResource {
+    MappedTexture(
+        GLenum access,
+        int shm_id,
+        void* shm_mem,
+        unsigned int shm_offset,
+        GLenum _target,
+        GLint _level,
+        GLint _xoffset,
+        GLint _yoffset,
+        GLsizei _width,
+        GLsizei _height,
+        GLenum _format,
+        GLenum _type)
+        : MappedResource(access, shm_id, shm_mem, shm_offset),
+          target(_target),
+          level(_level),
+          xoffset(_xoffset),
+          yoffset(_yoffset),
+          width(_width),
+          height(_height),
+          format(_format),
+          type(_type) {
+    }
+
+    // These match the arguments to TexSubImage2D.
+    GLenum target;
+    GLint level;
+    GLint xoffset;
+    GLint yoffset;
+    GLsizei width;
+    GLsizei height;
+    GLenum format;
+    GLenum type;
+  };
+
+  // Used to track mapped buffers.
+  struct MappedBuffer : public MappedResource {
+    MappedBuffer(
+        GLenum access,
+        int shm_id,
+        void* shm_mem,
+        unsigned int shm_offset,
+        GLenum _target,
+        GLintptr _offset,
+        GLsizeiptr _size)
+        : MappedResource(access, shm_id, shm_mem, shm_offset),
+          target(_target),
+          offset(_offset),
+          size(_size) {
+    }
+
+    // These match the arguments to BufferSubData.
+    GLenum target;
+    GLintptr offset;
+    GLsizeiptr size;
+  };
+
   // Gets the shared memory id for the result buffer.
   uint32 result_shm_id() const {
     return transfer_buffer_id_;
@@ -291,6 +375,14 @@ class GLES2Implementation {
   typedef std::map<uint32, std::string> GLStringMap;
   GLStringMap gl_strings_;
 
+  typedef std::map<const void*, MappedBuffer> MappedBufferMap;
+  MappedBufferMap mapped_buffers_;
+
+  typedef std::map<const void*, MappedTexture> MappedTextureMap;
+  MappedTextureMap mapped_textures_;
+
+  scoped_ptr<MappedMemoryManager> mapped_memory_;
+
   DISALLOW_COPY_AND_ASSIGN(GLES2Implementation);
 };
diff --git a/gpu/command_buffer/client/gles2_implementation_autogen.h b/gpu/command_buffer/client/gles2_implementation_autogen.h
index 36e3e7f..8dd914b 100644
--- a/gpu/command_buffer/client/gles2_implementation_autogen.h
+++ b/gpu/command_buffer/client/gles2_implementation_autogen.h
@@ -792,5 +792,16 @@
 void RegisterSharedIds(GLuint namespace_id, GLsizei n, const GLuint* ids);
 
 GLboolean CommandBufferEnable(const char* feature);
 
+void* MapBufferSubData(
+    GLuint target, GLintptr offset, GLsizeiptr size, GLenum access);
+
+void UnmapBufferSubData(const void* mem);
+
+void* MapTexSubImage2D(
+    GLenum target, GLint level, GLint xoffset, GLint yoffset, GLsizei width,
+    GLsizei height, GLenum format, GLenum type, GLenum access);
+
+void UnmapTexSubImage2D(const void* mem);
+
 #endif  // GPU_COMMAND_BUFFER_CLIENT_GLES2_IMPLEMENTATION_AUTOGEN_H_
diff --git a/gpu/command_buffer/client/gles2_implementation_unittest.cc b/gpu/command_buffer/client/gles2_implementation_unittest.cc
index 78a0682..a251ee9 100644
--- a/gpu/command_buffer/client/gles2_implementation_unittest.cc
+++ b/gpu/command_buffer/client/gles2_implementation_unittest.cc
@@ -6,6 +6,7 @@
 #include "gpu/command_buffer/client/gles2_implementation.h"
 
 #include "gpu/command_buffer/common/command_buffer.h"
+#include "gpu/GLES2/gles2_command_buffer.h"
 #include "testing/gtest/include/gtest/gtest.h"
 #include "testing/gmock/include/gmock/gmock.h"
@@ -636,7 +637,7 @@ TEST_F(GLES2ImplementationTest, ReservedIds) {
   Cmds expected;
   expected.get.Init(kTransferBufferId, 0);
 
-  // One call to flush to way for GetError
+  // One call to flush to wait for GetError
   EXPECT_CALL(*command_buffer_, OnFlush(_))
       .WillOnce(SetMemory(GLuint(GL_NO_ERROR)))
       .RetiresOnSaturation();
@@ -689,6 +690,190 @@ TEST_F(GLES2ImplementationTest, ReadPixels2Reads) {
   EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
 }
 
+TEST_F(GLES2ImplementationTest, MapUnMapBufferSubData) {
+  struct Cmds {
+    BufferSubData buf;
+    cmd::SetToken set_token;
+  };
+  const GLenum kTarget = GL_ELEMENT_ARRAY_BUFFER;
+  const GLintptr kOffset = 15;
+  const GLsizeiptr kSize = 16;
+
+  int32 token = 1;
+  uint32 offset = 0;
+  Cmds expected;
+  expected.buf.Init(
+      kTarget, kOffset, kSize, kTransferBufferId, offset);
+  expected.set_token.Init(token++);
+
+  void* mem = gl_->MapBufferSubData(kTarget, kOffset, kSize, GL_WRITE_ONLY);
+  ASSERT_TRUE(mem != NULL);
+  gl_->UnmapBufferSubData(mem);
+  EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, MapUnMapBufferSubDataBadArgs) {
+  const GLenum kTarget = GL_ELEMENT_ARRAY_BUFFER;
+  const GLintptr kOffset = 15;
+  const GLsizeiptr kSize = 16;
+
+  // Calls to flush to wait for GetError
+  EXPECT_CALL(*command_buffer_, OnFlush(_))
+      .WillOnce(SetMemory(GLuint(GL_NO_ERROR)))
+      .WillOnce(SetMemory(GLuint(GL_NO_ERROR)))
+      .WillOnce(SetMemory(GLuint(GL_NO_ERROR)))
+      .WillOnce(SetMemory(GLuint(GL_NO_ERROR)))
+      .RetiresOnSaturation();
+
+  void* mem;
+  mem = gl_->MapBufferSubData(kTarget, -1, kSize, GL_WRITE_ONLY);
+  ASSERT_TRUE(mem == NULL);
+  EXPECT_EQ(static_cast<GLenum>(GL_INVALID_VALUE), gl_->GetError());
+  mem = gl_->MapBufferSubData(kTarget, kOffset, -1, GL_WRITE_ONLY);
+  ASSERT_TRUE(mem == NULL);
+  EXPECT_EQ(static_cast<GLenum>(GL_INVALID_VALUE), gl_->GetError());
+  mem = gl_->MapBufferSubData(kTarget, kOffset, kSize, GL_READ_ONLY);
+  ASSERT_TRUE(mem == NULL);
+  EXPECT_EQ(static_cast<GLenum>(GL_INVALID_ENUM), gl_->GetError());
+  const char* kPtr = "something";
+  gl_->UnmapBufferSubData(kPtr);
+  EXPECT_EQ(static_cast<GLenum>(GL_INVALID_VALUE), gl_->GetError());
+}
+
+TEST_F(GLES2ImplementationTest, MapUnMapTexSubImage2D) {
+  struct Cmds {
+    TexSubImage2D tex;
+    cmd::SetToken set_token;
+  };
+  const GLint kLevel = 1;
+  const GLint kXOffset = 2;
+  const GLint kYOffset = 3;
+  const GLint kWidth = 4;
+  const GLint kHeight = 5;
+  const GLenum kFormat = GL_RGBA;
+  const GLenum kType = GL_UNSIGNED_BYTE;
+
+  int32 token = 1;
+  uint32 offset = 0;
+  Cmds expected;
+  expected.tex.Init(
+      GL_TEXTURE_2D, kLevel, kXOffset, kYOffset, kWidth, kHeight, kFormat,
+      kType, kTransferBufferId, offset);
+  expected.set_token.Init(token++);
+
+  void* mem = gl_->MapTexSubImage2D(
+      GL_TEXTURE_2D,
+      kLevel,
+      kXOffset,
+      kYOffset,
+      kWidth,
+      kHeight,
+      kFormat,
+      kType,
+      GL_WRITE_ONLY);
+  ASSERT_TRUE(mem != NULL);
+  gl_->UnmapTexSubImage2D(mem);
+  EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, MapUnMapTexSubImage2DBadArgs) {
+  const GLint kLevel = 1;
+  const GLint kXOffset = 2;
+  const GLint kYOffset = 3;
+  const GLint kWidth = 4;
+  const GLint kHeight = 5;
+  const GLenum kFormat = GL_RGBA;
+  const GLenum kType = GL_UNSIGNED_BYTE;
+
+  // Calls to flush to wait for GetError
+  EXPECT_CALL(*command_buffer_, OnFlush(_))
+      .WillOnce(SetMemory(GLuint(GL_NO_ERROR)))
+      .WillOnce(SetMemory(GLuint(GL_NO_ERROR)))
+      .WillOnce(SetMemory(GLuint(GL_NO_ERROR)))
+      .WillOnce(SetMemory(GLuint(GL_NO_ERROR)))
+      .WillOnce(SetMemory(GLuint(GL_NO_ERROR)))
+      .WillOnce(SetMemory(GLuint(GL_NO_ERROR)))
+      .WillOnce(SetMemory(GLuint(GL_NO_ERROR)))
+      .RetiresOnSaturation();
+
+  void* mem;
+  mem = gl_->MapTexSubImage2D(
+      GL_TEXTURE_2D,
+      -1,
+      kXOffset,
+      kYOffset,
+      kWidth,
+      kHeight,
+      kFormat,
+      kType,
+      GL_WRITE_ONLY);
+  EXPECT_TRUE(mem == NULL);
+  EXPECT_EQ(static_cast<GLenum>(GL_INVALID_VALUE), gl_->GetError());
+  mem = gl_->MapTexSubImage2D(
+      GL_TEXTURE_2D,
+      kLevel,
+      -1,
+      kYOffset,
+      kWidth,
+      kHeight,
+      kFormat,
+      kType,
+      GL_WRITE_ONLY);
+  EXPECT_TRUE(mem == NULL);
+  EXPECT_EQ(static_cast<GLenum>(GL_INVALID_VALUE), gl_->GetError());
+  mem = gl_->MapTexSubImage2D(
+      GL_TEXTURE_2D,
+      kLevel,
+      kXOffset,
+      -1,
+      kWidth,
+      kHeight,
+      kFormat,
+      kType,
+      GL_WRITE_ONLY);
+  EXPECT_TRUE(mem == NULL);
+  EXPECT_EQ(static_cast<GLenum>(GL_INVALID_VALUE), gl_->GetError());
+  mem = gl_->MapTexSubImage2D(
+      GL_TEXTURE_2D,
+      kLevel,
+      kXOffset,
+      kYOffset,
+      -1,
+      kHeight,
+      kFormat,
+      kType,
+      GL_WRITE_ONLY);
+  EXPECT_TRUE(mem == NULL);
+  EXPECT_EQ(static_cast<GLenum>(GL_INVALID_VALUE), gl_->GetError());
+  mem = gl_->MapTexSubImage2D(
+      GL_TEXTURE_2D,
+      kLevel,
+      kXOffset,
+      kYOffset,
+      kWidth,
+      -1,
+      kFormat,
+      kType,
+      GL_WRITE_ONLY);
+  EXPECT_TRUE(mem == NULL);
+  EXPECT_EQ(static_cast<GLenum>(GL_INVALID_VALUE), gl_->GetError());
+  mem = gl_->MapTexSubImage2D(
+      GL_TEXTURE_2D,
+      kLevel,
+      kXOffset,
+      kYOffset,
+      kWidth,
+      kHeight,
+      kFormat,
+      kType,
+      GL_READ_ONLY);
+  EXPECT_TRUE(mem == NULL);
+  EXPECT_EQ(static_cast<GLenum>(GL_INVALID_ENUM), gl_->GetError());
+  const char* kPtr = "something";
+  gl_->UnmapTexSubImage2D(kPtr);
+  EXPECT_EQ(static_cast<GLenum>(GL_INVALID_VALUE), gl_->GetError());
+}
+
 }  // namespace gles2
 }  // namespace gpu
diff --git a/gpu/command_buffer/client/mapped_memory.cc b/gpu/command_buffer/client/mapped_memory.cc
new file mode 100644
index 0000000..7aafe0a
--- /dev/null
+++ b/gpu/command_buffer/client/mapped_memory.cc
@@ -0,0 +1,75 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/client/mapped_memory.h"
+#include "gpu/command_buffer/client/cmd_buffer_helper.h"
+
+namespace gpu {
+
+MemoryChunk::MemoryChunk(
+    int32 shm_id, gpu::Buffer shm, CommandBufferHelper* helper)
+    : shm_id_(shm_id),
+      shm_(shm),
+      allocator_(shm.size, helper, shm.ptr) {
+}
+
+void* MappedMemoryManager::Alloc(
+    unsigned int size, int32* shm_id, unsigned int* shm_offset) {
+  DCHECK(shm_id);
+  DCHECK(shm_offset);
+  // See if any of the chunks can satisfy this request.
+  for (size_t ii = 0; ii < chunks_.size(); ++ii) {
+    MemoryChunk* chunk = chunks_[ii].get();
+    chunk->FreeUnused();
+    if (chunk->GetLargestFreeSizeWithoutWaiting() >= size) {
+      void* mem = chunk->Alloc(size);
+      DCHECK(mem);
+      *shm_id = chunk->shm_id();
+      *shm_offset = chunk->GetOffset(mem);
+      return mem;
+    }
+  }
+
+  // Make a new chunk to satisfy the request.
+  CommandBuffer* cmd_buf = helper_->command_buffer();
+  int32 id = cmd_buf->CreateTransferBuffer(size);
+  if (id == -1) {
+    return NULL;
+  }
+  gpu::Buffer shm = cmd_buf->GetTransferBuffer(id);
+  MemoryChunk::Ref mc(new MemoryChunk(id, shm, helper_));
+  chunks_.push_back(mc);
+  void* mem = mc->Alloc(size);
+  DCHECK(mem);
+  *shm_id = mc->shm_id();
+  *shm_offset = mc->GetOffset(mem);
+  return mem;
+}
+
+void MappedMemoryManager::Free(void* pointer) {
+  for (size_t ii = 0; ii < chunks_.size(); ++ii) {
+    MemoryChunk* chunk = chunks_[ii].get();
+    if (chunk->IsInChunk(pointer)) {
+      chunk->Free(pointer);
+      return;
+    }
+  }
+  NOTREACHED();
+}
+
+void MappedMemoryManager::FreePendingToken(void* pointer, int32 token) {
+  for (size_t ii = 0; ii < chunks_.size(); ++ii) {
+    MemoryChunk* chunk = chunks_[ii].get();
+    if (chunk->IsInChunk(pointer)) {
+      chunk->FreePendingToken(pointer, token);
+      return;
+    }
+  }
+  NOTREACHED();
+}
+
+}  // namespace gpu
diff --git a/gpu/command_buffer/client/mapped_memory.h b/gpu/command_buffer/client/mapped_memory.h
new file mode 100644
index 0000000..95ff24b
--- /dev/null
+++ b/gpu/command_buffer/client/mapped_memory.h
@@ -0,0 +1,146 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_CLIENT_MAPPED_MEMORY_H_
+#define GPU_COMMAND_BUFFER_CLIENT_MAPPED_MEMORY_H_
+
+#include <vector>
+#include "base/basictypes.h"
+#include "base/ref_counted.h"
+#include "gpu/command_buffer/client/fenced_allocator.h"
+#include "gpu/command_buffer/common/buffer.h"
+
+namespace gpu {
+
+class CommandBufferHelper;
+
+// Manages a shared memory segment.
+class MemoryChunk : public base::RefCounted<MemoryChunk> {
+ public:
+  typedef scoped_refptr<MemoryChunk> Ref;
+
+  MemoryChunk(int32 shm_id, gpu::Buffer shm, CommandBufferHelper* helper);
+
+  // Gets the size of the largest free block that is available without waiting.
+  unsigned int GetLargestFreeSizeWithoutWaiting() {
+    return allocator_.GetLargestFreeSize();
+  }
+
+  // Gets the size of the largest free block that can be allocated if the
+  // caller can wait.
+  unsigned int GetLargestFreeSizeWithWaiting() {
+    return allocator_.GetLargestFreeOrPendingSize();
+  }
+
+  // Gets the size of the chunk.
+  unsigned int GetSize() const {
+    return shm_.size;
+  }
+
+  // The shared memory id for this chunk.
+  int32 shm_id() const {
+    return shm_id_;
+  }
+
+  // Allocates a block of memory. If the buffer is out of directly available
+  // memory, this function may wait until memory that was freed "pending a
+  // token" can be re-used.
+  //
+  // Parameters:
+  //   size: the size of the memory block to allocate.
+  //
+  // Returns:
+  //   the pointer to the allocated memory block, or NULL if out of
+  //   memory.
+  void* Alloc(unsigned int size) {
+    return allocator_.Alloc(size);
+  }
+
+  // Gets the offset to a memory block given the base memory and the address.
+  // It translates NULL to FencedAllocator::kInvalidOffset.
+  unsigned int GetOffset(void* pointer) {
+    return allocator_.GetOffset(pointer);
+  }
+
+  // Frees a block of memory.
+  //
+  // Parameters:
+  //   pointer: the pointer to the memory block to free.
+  void Free(void* pointer) {
+    allocator_.Free(pointer);
+  }
+
+  // Frees a block of memory, pending the passage of a token. That memory won't
+  // be re-allocated until the token has passed through the command stream.
+  //
+  // Parameters:
+  //   pointer: the pointer to the memory block to free.
+  //   token: the token value to wait for before re-using the memory.
+  void FreePendingToken(void* pointer, unsigned int token) {
+    allocator_.FreePendingToken(pointer, token);
+  }
+
+  // Frees any blocks whose tokens have passed.
+  void FreeUnused() {
+    allocator_.FreeUnused();
+  }
+
+  // Returns true if pointer is in the range of this block.
+  bool IsInChunk(void* pointer) const {
+    return pointer >= shm_.ptr &&
+           pointer < reinterpret_cast<const int8*>(shm_.ptr) + shm_.size;
+  }
+
+ private:
+  int32 shm_id_;
+  gpu::Buffer shm_;
+  FencedAllocatorWrapper allocator_;
+
+  DISALLOW_COPY_AND_ASSIGN(MemoryChunk);
+};
+
+// Manages MemoryChunks.
+class MappedMemoryManager {
+ public:
+  explicit MappedMemoryManager(CommandBufferHelper* helper)
+      : helper_(helper) {
+  }
+
+  // Allocates a block of memory
+  // Parameters:
+  //   size: size of memory to allocate.
+  //   shm_id: pointer to variable to receive the shared memory id.
+  //   shm_offset: pointer to variable to receive the shared memory offset.
+  // Returns:
+  //   pointer to allocated block of memory. NULL if failure.
+  void* Alloc(
+      unsigned int size, int32* shm_id, unsigned int* shm_offset);
+
+  // Frees a block of memory.
+  //
+  // Parameters:
+  //   pointer: the pointer to the memory block to free.
+  void Free(void* pointer);
+
+  // Frees a block of memory, pending the passage of a token. That memory won't
+  // be re-allocated until the token has passed through the command stream.
+  //
+  // Parameters:
+  //   pointer: the pointer to the memory block to free.
+  //   token: the token value to wait for before re-using the memory.
+  void FreePendingToken(void* pointer, int32 token);
+
+ private:
+  typedef std::vector<MemoryChunk::Ref> MemoryChunkVector;
+
+  CommandBufferHelper* helper_;
+  MemoryChunkVector chunks_;
+
+  DISALLOW_COPY_AND_ASSIGN(MappedMemoryManager);
+};
+
+}  // namespace gpu
+
+#endif  // GPU_COMMAND_BUFFER_CLIENT_MAPPED_MEMORY_H_
diff --git a/gpu/command_buffer/client/mapped_memory_unittest.cc b/gpu/command_buffer/client/mapped_memory_unittest.cc
new file mode 100644
index 0000000..b983585
--- /dev/null
+++ b/gpu/command_buffer/client/mapped_memory_unittest.cc
@@ -0,0 +1,257 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/client/mapped_memory.h"
+#include "base/callback.h"
+#include "base/message_loop.h"
+#include "base/scoped_nsautorelease_pool.h"
+#include "gpu/command_buffer/client/cmd_buffer_helper.h"
+#include "gpu/command_buffer/service/mocks.h"
+#include "gpu/command_buffer/service/command_buffer_service.h"
+#include "gpu/command_buffer/service/gpu_processor.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace gpu {
+
+using testing::Return;
+using testing::Mock;
+using testing::Truly;
+using testing::Sequence;
+using testing::DoAll;
+using testing::Invoke;
+using testing::_;
+
+class MappedMemoryTestBase : public testing::Test {
+ protected:
+  static const unsigned int kBufferSize = 1024;
+
+  virtual void SetUp() {
+    api_mock_.reset(new AsyncAPIMock);
+    // ignore noops in the mock - we don't want to inspect the internals of the
+    // helper.
+    EXPECT_CALL(*api_mock_, DoCommand(cmd::kNoop, 0, _))
+        .WillRepeatedly(Return(error::kNoError));
+    // Forward the SetToken calls to the engine
+    EXPECT_CALL(*api_mock_.get(), DoCommand(cmd::kSetToken, 1, _))
+        .WillRepeatedly(DoAll(Invoke(api_mock_.get(), &AsyncAPIMock::SetToken),
+                              Return(error::kNoError)));
+
+    command_buffer_.reset(new CommandBufferService);
+    command_buffer_->Initialize(kBufferSize);
+    Buffer ring_buffer = command_buffer_->GetRingBuffer();
+
+    parser_ = new CommandParser(ring_buffer.ptr,
+                                ring_buffer.size,
+                                0,
+                                ring_buffer.size,
+                                0,
+                                api_mock_.get());
+
+    gpu_processor_.reset(new GPUProcessor(
+        command_buffer_.get(), NULL, parser_, INT_MAX));
+    command_buffer_->SetPutOffsetChangeCallback(NewCallback(
+        gpu_processor_.get(), &GPUProcessor::ProcessCommands));
+
+    api_mock_->set_engine(gpu_processor_.get());
+
+    helper_.reset(new CommandBufferHelper(command_buffer_.get()));
+    helper_->Initialize(kBufferSize);
+  }
+
+  int32 GetToken() {
+    return command_buffer_->GetState().token;
+  }
+
+  base::ScopedNSAutoreleasePool autorelease_pool_;
+  MessageLoop message_loop_;
+  scoped_ptr<AsyncAPIMock> api_mock_;
+  scoped_ptr<CommandBufferService> command_buffer_;
+  scoped_ptr<GPUProcessor> gpu_processor_;
+  CommandParser* parser_;
+  scoped_ptr<CommandBufferHelper> helper_;
+};
+
+#ifndef _MSC_VER
+const unsigned int MappedMemoryTestBase::kBufferSize;
+#endif
+
+// Test fixture for MemoryChunk test - Creates a MemoryChunk, using a
+// CommandBufferHelper with a mock AsyncAPIInterface for its interface (calling
+// it directly, not through the RPC mechanism), making sure Noops are ignored
+// and SetToken are properly forwarded to the engine.
+class MemoryChunkTest : public MappedMemoryTestBase {
+ protected:
+  static const int32 kShmId = 123;
+  virtual void SetUp() {
+    MappedMemoryTestBase::SetUp();
+    buffer_.reset(new uint8[kBufferSize]);
+    gpu::Buffer buf;
+    buf.size = kBufferSize;
+    buf.ptr = buffer_.get();
+    chunk_ = new MemoryChunk(kShmId, buf, helper_.get());
+  }
+
+  virtual void TearDown() {
+    // If the GPUProcessor posts any tasks, this forces them to run.
+    MessageLoop::current()->RunAllPending();
+
+    MappedMemoryTestBase::TearDown();
+  }
+
+  MemoryChunk::Ref chunk_;
+  scoped_array<uint8> buffer_;
+};
+
+#ifndef _MSC_VER
+const int32 MemoryChunkTest::kShmId;
+#endif
+
+TEST_F(MemoryChunkTest, Basic) {
+  const unsigned int kSize = 16;
+  EXPECT_EQ(kShmId, chunk_->shm_id());
+  EXPECT_EQ(kBufferSize, chunk_->GetLargestFreeSizeWithoutWaiting());
+  EXPECT_EQ(kBufferSize, chunk_->GetLargestFreeSizeWithWaiting());
+  EXPECT_EQ(kBufferSize, chunk_->GetSize());
+  void *pointer = chunk_->Alloc(kSize);
+  ASSERT_TRUE(pointer);
+  EXPECT_LE(buffer_.get(), static_cast<uint8 *>(pointer));
+  EXPECT_GE(kBufferSize,
+            static_cast<uint8 *>(pointer) - buffer_.get() + kSize);
+  EXPECT_EQ(kBufferSize - kSize, chunk_->GetLargestFreeSizeWithoutWaiting());
+  EXPECT_EQ(kBufferSize - kSize, chunk_->GetLargestFreeSizeWithWaiting());
+  EXPECT_EQ(kBufferSize, chunk_->GetSize());
+
+  chunk_->Free(pointer);
+  EXPECT_EQ(kBufferSize, chunk_->GetLargestFreeSizeWithoutWaiting());
+  EXPECT_EQ(kBufferSize, chunk_->GetLargestFreeSizeWithWaiting());
+
+  uint8 *pointer_char = static_cast<uint8*>(chunk_->Alloc(kSize));
+  ASSERT_TRUE(pointer_char);
+  EXPECT_LE(buffer_.get(), pointer_char);
+  EXPECT_GE(buffer_.get() + kBufferSize, pointer_char + kSize);
+  EXPECT_EQ(kBufferSize - kSize, chunk_->GetLargestFreeSizeWithoutWaiting());
+  EXPECT_EQ(kBufferSize - kSize, chunk_->GetLargestFreeSizeWithWaiting());
+  chunk_->Free(pointer_char);
+  EXPECT_EQ(kBufferSize, chunk_->GetLargestFreeSizeWithoutWaiting());
+  EXPECT_EQ(kBufferSize, chunk_->GetLargestFreeSizeWithWaiting());
+}
+
+class MappedMemoryManagerTest : public MappedMemoryTestBase {
+ protected:
+  virtual void SetUp() {
+    MappedMemoryTestBase::SetUp();
+    manager_.reset(new MappedMemoryManager(helper_.get()));
+  }
+
+  virtual void TearDown() {
+    // If the GPUProcessor posts any tasks, this forces them to run.
+    MessageLoop::current()->RunAllPending();
+    manager_.reset();
+    MappedMemoryTestBase::TearDown();
+  }
+
+  scoped_ptr<MappedMemoryManager> manager_;
+};
+
+TEST_F(MappedMemoryManagerTest, Basic) {
+  const unsigned int kSize = 1024;
+  // Check we can alloc.
+  int32 id1 = -1;
+  unsigned int offset1 = 0xFFFFFFFFU;
+  void* mem1 = manager_->Alloc(kSize, &id1, &offset1);
+  ASSERT_TRUE(mem1);
+  EXPECT_NE(-1, id1);
+  EXPECT_EQ(0u, offset1);
+  // Check if we free and realloc the same size we get the same memory
+  int32 id2 = -1;
+  unsigned int offset2 = 0xFFFFFFFFU;
+  manager_->Free(mem1);
+  void* mem2 = manager_->Alloc(kSize, &id2, &offset2);
+  EXPECT_EQ(mem1, mem2);
+  EXPECT_EQ(id1, id2);
+  EXPECT_EQ(offset1, offset2);
+  // Check if we allocate again we get different shared memory
+  int32 id3 = -1;
+  unsigned int offset3 = 0xFFFFFFFFU;
+  void* mem3 = manager_->Alloc(kSize, &id3, &offset3);
+  ASSERT_TRUE(mem3 != NULL);
+  EXPECT_NE(mem2, mem3);
+  EXPECT_NE(id2, id3);
+  EXPECT_EQ(0u, offset3);
+  // Free 3 and allocate 2 half size blocks.
+  manager_->Free(mem3);
+  int32 id4 = -1;
+  int32 id5 = -1;
+  unsigned int offset4 = 0xFFFFFFFFU;
+  unsigned int offset5 = 0xFFFFFFFFU;
+  void* mem4 = manager_->Alloc(kSize / 2, &id4, &offset4);
+  void* mem5 = manager_->Alloc(kSize / 2, &id5, &offset5);
+  ASSERT_TRUE(mem4 != NULL);
+  ASSERT_TRUE(mem5 != NULL);
+  EXPECT_EQ(id3, id4);
+  EXPECT_EQ(id4, id5);
+  EXPECT_EQ(0u, offset4);
+  EXPECT_EQ(kSize / 2u, offset5);
+  manager_->Free(mem4);
+  manager_->Free(mem2);
+  manager_->Free(mem5);
+}
+
+TEST_F(MappedMemoryManagerTest, FreePendingToken) {
+  const unsigned int kSize = 128;
+  const unsigned int kAllocCount = (kBufferSize / kSize) * 2;
+  CHECK(kAllocCount * kSize == kBufferSize * 2);
+
+  // Allocate several buffers across multiple chunks.
+  void *pointers[kAllocCount];
+  for (unsigned int i = 0; i < kAllocCount; ++i) {
+    int32 id = -1;
+    unsigned int offset = 0xFFFFFFFFu;
+    pointers[i] = manager_->Alloc(kSize, &id, &offset);
+    EXPECT_TRUE(pointers[i]);
+    EXPECT_NE(id, -1);
+    EXPECT_NE(offset, 0xFFFFFFFFu);
+  }
+
+  // Free one successful allocation, pending fence.
+  int32 token = helper_.get()->InsertToken();
+  manager_->FreePendingToken(pointers[0], token);
+
+  // The way we hooked up the helper and engine, it won't process commands
+  // until it has to wait for something. Which means the token shouldn't have
+  // passed yet at this point.
+  EXPECT_GT(token, GetToken());
+  // Force it to read up to the token
+  helper_->Finish();
+  // Check that the token has indeed passed.
+  EXPECT_LE(token, GetToken());
+
+  // This allocation should use the spot just freed above.
+  int32 new_id = -1;
+  unsigned int new_offset = 0xFFFFFFFFu;
+  void* new_ptr = manager_->Alloc(kSize, &new_id, &new_offset);
+  EXPECT_TRUE(new_ptr);
+  EXPECT_EQ(new_ptr, pointers[0]);
+  EXPECT_NE(new_id, -1);
+  EXPECT_NE(new_offset, 0xFFFFFFFFu);
+
+  // Free up everything.
+  manager_->Free(new_ptr);
+  for (unsigned int i = 1; i < kAllocCount; ++i) {
+    manager_->Free(pointers[i]);
+  }
+}
+
+// Check if we don't free we don't crash.
+TEST_F(MappedMemoryManagerTest, DontFree) {
+  const unsigned int kSize = 1024;
+  // Check we can alloc.
+  int32 id1 = -1;
+  unsigned int offset1 = 0xFFFFFFFFU;
+  void* mem1 = manager_->Alloc(kSize, &id1, &offset1);
+  ASSERT_TRUE(mem1);
+}
+
+}  // namespace gpu
diff --git a/gpu/gpu.gyp b/gpu/gpu.gyp
index 0d10e60..6c9a525 100644
--- a/gpu/gpu.gyp
+++ b/gpu/gpu.gyp
@@ -116,6 +116,8 @@
         'command_buffer/client/cmd_buffer_helper.h',
         'command_buffer/client/fenced_allocator.cc',
         'command_buffer/client/fenced_allocator.h',
+        'command_buffer/client/mapped_memory.cc',
+        'command_buffer/client/mapped_memory.h',
         'command_buffer/client/ring_buffer.cc',
         'command_buffer/client/ring_buffer.h',
       ],
@@ -233,6 +235,7 @@
         'command_buffer/client/cmd_buffer_helper_test.cc',
         'command_buffer/client/fenced_allocator_test.cc',
         'command_buffer/client/gles2_implementation_unittest.cc',
+        'command_buffer/client/mapped_memory_unittest.cc',
         'command_buffer/client/ring_buffer_test.cc',
         'command_buffer/common/bitfield_helpers_test.cc',
         'command_buffer/common/gles2_cmd_format_test.cc',
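To summarize the allocation lifecycle the tests above exercise, here is a minimal sketch using the types this change introduces (MappedMemoryManager, CommandBufferHelper). It assumes a helper already wired to a live command buffer, as in the test fixtures; the function name and sizes are hypothetical.

```cpp
// Sketch only: allocate, use, and fence-free a block of mapped memory.
void Example(gpu::CommandBufferHelper* helper) {
  gpu::MappedMemoryManager manager(helper);
  int32 shm_id = -1;
  unsigned int shm_offset = 0;
  // Grabs space from an existing chunk, or creates a new transfer buffer
  // via the helper's CommandBuffer if no chunk has room.
  void* mem = manager.Alloc(1024, &shm_id, &shm_offset);
  if (!mem)
    return;
  // ... fill mem, then issue a command that reads shm_id/shm_offset ...
  // Don't reuse the memory until the service has consumed that command:
  manager.FreePendingToken(mem, helper->InsertToken());
  // Later Alloc() calls invoke FreeUnused() on each chunk, reclaiming this
  // block once helper->last_token_read() has passed the token.
}
```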