author    pliard@chromium.org <pliard@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>  2014-03-04 13:39:04 +0000
committer pliard@chromium.org <pliard@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>  2014-03-04 13:39:04 +0000
commit    cf793ffeb9e5447e631b94bd668b7d363ca453fc (patch)
tree      c74b34c346e30540308396118839e8832cae698f /base/memory
parent    2e8e956139d89aef96c12b94a234443b1ae15961 (diff)
Fix bug in DiscardableMemoryAllocatorAndroid.
AshmemRegion keeps track of the start address of the highest allocated
chunk in the region. If the highest allocated chunk is freed and then
gets reused and split (because it is larger than needed),
|AshmemRegion::highest_allocated_chunk_| needs to be updated as well.
Previously it was not, which led to invalid chaining of previous chunks.

BUG=347919
R=mark@chromium.org

Review URL: https://codereview.chromium.org/184103015

git-svn-id: svn://svn.chromium.org/chrome/trunk/src@254755 0039d316-1c4b-4281-b951-d872f2087c98
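To make the fix concrete, the core of the split path can be sketched as follows. This is a simplified, self-contained C++ illustration of the technique; |highest_allocated_chunk_| and the function shape mirror the allocator below, but the surrounding types and the AddFreeChunk helper are stand-ins, not the actual Chromium code.

#include <cassert>
#include <cstddef>

// Simplified stand-in for AshmemRegion. |highest_allocated_chunk_| records
// the start address of the highest chunk carved out of the region; it is
// used as the |previous_chunk| link for the next chunk created above it.
class Region {
 public:
  // Reuses a free chunk of |reused_size| bytes starting at |reused_start|
  // for an allocation of |actual_size| bytes, splitting off the unused tail
  // as a new free chunk.
  void SplitReusedChunk(char* reused_start, size_t reused_size,
                        size_t actual_size) {
    assert(reused_size > actual_size);
    char* const new_chunk_start = reused_start + actual_size;
    // The fix: if the chunk being split is the highest chunk in the region,
    // its tail becomes the new highest chunk. Without this update, the next
    // chunk carved out of the region would record the stale |reused_start|
    // as its previous chunk, corrupting the previous-chunk chaining that
    // free-chunk merging relies on.
    if (reused_start == highest_allocated_chunk_)
      highest_allocated_chunk_ = new_chunk_start;
    AddFreeChunk(reused_start, new_chunk_start, reused_size - actual_size);
  }

 private:
  // Registers [start, start + size) as free; |previous_chunk| is the start
  // of the chunk immediately below it in the region. Body elided.
  void AddFreeChunk(char* previous_chunk, char* start, size_t size) {}

  char* highest_allocated_chunk_ = nullptr;
};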
Diffstat (limited to 'base/memory')
-rw-r--r--  base/memory/discardable_memory_allocator_android.cc           25
-rw-r--r--  base/memory/discardable_memory_allocator_android_unittest.cc  32
2 files changed, 53 insertions(+), 4 deletions(-)
diff --git a/base/memory/discardable_memory_allocator_android.cc b/base/memory/discardable_memory_allocator_android.cc
index aea94511..2054e30 100644
--- a/base/memory/discardable_memory_allocator_android.cc
+++ b/base/memory/discardable_memory_allocator_android.cc
@@ -171,10 +171,19 @@ class DiscardableMemoryAllocator::AshmemRegion {
private:
struct FreeChunk {
+ FreeChunk() : previous_chunk(NULL), start(NULL), size(0) {}
+
+ explicit FreeChunk(size_t size)
+ : previous_chunk(NULL),
+ start(NULL),
+ size(size) {
+ }
+
FreeChunk(void* previous_chunk, void* start, size_t size)
: previous_chunk(previous_chunk),
start(start),
size(size) {
+ DCHECK_NE(previous_chunk, start);
}
void* const previous_chunk;
@@ -211,7 +220,7 @@ class DiscardableMemoryAllocator::AshmemRegion {
size_t actual_size) {
allocator_->lock_.AssertAcquired();
const FreeChunk reused_chunk = RemoveFreeChunkFromIterator_Locked(
- free_chunks_.lower_bound(FreeChunk(NULL, NULL, actual_size)));
+ free_chunks_.lower_bound(FreeChunk(actual_size)));
if (reused_chunk.is_null())
return scoped_ptr<DiscardableMemory>();
@@ -225,6 +234,7 @@ class DiscardableMemoryAllocator::AshmemRegion {
DCHECK_GE(reused_chunk.size, client_requested_size);
const size_t fragmentation_bytes =
reused_chunk.size - client_requested_size;
+
if (fragmentation_bytes > kMaxChunkFragmentationBytes) {
// Split the free chunk being recycled so that its unused tail doesn't get
// reused (i.e. locked) which would prevent it from being evicted under
@@ -232,6 +242,11 @@ class DiscardableMemoryAllocator::AshmemRegion {
reused_chunk_size = actual_size;
void* const new_chunk_start =
static_cast<char*>(reused_chunk.start) + actual_size;
+ if (reused_chunk.start == highest_allocated_chunk_) {
+ // We also need to update the pointer to the highest allocated chunk in
+ // case we are splitting the highest chunk.
+ highest_allocated_chunk_ = new_chunk_start;
+ }
DCHECK_GT(reused_chunk.size, actual_size);
const size_t new_chunk_size = reused_chunk.size - actual_size;
// Note that merging is not needed here since there can't be contiguous
@@ -239,6 +254,7 @@ class DiscardableMemoryAllocator::AshmemRegion {
AddFreeChunk_Locked(
FreeChunk(reused_chunk.start, new_chunk_start, new_chunk_size));
}
+
const size_t offset =
static_cast<char*>(reused_chunk.start) - static_cast<char*>(base_);
internal::LockAshmemRegion(
@@ -278,7 +294,8 @@ class DiscardableMemoryAllocator::AshmemRegion {
new_free_chunk_size += free_chunk.size;
first_free_chunk = previous_chunk;
// There should not be more contiguous previous free chunks.
- DCHECK(!address_to_free_chunk_map_.count(free_chunk.previous_chunk));
+ previous_chunk = free_chunk.previous_chunk;
+ DCHECK(!address_to_free_chunk_map_.count(previous_chunk));
}
}
// Merge with the next chunk if free and present.
@@ -329,7 +346,7 @@ class DiscardableMemoryAllocator::AshmemRegion {
void*, std::multiset<FreeChunk>::iterator>::iterator it =
address_to_free_chunk_map_.find(chunk_start);
if (it == address_to_free_chunk_map_.end())
- return FreeChunk(NULL, NULL, 0U);
+ return FreeChunk();
return RemoveFreeChunkFromIterator_Locked(it->second);
}
@@ -338,7 +355,7 @@ class DiscardableMemoryAllocator::AshmemRegion {
std::multiset<FreeChunk>::iterator free_chunk_it) {
allocator_->lock_.AssertAcquired();
if (free_chunk_it == free_chunks_.end())
- return FreeChunk(NULL, NULL, 0U);
+ return FreeChunk();
DCHECK(free_chunk_it != free_chunks_.end());
const FreeChunk free_chunk(*free_chunk_it);
address_to_free_chunk_map_.erase(free_chunk_it->start);
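The last two hunks in this file touch the free-chunk bookkeeping used when a freed chunk is merged with contiguous neighbours. As a rough, self-contained sketch of that technique (simplified types and container choice; not the Chromium implementation, which additionally keeps a size-ordered multiset for best-fit reuse):

#include <cstddef>
#include <map>
#include <utility>

// Each free chunk knows its start, its size, and the start address of the
// chunk immediately before it in the region.
struct FreeChunk {
  void* previous_chunk;
  void* start;
  size_t size;
};

// Maps a free chunk's start address to its metadata so that the contiguous
// neighbours of a newly freed chunk can be found in O(log n).
typedef std::map<void*, FreeChunk> FreeChunkMap;

// Adds |chunk| to |free_chunks|, merging it with a free predecessor and/or
// a free successor when they are contiguous in memory. At most one free
// predecessor can exist, which is the invariant the DCHECK above asserts.
void AddFreeChunk(FreeChunkMap* free_chunks, FreeChunk chunk) {
  // Merge with the previous chunk if it is free: the freed chunk absorbs
  // its predecessor's extent and inherits its |previous_chunk| link.
  FreeChunkMap::iterator prev_it = free_chunks->find(chunk.previous_chunk);
  if (prev_it != free_chunks->end()) {
    const FreeChunk& prev = prev_it->second;
    chunk.start = prev.start;
    chunk.size += prev.size;
    chunk.previous_chunk = prev.previous_chunk;
    free_chunks->erase(prev_it);
  }
  // Merge with the next chunk if it is free and contiguous.
  void* const next_start = static_cast<char*>(chunk.start) + chunk.size;
  FreeChunkMap::iterator next_it = free_chunks->find(next_start);
  if (next_it != free_chunks->end()) {
    chunk.size += next_it->second.size;
    free_chunks->erase(next_it);
  }
  free_chunks->insert(std::make_pair(chunk.start, chunk));
}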
diff --git a/base/memory/discardable_memory_allocator_android_unittest.cc b/base/memory/discardable_memory_allocator_android_unittest.cc
index 1da510d..3f75604 100644
--- a/base/memory/discardable_memory_allocator_android_unittest.cc
+++ b/base/memory/discardable_memory_allocator_android_unittest.cc
@@ -269,5 +269,37 @@ TEST_F(DiscardableMemoryAllocatorTest, UseMultipleAshmemRegions) {
EXPECT_EQ(memory3->Memory(), static_cast<char*>(memory1->Memory()) + size);
}
+TEST_F(DiscardableMemoryAllocatorTest,
+ HighestAllocatedChunkPointerIsUpdatedWhenHighestChunkGetsSplit) {
+ DiscardableMemoryAllocator allocator_(kAllocatorName, 32 * kPageSize);
+
+ // Prevents the ashmem region from getting closed when |memory2| gets freed.
+ scoped_ptr<DiscardableMemory> memory1(allocator_.Allocate(kPageSize));
+ ASSERT_TRUE(memory1);
+
+ scoped_ptr<DiscardableMemory> memory2(allocator_.Allocate(4 * kPageSize));
+ ASSERT_TRUE(memory2);
+
+ memory2.reset();
+ memory2 = allocator_.Allocate(kPageSize);
+ // There should now be a free chunk of size 3 * |kPageSize| starting at offset
+ // 2 * |kPageSize| and the pointer to the highest allocated chunk should have
+ // also been updated to |base_| + 2 * |kPageSize|. This pointer is used to
+ // maintain the container mapping a chunk address to its previous chunk and
+ // this map is in turn used while merging previous contiguous chunks.
+
+ // Allocate more than 3 * |kPageSize| so that the free chunk of size 3 *
+ // |kPageSize| is not reused and |highest_allocated_chunk_| gets used instead.
+ scoped_ptr<DiscardableMemory> memory3(allocator_.Allocate(4 * kPageSize));
+ ASSERT_TRUE(memory3);
+
+ // Deleting |memory3| (whose size is 4 * |kPageSize|) should result in a merge
+ // with its previous chunk which is the free chunk of size |3 * kPageSize|.
+ memory3.reset();
+ memory3 = allocator_.Allocate((3 + 4) * kPageSize);
+ EXPECT_EQ(memory3->Memory(),
+ static_cast<const char*>(memory2->Memory()) + kPageSize);
+}
+
} // namespace internal
} // namespace base
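For orientation, the region layout that the new test drives can be traced as follows (offsets in units of kPageSize inside the 32-page test region; an illustrative walkthrough of the expected behavior, not code from the commit):

// memory1 = Allocate(1 page)   -> pages [0, 1) allocated.
// memory2 = Allocate(4 pages)  -> pages [1, 5) allocated; highest chunk at 1.
// memory2.reset()              -> pages [1, 5) become one free chunk.
// memory2 = Allocate(1 page)   -> the free chunk is split: page 1 is reused
//                                 and pages [2, 5) stay free; with the fix,
//                                 the highest chunk pointer moves to page 2.
// memory3 = Allocate(4 pages)  -> does not fit in the 3-page free chunk, so
//                                 it is carved at page 5 with its previous
//                                 chunk correctly set to page 2.
// memory3.reset()              -> merges with the free chunk at page 2 into
//                                 a single 7-page free chunk.
// Allocate(7 pages)            -> reuses that chunk, hence the expected
//                                 address memory2->Memory() + kPageSize.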