author     pliard@chromium.org <pliard@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>   2013-12-10 19:04:15 +0000
committer  pliard@chromium.org <pliard@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>   2013-12-10 19:04:15 +0000
commit     60b25c64bc64b05d712e5207762916a870f32907 (patch)
tree       535fb84776eb345fcfeabc5feff214c3b0de2ff6 /base/memory
parent     9976b24df235296b776c90b2933b46c051086904 (diff)
Revert "Revert 239763 "Add DiscardableMemoryAllocator to minimize use of...""
The previous attempt failed because ASAN complained during the
DiscardableMemory.TooLargeAllocationFails test, which is expected given that
the test deliberately requests an over-large allocation.
TBR=willchan
BUG=299828
Review URL: https://codereview.chromium.org/101953003
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@239816 0039d316-1c4b-4281-b951-d872f2087c98
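For context, the overflow guard that this test exercises boils down to the following condensed sketch of the helpers added to discardable_memory_android.cc in this patch (kPageSize is 4096 there; the exact code appears in the diff below):

#include <cstddef>
#include <limits>

const size_t kPageSize = 4096;

// Returns whether |size| can be rounded up to the next page boundary without
// overflowing size_t.
bool CheckSizeCanBeAlignedToNextPage(size_t size) {
  return size <= std::numeric_limits<size_t>::max() - kPageSize + 1;
}

// Rounds |size| up to the next multiple of kPageSize (caller must have checked
// the size first).
size_t AlignToNextPage(size_t size) {
  const size_t mask = ~(kPageSize - 1);
  return (size + kPageSize - 1) & mask;
}

CreateLockedMemory() now bails out early when the check fails rather than letting the alignment wrap around to a tiny allocation.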
Diffstat (limited to 'base/memory')
-rw-r--r--  base/memory/discardable_memory.h                              |   2
-rw-r--r--  base/memory/discardable_memory_allocator_android.cc           | 418
-rw-r--r--  base/memory/discardable_memory_allocator_android.h            |  65
-rw-r--r--  base/memory/discardable_memory_allocator_android_unittest.cc  | 232
-rw-r--r--  base/memory/discardable_memory_android.cc                     | 212
-rw-r--r--  base/memory/discardable_memory_android.h                      |  37
-rw-r--r--  base/memory/discardable_memory_unittest.cc                    |  17
7 files changed, 923 insertions, 60 deletions
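For orientation before the diff itself, the new allocator is used roughly as follows. This is a minimal sketch distilled from the unit tests added in this patch; the function name and the allocator name string are made up for illustration (the name is only used for debugging/measurement).

#include "base/memory/discardable_memory.h"
#include "base/memory/discardable_memory_allocator_android.h"
#include "base/memory/scoped_ptr.h"

void AllocatorUsageSketch() {
  base::internal::DiscardableMemoryAllocator allocator("example-allocator");

  // Allocate() returns a locked, page-aligned chunk carved out of a large
  // (32 MBytes minimum) ashmem region, or NULL on failure. The allocator must
  // outlive the returned chunk.
  scoped_ptr<base::DiscardableMemory> memory(allocator.Allocate(128 * 1024));
  if (!memory)
    return;

  static_cast<char*>(memory->Memory())[0] = 'a';  // Safe while locked.
  memory->Unlock();  // The kernel may now reclaim the pages under pressure.

  // Lock() must be called before touching the memory again; its return value
  // (see LockDiscardableMemoryStatus in discardable_memory.h) says whether the
  // contents were purged while unlocked.
  memory->Lock();
  memory->Unlock();
}

Allocate() can be invoked on any thread, but the allocator itself must be destroyed on the thread that created it, as noted in discardable_memory_allocator_android.h below.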
diff --git a/base/memory/discardable_memory.h b/base/memory/discardable_memory.h index d8fd4f6..cbc2db6 100644 --- a/base/memory/discardable_memory.h +++ b/base/memory/discardable_memory.h @@ -47,6 +47,8 @@ enum LockDiscardableMemoryStatus { // - Mac: http://trac.webkit.org/browser/trunk/Source/WebCore/platform/mac/PurgeableBufferMac.cpp // the comment starting with "vm_object_purgable_control" at // http://www.opensource.apple.com/source/xnu/xnu-792.13.8/osfmk/vm/vm_object.c +// +// Thread-safety: DiscardableMemory instances are not thread-safe. class BASE_EXPORT DiscardableMemory { public: virtual ~DiscardableMemory() {} diff --git a/base/memory/discardable_memory_allocator_android.cc b/base/memory/discardable_memory_allocator_android.cc new file mode 100644 index 0000000..5e10817 --- /dev/null +++ b/base/memory/discardable_memory_allocator_android.cc @@ -0,0 +1,418 @@ +// Copyright 2013 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "base/memory/discardable_memory_allocator_android.h" + +#include <algorithm> +#include <cmath> +#include <set> +#include <utility> + +#include "base/basictypes.h" +#include "base/containers/hash_tables.h" +#include "base/logging.h" +#include "base/memory/discardable_memory.h" +#include "base/memory/discardable_memory_android.h" +#include "base/memory/scoped_vector.h" +#include "base/synchronization/lock.h" +#include "base/threading/thread_checker.h" + +// The allocator consists of three parts (classes): +// - DiscardableMemoryAllocator: entry point of all allocations (through its +// Allocate() method) that are dispatched to the AshmemRegion instances (which +// it owns). +// - AshmemRegion: manages allocations and destructions inside a single large +// (e.g. 32 MBytes) ashmem region. +// - DiscardableAshmemChunk: class implementing the DiscardableMemory interface +// whose instances are returned to the client. DiscardableAshmemChunk lets the +// client seamlessly operate on a subrange of the ashmem region managed by +// AshmemRegion. + +namespace base { +namespace { + +// Only tolerate fragmentation in used chunks *caused by the client* (as opposed +// to the allocator when a free chunk is reused). The client can cause such +// fragmentation by e.g. requesting 4097 bytes. This size would be rounded up to +// 8192 by the allocator which would cause 4095 bytes of fragmentation (which is +// currently the maximum allowed). If the client requests 4096 bytes and a free +// chunk of 8192 bytes is available then the free chunk gets splitted into two +// pieces to minimize fragmentation (since 8192 - 4096 = 4096 which is greater +// than 4095). +// TODO(pliard): tune this if splitting chunks too often leads to performance +// issues. +const size_t kMaxChunkFragmentationBytes = 4096 - 1; + +} // namespace + +namespace internal { + +class DiscardableMemoryAllocator::DiscardableAshmemChunk + : public DiscardableMemory { + public: + // Note that |ashmem_region| must outlive |this|. + DiscardableAshmemChunk(AshmemRegion* ashmem_region, + int fd, + void* address, + size_t offset, + size_t size) + : ashmem_region_(ashmem_region), + fd_(fd), + address_(address), + offset_(offset), + size_(size), + locked_(true) { + } + + // Implemented below AshmemRegion since this requires the full definition of + // AshmemRegion. 
+ virtual ~DiscardableAshmemChunk(); + + // DiscardableMemory: + virtual LockDiscardableMemoryStatus Lock() OVERRIDE { + DCHECK(!locked_); + locked_ = true; + return internal::LockAshmemRegion(fd_, offset_, size_, address_); + } + + virtual void Unlock() OVERRIDE { + DCHECK(locked_); + locked_ = false; + internal::UnlockAshmemRegion(fd_, offset_, size_, address_); + } + + virtual void* Memory() const OVERRIDE { + return address_; + } + + private: + AshmemRegion* const ashmem_region_; + const int fd_; + void* const address_; + const size_t offset_; + const size_t size_; + bool locked_; + + DISALLOW_COPY_AND_ASSIGN(DiscardableAshmemChunk); +}; + +class DiscardableMemoryAllocator::AshmemRegion { + public: + // Note that |allocator| must outlive |this|. + static scoped_ptr<AshmemRegion> Create( + size_t size, + const std::string& name, + DiscardableMemoryAllocator* allocator) { + int fd; + void* base; + if (!internal::CreateAshmemRegion(name.c_str(), size, &fd, &base)) + return scoped_ptr<AshmemRegion>(); + return make_scoped_ptr(new AshmemRegion(fd, size, base, allocator)); + } + + virtual ~AshmemRegion() { + const bool result = internal::CloseAshmemRegion(fd_, size_, base_); + DCHECK(result); + } + + // Returns a new instance of DiscardableMemory whose size is greater or equal + // than |actual_size| (which is expected to be greater or equal than + // |client_requested_size|). + // Allocation works as follows: + // 1) Reuse a previously freed chunk and return it if it succeeded. See + // ReuseFreeChunk_Locked() below for more information. + // 2) If no free chunk could be reused and the region is not big enough for + // the requested size then NULL is returned. + // 3) If there is enough room in the ashmem region then a new chunk is + // returned. This new chunk starts at |offset_| which is the end of the + // previously highest chunk in the region. + scoped_ptr<DiscardableMemory> Allocate_Locked(size_t client_requested_size, + size_t actual_size) { + DCHECK_LE(client_requested_size, actual_size); + allocator_->lock_.AssertAcquired(); + scoped_ptr<DiscardableMemory> memory = ReuseFreeChunk_Locked( + client_requested_size, actual_size); + if (memory) + return memory.Pass(); + if (size_ - offset_ < actual_size) { + // This region does not have enough space left to hold the requested size. + return scoped_ptr<DiscardableMemory>(); + } + void* const address = static_cast<char*>(base_) + offset_; + memory.reset( + new DiscardableAshmemChunk(this, fd_, address, offset_, actual_size)); + used_to_previous_chunk_map_.insert( + std::make_pair(address, highest_allocated_chunk_)); + highest_allocated_chunk_ = address; + offset_ += actual_size; + DCHECK_LE(offset_, size_); + return memory.Pass(); + } + + void OnChunkDeletion(void* chunk, size_t size) { + AutoLock auto_lock(allocator_->lock_); + MergeAndAddFreeChunk_Locked(chunk, size); + // Note that |this| might be deleted beyond this point. + } + + private: + struct FreeChunk { + FreeChunk(void* previous_chunk, void* start, size_t size) + : previous_chunk(previous_chunk), + start(start), + size(size) { + } + + void* const previous_chunk; + void* const start; + const size_t size; + + bool is_null() const { return !start; } + + bool operator<(const FreeChunk& other) const { + return size < other.size; + } + }; + + // Note that |allocator| must outlive |this|. 
+ AshmemRegion(int fd, + size_t size, + void* base, + DiscardableMemoryAllocator* allocator) + : fd_(fd), + size_(size), + base_(base), + allocator_(allocator), + highest_allocated_chunk_(NULL), + offset_(0) { + DCHECK_GE(fd_, 0); + DCHECK_GE(size, kMinAshmemRegionSize); + DCHECK(base); + DCHECK(allocator); + } + + // Tries to reuse a previously freed chunk by doing a closest size match. + scoped_ptr<DiscardableMemory> ReuseFreeChunk_Locked( + size_t client_requested_size, + size_t actual_size) { + allocator_->lock_.AssertAcquired(); + const FreeChunk reused_chunk = RemoveFreeChunkFromIterator_Locked( + free_chunks_.lower_bound(FreeChunk(NULL, NULL, actual_size))); + if (reused_chunk.is_null()) + return scoped_ptr<DiscardableMemory>(); + + used_to_previous_chunk_map_.insert( + std::make_pair(reused_chunk.start, reused_chunk.previous_chunk)); + size_t reused_chunk_size = reused_chunk.size; + // |client_requested_size| is used below rather than |actual_size| to + // reflect the amount of bytes that would not be usable by the client (i.e. + // wasted). Using |actual_size| instead would not allow us to detect + // fragmentation caused by the client if he did misaligned allocations. + DCHECK_GE(reused_chunk.size, client_requested_size); + const size_t fragmentation_bytes = + reused_chunk.size - client_requested_size; + if (fragmentation_bytes > kMaxChunkFragmentationBytes) { + // Split the free chunk being recycled so that its unused tail doesn't get + // reused (i.e. locked) which would prevent it from being evicted under + // memory pressure. + reused_chunk_size = actual_size; + void* const new_chunk_start = + static_cast<char*>(reused_chunk.start) + actual_size; + DCHECK_GT(reused_chunk.size, actual_size); + const size_t new_chunk_size = reused_chunk.size - actual_size; + // Note that merging is not needed here since there can't be contiguous + // free chunks at this point. + AddFreeChunk_Locked( + FreeChunk(reused_chunk.start, new_chunk_start, new_chunk_size)); + } + const size_t offset = + static_cast<char*>(reused_chunk.start) - static_cast<char*>(base_); + internal::LockAshmemRegion( + fd_, offset, reused_chunk_size, reused_chunk.start); + scoped_ptr<DiscardableMemory> memory( + new DiscardableAshmemChunk(this, fd_, reused_chunk.start, offset, + reused_chunk_size)); + return memory.Pass(); + } + + // Makes the chunk identified with the provided arguments free and possibly + // merges this chunk with the previous and next contiguous ones. + // If the provided chunk is the only one used (and going to be freed) in the + // region then the internal ashmem region is closed so that the underlying + // physical pages are immediately released. + // Note that free chunks are unlocked therefore they can be reclaimed by the + // kernel if needed (under memory pressure) but they are not immediately + // released unfortunately since madvise(MADV_REMOVE) and + // fallocate(FALLOC_FL_PUNCH_HOLE) don't seem to work on ashmem. This might + // change in versions of kernel >=3.5 though. The fact that free chunks are + // not immediately released is the reason why we are trying to minimize + // fragmentation in order not to cause "artificial" memory pressure. + void MergeAndAddFreeChunk_Locked(void* chunk, size_t size) { + allocator_->lock_.AssertAcquired(); + size_t new_free_chunk_size = size; + // Merge with the previous chunk. 
+ void* first_free_chunk = chunk; + DCHECK(!used_to_previous_chunk_map_.empty()); + const hash_map<void*, void*>::iterator previous_chunk_it = + used_to_previous_chunk_map_.find(chunk); + DCHECK(previous_chunk_it != used_to_previous_chunk_map_.end()); + void* previous_chunk = previous_chunk_it->second; + used_to_previous_chunk_map_.erase(previous_chunk_it); + if (previous_chunk) { + const FreeChunk free_chunk = RemoveFreeChunk_Locked(previous_chunk); + if (!free_chunk.is_null()) { + new_free_chunk_size += free_chunk.size; + first_free_chunk = previous_chunk; + // There should not be more contiguous previous free chunks. + DCHECK(!address_to_free_chunk_map_.count(free_chunk.previous_chunk)); + } + } + // Merge with the next chunk if free and present. + void* next_chunk = static_cast<char*>(chunk) + size; + const FreeChunk next_free_chunk = RemoveFreeChunk_Locked(next_chunk); + if (!next_free_chunk.is_null()) { + new_free_chunk_size += next_free_chunk.size; + // Same as above. + DCHECK(!address_to_free_chunk_map_.count(static_cast<char*>(next_chunk) + + next_free_chunk.size)); + } + const bool whole_ashmem_region_is_free = + used_to_previous_chunk_map_.empty(); + if (!whole_ashmem_region_is_free) { + AddFreeChunk_Locked( + FreeChunk(previous_chunk, first_free_chunk, new_free_chunk_size)); + return; + } + // The whole ashmem region is free thus it can be deleted. + DCHECK_EQ(base_, first_free_chunk); + DCHECK(free_chunks_.empty()); + DCHECK(address_to_free_chunk_map_.empty()); + DCHECK(used_to_previous_chunk_map_.empty()); + allocator_->DeleteAshmemRegion_Locked(this); // Deletes |this|. + } + + void AddFreeChunk_Locked(const FreeChunk& free_chunk) { + allocator_->lock_.AssertAcquired(); + const std::multiset<FreeChunk>::iterator it = free_chunks_.insert( + free_chunk); + address_to_free_chunk_map_.insert(std::make_pair(free_chunk.start, it)); + // Update the next used contiguous chunk, if any, since its previous chunk + // may have changed due to free chunks merging/splitting. + void* const next_used_contiguous_chunk = + static_cast<char*>(free_chunk.start) + free_chunk.size; + hash_map<void*, void*>::iterator previous_it = + used_to_previous_chunk_map_.find(next_used_contiguous_chunk); + if (previous_it != used_to_previous_chunk_map_.end()) + previous_it->second = free_chunk.start; + } + + // Finds and removes the free chunk, if any, whose start address is + // |chunk_start|. Returns a copy of the unlinked free chunk or a free chunk + // whose content is null if it was not found. + FreeChunk RemoveFreeChunk_Locked(void* chunk_start) { + allocator_->lock_.AssertAcquired(); + const hash_map< + void*, std::multiset<FreeChunk>::iterator>::iterator it = + address_to_free_chunk_map_.find(chunk_start); + if (it == address_to_free_chunk_map_.end()) + return FreeChunk(NULL, NULL, 0U); + return RemoveFreeChunkFromIterator_Locked(it->second); + } + + // Same as above but takes an iterator in. 
+ FreeChunk RemoveFreeChunkFromIterator_Locked( + std::multiset<FreeChunk>::iterator free_chunk_it) { + allocator_->lock_.AssertAcquired(); + if (free_chunk_it == free_chunks_.end()) + return FreeChunk(NULL, NULL, 0U); + DCHECK(free_chunk_it != free_chunks_.end()); + const FreeChunk free_chunk(*free_chunk_it); + address_to_free_chunk_map_.erase(free_chunk_it->start); + free_chunks_.erase(free_chunk_it); + return free_chunk; + } + + const int fd_; + const size_t size_; + void* const base_; + DiscardableMemoryAllocator* const allocator_; + void* highest_allocated_chunk_; + // Points to the end of |highest_allocated_chunk_|. + size_t offset_; + // Allows free chunks recycling (lookup, insertion and removal) in O(log N). + // Note that FreeChunk values are indexed by their size and also note that + // multiple free chunks can have the same size (which is why multiset<> is + // used instead of e.g. set<>). + std::multiset<FreeChunk> free_chunks_; + // Used while merging free contiguous chunks to erase free chunks (from their + // start address) in constant time. Note that multiset<>::{insert,erase}() + // don't invalidate iterators (except the one for the element being removed + // obviously). + hash_map< + void*, std::multiset<FreeChunk>::iterator> address_to_free_chunk_map_; + // Maps the address of *used* chunks to the address of their previous + // contiguous chunk. + hash_map<void*, void*> used_to_previous_chunk_map_; + + DISALLOW_COPY_AND_ASSIGN(AshmemRegion); +}; + +DiscardableMemoryAllocator::DiscardableAshmemChunk::~DiscardableAshmemChunk() { + if (locked_) + internal::UnlockAshmemRegion(fd_, offset_, size_, address_); + ashmem_region_->OnChunkDeletion(address_, size_); +} + +DiscardableMemoryAllocator::DiscardableMemoryAllocator(const std::string& name) + : name_(name) { +} + +DiscardableMemoryAllocator::~DiscardableMemoryAllocator() { + DCHECK(thread_checker_.CalledOnValidThread()); + DCHECK(ashmem_regions_.empty()); +} + +scoped_ptr<DiscardableMemory> DiscardableMemoryAllocator::Allocate( + size_t size) { + const size_t aligned_size = internal::AlignToNextPage(size); + // TODO(pliard): make this function less naive by e.g. moving the free chunks + // multiset to the allocator itself in order to decrease even more + // fragmentation/speedup allocation. Note that there should not be more than a + // couple (=5) of AshmemRegion instances in practice though. + AutoLock auto_lock(lock_); + DCHECK_LE(ashmem_regions_.size(), 5U); + for (ScopedVector<AshmemRegion>::iterator it = ashmem_regions_.begin(); + it != ashmem_regions_.end(); ++it) { + scoped_ptr<DiscardableMemory> memory( + (*it)->Allocate_Locked(size, aligned_size)); + if (memory) + return memory.Pass(); + } + scoped_ptr<AshmemRegion> new_region( + AshmemRegion::Create( + std::max(static_cast<size_t>(kMinAshmemRegionSize), aligned_size), + name_.c_str(), this)); + if (!new_region) { + // TODO(pliard): consider adding an histogram to see how often this happens. + return scoped_ptr<DiscardableMemory>(); + } + ashmem_regions_.push_back(new_region.release()); + return ashmem_regions_.back()->Allocate_Locked(size, aligned_size); +} + +void DiscardableMemoryAllocator::DeleteAshmemRegion_Locked( + AshmemRegion* region) { + lock_.AssertAcquired(); + // Note that there should not be more than a couple of ashmem region instances + // in |ashmem_regions_|. 
+ DCHECK_LE(ashmem_regions_.size(), 5U); + const ScopedVector<AshmemRegion>::iterator it = std::find( + ashmem_regions_.begin(), ashmem_regions_.end(), region); + DCHECK_NE(ashmem_regions_.end(), it); + std::swap(*it, ashmem_regions_.back()); + ashmem_regions_.pop_back(); +} + +} // namespace internal +} // namespace base diff --git a/base/memory/discardable_memory_allocator_android.h b/base/memory/discardable_memory_allocator_android.h new file mode 100644 index 0000000..7991656 --- /dev/null +++ b/base/memory/discardable_memory_allocator_android.h @@ -0,0 +1,65 @@ +// Copyright 2013 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef BASE_MEMORY_DISCARDABLE_MEMORY_ALLOCATOR_H_ +#define BASE_MEMORY_DISCARDABLE_MEMORY_ALLOCATOR_H_ + +#include <string> + +#include "base/base_export.h" +#include "base/basictypes.h" +#include "base/memory/scoped_ptr.h" +#include "base/memory/scoped_vector.h" +#include "base/synchronization/lock.h" +#include "base/threading/thread_checker.h" + +namespace base { + +class DiscardableMemory; + +namespace internal { + +// On Android ashmem is used to implement discardable memory. It is backed by a +// file (descriptor) thus is a limited resource. This allocator minimizes the +// problem by allocating large ashmem regions internally and returning smaller +// chunks to the client. +// Allocated chunks are systematically aligned on a page boundary therefore this +// allocator should not be used for small allocations. +// +// Threading: The allocator must be deleted on the thread it was constructed on +// although its Allocate() method can be invoked on any thread. See +// discardable_memory.h for DiscardableMemory's threading guarantees. +class BASE_EXPORT_PRIVATE DiscardableMemoryAllocator { + public: + // Exposed for testing. + enum { + kMinAshmemRegionSize = 32 * 1024 * 1024, + }; + + // Note that |name| is only used for debugging/measurement purposes. + explicit DiscardableMemoryAllocator(const std::string& name); + ~DiscardableMemoryAllocator(); + + // Note that the allocator must outlive the returned DiscardableMemory + // instance. + scoped_ptr<DiscardableMemory> Allocate(size_t size); + + private: + class AshmemRegion; + class DiscardableAshmemChunk; + + void DeleteAshmemRegion_Locked(AshmemRegion* region); + + base::ThreadChecker thread_checker_; + const std::string name_; + base::Lock lock_; + ScopedVector<AshmemRegion> ashmem_regions_; + + DISALLOW_COPY_AND_ASSIGN(DiscardableMemoryAllocator); +}; + +} // namespace internal +} // namespace base + +#endif // BASE_MEMORY_DISCARDABLE_MEMORY_ALLOCATOR_H_ diff --git a/base/memory/discardable_memory_allocator_android_unittest.cc b/base/memory/discardable_memory_allocator_android_unittest.cc new file mode 100644 index 0000000..97cf5d4 --- /dev/null +++ b/base/memory/discardable_memory_allocator_android_unittest.cc @@ -0,0 +1,232 @@ +// Copyright 2013 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#include "base/memory/discardable_memory_allocator_android.h" + +#include <sys/types.h> +#include <unistd.h> + +#include "base/memory/discardable_memory.h" +#include "base/memory/scoped_ptr.h" +#include "base/strings/string_number_conversions.h" +#include "base/strings/string_split.h" +#include "base/strings/stringprintf.h" +#include "build/build_config.h" +#include "testing/gtest/include/gtest/gtest.h" + +namespace base { +namespace internal { + +const char kAllocatorName[] = "allocator-for-testing"; + +const size_t kPageSize = 4096; +const size_t kMinAshmemRegionSize = + DiscardableMemoryAllocator::kMinAshmemRegionSize; + +class DiscardableMemoryAllocatorTest : public testing::Test { + protected: + DiscardableMemoryAllocatorTest() : allocator_(kAllocatorName) {} + + DiscardableMemoryAllocator allocator_; +}; + +void WriteToDiscardableMemory(DiscardableMemory* memory, size_t size) { + // Write to the first and the last pages only to avoid paging in up to 64 + // MBytes. + static_cast<char*>(memory->Memory())[0] = 'a'; + static_cast<char*>(memory->Memory())[size - 1] = 'a'; +} + +TEST_F(DiscardableMemoryAllocatorTest, Basic) { + const size_t size = 128; + scoped_ptr<DiscardableMemory> memory(allocator_.Allocate(size)); + ASSERT_TRUE(memory); + WriteToDiscardableMemory(memory.get(), size); +} + +TEST_F(DiscardableMemoryAllocatorTest, LargeAllocation) { + // Note that large allocations should just use DiscardableMemoryAndroidSimple + // instead. + const size_t size = 64 * 1024 * 1024; + scoped_ptr<DiscardableMemory> memory(allocator_.Allocate(size)); + ASSERT_TRUE(memory); + WriteToDiscardableMemory(memory.get(), size); +} + +TEST_F(DiscardableMemoryAllocatorTest, ChunksArePageAligned) { + scoped_ptr<DiscardableMemory> memory(allocator_.Allocate(kPageSize)); + ASSERT_TRUE(memory); + EXPECT_EQ(0U, reinterpret_cast<uint64_t>(memory->Memory()) % kPageSize); + WriteToDiscardableMemory(memory.get(), kPageSize); +} + +TEST_F(DiscardableMemoryAllocatorTest, AllocateFreeAllocate) { + scoped_ptr<DiscardableMemory> memory(allocator_.Allocate(kPageSize)); + // Extra allocation that prevents the region from being deleted when |memory| + // gets deleted. + scoped_ptr<DiscardableMemory> memory_lock(allocator_.Allocate(kPageSize)); + ASSERT_TRUE(memory); + void* const address = memory->Memory(); + memory->Unlock(); // Tests that the reused chunk is being locked correctly. + memory.reset(); + memory = allocator_.Allocate(kPageSize); + ASSERT_TRUE(memory); + // The previously freed chunk should be reused. + EXPECT_EQ(address, memory->Memory()); + WriteToDiscardableMemory(memory.get(), kPageSize); +} + +TEST_F(DiscardableMemoryAllocatorTest, FreeingWholeAshmemRegionClosesAshmem) { + scoped_ptr<DiscardableMemory> memory(allocator_.Allocate(kPageSize)); + ASSERT_TRUE(memory); + const int kMagic = 0xdeadbeef; + *static_cast<int*>(memory->Memory()) = kMagic; + memory.reset(); + // The previous ashmem region should have been closed thus it should not be + // reused. 
+ memory = allocator_.Allocate(kPageSize); + ASSERT_TRUE(memory); + EXPECT_NE(kMagic, *static_cast<const int*>(memory->Memory())); +} + +TEST_F(DiscardableMemoryAllocatorTest, AllocateUsesBestFitAlgorithm) { + scoped_ptr<DiscardableMemory> memory1(allocator_.Allocate(3 * kPageSize)); + ASSERT_TRUE(memory1); + scoped_ptr<DiscardableMemory> memory2(allocator_.Allocate(2 * kPageSize)); + ASSERT_TRUE(memory2); + scoped_ptr<DiscardableMemory> memory3(allocator_.Allocate(1 * kPageSize)); + ASSERT_TRUE(memory3); + void* const address_3 = memory3->Memory(); + memory1.reset(); + // Don't free |memory2| to avoid merging the 3 blocks together. + memory3.reset(); + memory1 = allocator_.Allocate(1 * kPageSize); + ASSERT_TRUE(memory1); + // The chunk whose size is closest to the requested size should be reused. + EXPECT_EQ(address_3, memory1->Memory()); + WriteToDiscardableMemory(memory1.get(), kPageSize); +} + +TEST_F(DiscardableMemoryAllocatorTest, MergeFreeChunks) { + scoped_ptr<DiscardableMemory> memory1(allocator_.Allocate(kPageSize)); + ASSERT_TRUE(memory1); + scoped_ptr<DiscardableMemory> memory2(allocator_.Allocate(kPageSize)); + ASSERT_TRUE(memory2); + scoped_ptr<DiscardableMemory> memory3(allocator_.Allocate(kPageSize)); + ASSERT_TRUE(memory3); + scoped_ptr<DiscardableMemory> memory4(allocator_.Allocate(kPageSize)); + ASSERT_TRUE(memory4); + void* const memory1_address = memory1->Memory(); + memory1.reset(); + memory3.reset(); + // Freeing |memory2| (located between memory1 and memory3) should merge the + // three free blocks together. + memory2.reset(); + memory1 = allocator_.Allocate(3 * kPageSize); + EXPECT_EQ(memory1_address, memory1->Memory()); +} + +TEST_F(DiscardableMemoryAllocatorTest, MergeFreeChunksAdvanced) { + scoped_ptr<DiscardableMemory> memory1(allocator_.Allocate(4 * kPageSize)); + ASSERT_TRUE(memory1); + scoped_ptr<DiscardableMemory> memory2(allocator_.Allocate(4 * kPageSize)); + ASSERT_TRUE(memory2); + void* const memory1_address = memory1->Memory(); + memory1.reset(); + memory1 = allocator_.Allocate(2 * kPageSize); + memory2.reset(); + // At this point, the region should be in this state: + // 8 KBytes (used), 24 KBytes (free). + memory2 = allocator_.Allocate(6 * kPageSize); + EXPECT_EQ( + static_cast<const char*>(memory2->Memory()), + static_cast<const char*>(memory1_address) + 2 * kPageSize); +} + +TEST_F(DiscardableMemoryAllocatorTest, MergeFreeChunksAdvanced2) { + scoped_ptr<DiscardableMemory> memory1(allocator_.Allocate(4 * kPageSize)); + ASSERT_TRUE(memory1); + scoped_ptr<DiscardableMemory> memory2(allocator_.Allocate(4 * kPageSize)); + ASSERT_TRUE(memory2); + void* const memory1_address = memory1->Memory(); + memory1.reset(); + memory1 = allocator_.Allocate(2 * kPageSize); + scoped_ptr<DiscardableMemory> memory3(allocator_.Allocate(2 * kPageSize)); + // At this point, the region should be in this state: + // 8 KBytes (used), 8 KBytes (used), 16 KBytes (used). + memory3.reset(); + memory2.reset(); + // At this point, the region should be in this state: + // 8 KBytes (used), 24 KBytes (free). 
+ memory2 = allocator_.Allocate(6 * kPageSize); + EXPECT_EQ( + static_cast<const char*>(memory2->Memory()), + static_cast<const char*>(memory1_address) + 2 * kPageSize); +} + +TEST_F(DiscardableMemoryAllocatorTest, MergeFreeChunksAndDeleteAshmemRegion) { + scoped_ptr<DiscardableMemory> memory1(allocator_.Allocate(4 * kPageSize)); + ASSERT_TRUE(memory1); + scoped_ptr<DiscardableMemory> memory2(allocator_.Allocate(4 * kPageSize)); + ASSERT_TRUE(memory2); + memory1.reset(); + memory1 = allocator_.Allocate(2 * kPageSize); + scoped_ptr<DiscardableMemory> memory3(allocator_.Allocate(2 * kPageSize)); + // At this point, the region should be in this state: + // 8 KBytes (used), 8 KBytes (used), 16 KBytes (used). + memory1.reset(); + memory3.reset(); + // At this point, the region should be in this state: + // 8 KBytes (free), 8 KBytes (used), 8 KBytes (free). + const int kMagic = 0xdeadbeef; + *static_cast<int*>(memory2->Memory()) = kMagic; + memory2.reset(); + // The whole region should have been deleted. + memory2 = allocator_.Allocate(2 * kPageSize); + EXPECT_NE(kMagic, *static_cast<int*>(memory2->Memory())); +} + +TEST_F(DiscardableMemoryAllocatorTest, + TooLargeFreeChunksDontCauseTooMuchFragmentationWhenRecycled) { + // Keep |memory_1| below allocated so that the ashmem region doesn't get + // closed when |memory_2| is deleted. + scoped_ptr<DiscardableMemory> memory_1(allocator_.Allocate(64 * 1024)); + ASSERT_TRUE(memory_1); + scoped_ptr<DiscardableMemory> memory_2(allocator_.Allocate(32 * 1024)); + ASSERT_TRUE(memory_2); + void* const address = memory_2->Memory(); + memory_2.reset(); + const size_t size = 16 * 1024; + memory_2 = allocator_.Allocate(size); + ASSERT_TRUE(memory_2); + EXPECT_EQ(address, memory_2->Memory()); + WriteToDiscardableMemory(memory_2.get(), size); + scoped_ptr<DiscardableMemory> memory_3(allocator_.Allocate(size)); + // The unused tail (16 KBytes large) of the previously freed chunk should be + // reused. + EXPECT_EQ(static_cast<char*>(address) + size, memory_3->Memory()); + WriteToDiscardableMemory(memory_3.get(), size); +} + +TEST_F(DiscardableMemoryAllocatorTest, UseMultipleAshmemRegions) { + // Leave one page untouched at the end of the ashmem region. + const size_t size = kMinAshmemRegionSize - kPageSize; + scoped_ptr<DiscardableMemory> memory1(allocator_.Allocate(size)); + ASSERT_TRUE(memory1); + WriteToDiscardableMemory(memory1.get(), size); + + scoped_ptr<DiscardableMemory> memory2( + allocator_.Allocate(kMinAshmemRegionSize)); + ASSERT_TRUE(memory2); + WriteToDiscardableMemory(memory2.get(), kMinAshmemRegionSize); + // The last page of the first ashmem region should be used for this + // allocation. + scoped_ptr<DiscardableMemory> memory3(allocator_.Allocate(kPageSize)); + ASSERT_TRUE(memory3); + WriteToDiscardableMemory(memory3.get(), kPageSize); + EXPECT_EQ(memory3->Memory(), static_cast<char*>(memory1->Memory()) + size); +} + +} // namespace internal +} // namespace base diff --git a/base/memory/discardable_memory_android.cc b/base/memory/discardable_memory_android.cc index 8fee53e..7e84967 100644 --- a/base/memory/discardable_memory_android.cc +++ b/base/memory/discardable_memory_android.cc @@ -2,39 +2,139 @@ // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. 
-#include "base/memory/discardable_memory.h" +#include "base/memory/discardable_memory_android.h" #include <sys/mman.h> +#include <sys/resource.h> +#include <sys/time.h> #include <unistd.h> +#include <limits> + #include "base/basictypes.h" #include "base/compiler_specific.h" #include "base/file_util.h" #include "base/lazy_instance.h" #include "base/logging.h" +#include "base/memory/discardable_memory.h" +#include "base/memory/discardable_memory_allocator_android.h" #include "base/synchronization/lock.h" #include "third_party/ashmem/ashmem.h" namespace base { namespace { -// Protects |g_num_discardable_memory| below. -base::LazyInstance<base::Lock>::Leaky g_discardable_memory_lock = - LAZY_INSTANCE_INITIALIZER; +const size_t kPageSize = 4096; + +const char kAshmemAllocatorName[] = "DiscardableMemoryAllocator"; + +struct GlobalContext { + GlobalContext() + : ashmem_fd_limit(GetSoftFDLimit()), + allocator(kAshmemAllocatorName), + ashmem_fd_count_(0) { + } + + const int ashmem_fd_limit; + internal::DiscardableMemoryAllocator allocator; + Lock lock; + + int ashmem_fd_count() const { + lock.AssertAcquired(); + return ashmem_fd_count_; + } + + void decrement_ashmem_fd_count() { + lock.AssertAcquired(); + --ashmem_fd_count_; + } + + void increment_ashmem_fd_count() { + lock.AssertAcquired(); + ++ashmem_fd_count_; + } + + private: + static int GetSoftFDLimit() { + struct rlimit limit_info; + if (getrlimit(RLIMIT_NOFILE, &limit_info) != 0) + return 128; + // Allow 25% of file descriptor capacity for ashmem. + return limit_info.rlim_cur / 4; + } + + int ashmem_fd_count_; +}; + +LazyInstance<GlobalContext>::Leaky g_context = LAZY_INSTANCE_INITIALIZER; + +// This is the default implementation of DiscardableMemory on Android which is +// used when file descriptor usage is under the soft limit. When file descriptor +// usage gets too high the discardable memory allocator is used instead. See +// ShouldUseAllocator() below for more details. +class DiscardableMemoryAndroidSimple : public DiscardableMemory { + public: + DiscardableMemoryAndroidSimple(int fd, void* address, size_t size) + : fd_(fd), + memory_(address), + size_(size) { + DCHECK_GE(fd_, 0); + DCHECK(memory_); + } + + virtual ~DiscardableMemoryAndroidSimple() { + internal::CloseAshmemRegion(fd_, size_, memory_); + } + + // DiscardableMemory: + virtual LockDiscardableMemoryStatus Lock() OVERRIDE { + return internal::LockAshmemRegion(fd_, 0, size_, memory_); + } + + virtual void Unlock() OVERRIDE { + internal::UnlockAshmemRegion(fd_, 0, size_, memory_); + } + + virtual void* Memory() const OVERRIDE { + return memory_; + } + + private: + const int fd_; + void* const memory_; + const size_t size_; + + DISALLOW_COPY_AND_ASSIGN(DiscardableMemoryAndroidSimple); +}; + +int GetCurrentNumberOfAshmemFDs() { + AutoLock lock(g_context.Get().lock); + return g_context.Get().ashmem_fd_count(); +} -// Total number of discardable memory in the process. -int g_num_discardable_memory = 0; +// Returns whether the provided size can be safely page-aligned (without causing +// an overflow). +bool CheckSizeCanBeAlignedToNextPage(size_t size) { + return size <= std::numeric_limits<size_t>::max() - kPageSize + 1; +} -// Upper limit on the number of discardable memory to avoid hitting file -// descriptor limit. 
-const int kDiscardableMemoryNumLimit = 128; +} // namespace + +namespace internal { + +size_t AlignToNextPage(size_t size) { + DCHECK_EQ(static_cast<int>(kPageSize), getpagesize()); + DCHECK(CheckSizeCanBeAlignedToNextPage(size)); + const size_t mask = ~(kPageSize - 1); + return (size + kPageSize - 1) & mask; +} bool CreateAshmemRegion(const char* name, size_t size, int* out_fd, void** out_address) { - base::AutoLock lock(g_discardable_memory_lock.Get()); - if (g_num_discardable_memory + 1 > kDiscardableMemoryNumLimit) + AutoLock lock(g_context.Get().lock); + if (g_context.Get().ashmem_fd_count() + 1 > g_context.Get().ashmem_fd_limit) return false; int fd = ashmem_create_region(name, size); if (fd < 0) { @@ -60,15 +160,15 @@ bool CreateAshmemRegion(const char* name, } ignore_result(fd_closer.release()); - ++g_num_discardable_memory; + g_context.Get().increment_ashmem_fd_count(); *out_fd = fd; *out_address = address; return true; } -bool DeleteAshmemRegion(int fd, size_t size, void* address) { - base::AutoLock lock(g_discardable_memory_lock.Get()); - --g_num_discardable_memory; +bool CloseAshmemRegion(int fd, size_t size, void* address) { + AutoLock lock(g_context.Get().lock); + g_context.Get().decrement_ashmem_fd_count(); if (munmap(address, size) == -1) { DPLOG(ERROR) << "Failed to unmap memory."; close(fd); @@ -96,62 +196,54 @@ bool UnlockAshmemRegion(int fd, size_t off, size_t size, const void* address) { return !failed; } -class DiscardableMemoryAndroid : public DiscardableMemory { - public: - DiscardableMemoryAndroid(int fd, void* address, size_t size) - : fd_(fd), - memory_(address), - size_(size) { - DCHECK_GE(fd_, 0); - DCHECK(memory_); - } - - virtual ~DiscardableMemoryAndroid() { - DeleteAshmemRegion(fd_, size_, memory_); - } - - // DiscardableMemory: - virtual LockDiscardableMemoryStatus Lock() OVERRIDE { - return LockAshmemRegion(fd_, 0, size_, memory_); - } - - virtual void Unlock() OVERRIDE { - UnlockAshmemRegion(fd_, 0, size_, memory_); - } - - virtual void* Memory() const OVERRIDE { - return memory_; - } - - private: - const int fd_; - void* const memory_; - const size_t size_; - - DISALLOW_COPY_AND_ASSIGN(DiscardableMemoryAndroid); -}; - -} // namespace +} // namespace internal // static bool DiscardableMemory::SupportedNatively() { return true; } +// Allocation can happen in two ways: +// - Each client-requested allocation is backed by an individual ashmem region. +// This allows deleting ashmem regions individually by closing the ashmem file +// descriptor. This is the default path that is taken when file descriptor usage +// allows us to do so or when the allocation size would require and entire +// ashmem region. +// - Allocations are performed by the global allocator when file descriptor +// usage gets too high. This still allows unpinning but does not allow deleting +// (i.e. releasing the physical pages backing) individual regions. +// +// TODO(pliard): consider tuning the size threshold used below. For instance we +// might want to make it a fraction of kMinAshmemRegionSize and also +// systematically have small allocations go through the allocator to let big +// allocations systematically go through individual ashmem regions. +// // static scoped_ptr<DiscardableMemory> DiscardableMemory::CreateLockedMemory( size_t size) { + if (!CheckSizeCanBeAlignedToNextPage(size)) + return scoped_ptr<DiscardableMemory>(); // Pinning & unpinning works with page granularity therefore align the size // upfront. 
- const size_t kPageSize = 4096; - const size_t mask = ~(kPageSize - 1); - size = (size + kPageSize - 1) & mask; - int fd; - void* address; - if (!CreateAshmemRegion("", size, &fd, &address)) - return scoped_ptr<DiscardableMemory>(); - return scoped_ptr<DiscardableMemory>( - new DiscardableMemoryAndroid(fd, address, size)); + const size_t aligned_size = internal::AlignToNextPage(size); + // Note that the following code is slightly racy. The worst that can happen in + // practice though is taking the wrong decision (e.g. using the allocator + // rather than DiscardableMemoryAndroidSimple). Moreover keeping the lock + // acquired for the whole allocation would cause a deadlock when the allocator + // tries to create an ashmem region. + const size_t kAllocatorRegionSize = + internal::DiscardableMemoryAllocator::kMinAshmemRegionSize; + GlobalContext* const global_context = g_context.Pointer(); + if (aligned_size >= kAllocatorRegionSize || + GetCurrentNumberOfAshmemFDs() < 0.9 * global_context->ashmem_fd_limit) { + int fd; + void* address; + if (internal::CreateAshmemRegion("", aligned_size, &fd, &address)) { + return scoped_ptr<DiscardableMemory>( + new DiscardableMemoryAndroidSimple(fd, address, aligned_size)); + } + } + return global_context->allocator.Allocate(size); } // static diff --git a/base/memory/discardable_memory_android.h b/base/memory/discardable_memory_android.h new file mode 100644 index 0000000..9db78b3 --- /dev/null +++ b/base/memory/discardable_memory_android.h @@ -0,0 +1,37 @@ +// Copyright 2013 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +// Please use discardable_memory.h since this is just an internal file providing +// utility functions used both by discardable_memory_android.cc and +// discardable_memory_allocator_android.cc. + +#ifndef BASE_MEMORY_DISCARDABLE_MEMORY_ANDROID_H_ +#define BASE_MEMORY_DISCARDABLE_MEMORY_ANDROID_H_ + +#include "base/basictypes.h" +#include "base/memory/discardable_memory.h" + +namespace base { +namespace internal { + +size_t AlignToNextPage(size_t size); + +bool CreateAshmemRegion(const char* name, size_t size, int* fd, void** address); + +bool CloseAshmemRegion(int fd, size_t size, void* address); + +LockDiscardableMemoryStatus LockAshmemRegion(int fd, + size_t offset, + size_t size, + const void* address); + +bool UnlockAshmemRegion(int fd, + size_t offset, + size_t size, + const void* address); + +} // namespace internal +} // namespace base + +#endif // BASE_MEMORY_DISCARDABLE_MEMORY_ANDROID_H_ diff --git a/base/memory/discardable_memory_unittest.cc b/base/memory/discardable_memory_unittest.cc index eb730f1..c9f67b2 100644 --- a/base/memory/discardable_memory_unittest.cc +++ b/base/memory/discardable_memory_unittest.cc @@ -3,12 +3,29 @@ // found in the LICENSE file. #include "base/memory/discardable_memory.h" + +#include <limits> + #include "testing/gtest/include/gtest/gtest.h" namespace base { const size_t kSize = 1024; +#if defined(OS_ANDROID) +TEST(DiscardableMemoryTest, TooLargeAllocationFails) { + const size_t kPageSize = 4096; + const size_t max_allowed_allocation_size = + std::numeric_limits<size_t>::max() - kPageSize + 1; + scoped_ptr<DiscardableMemory> memory( + DiscardableMemory::CreateLockedMemory(max_allowed_allocation_size + 1)); + // On certain platforms (e.g. Android), page-alignment would have caused an + // overflow resulting in a small allocation if the input size wasn't checked + // correctly. 
+ ASSERT_FALSE(memory); +} +#endif + TEST(DiscardableMemoryTest, SupportedNatively) { #if defined(DISCARDABLE_MEMORY_ALWAYS_SUPPORTED_NATIVELY) ASSERT_TRUE(DiscardableMemory::SupportedNatively()); |