diff options
author | pliard@chromium.org <pliard@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98> | 2013-12-26 18:12:22 +0000 |
---|---|---|
committer | pliard@chromium.org <pliard@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98> | 2013-12-26 18:12:22 +0000 |
commit | 1f48f758c388b0f402e5c166a4d63b7460c7dfa4 (patch) | |
tree | 2519b5a544d771f78c5a5ba0ea329453b34e43f0 /base | |
parent | 7935926e17379c232cef3a30a14684c704dc55de (diff) | |
download | chromium_src-1f48f758c388b0f402e5c166a4d63b7460c7dfa4.zip chromium_src-1f48f758c388b0f402e5c166a4d63b7460c7dfa4.tar.gz chromium_src-1f48f758c388b0f402e5c166a4d63b7460c7dfa4.tar.bz2 |
Always use the ashmem allocator when creating DiscardableMemory instances.
Experiments and measurements showed that the possibly accumulated discardable
external fragmentation caused by the allocator's free chunks doesn't matter
much in practice.
The fact that (unpinned) free chunks could be contributing to private dirty was
the main reason to use the allocator only as a fallback (when file descriptor
usage gets too high). However experimentation showed that unpinned ashmem
eviction happened without degrading user experience (i.e. without killing other
apps or causing jankiness).
This significantly simplifies the DiscardableMemory creation path on Android
and should provide a speed boost too (10x on Nexus10) in addition to the fact
that ashmem creation + mmap() appeared to have very unpredictable performance.
Review URL: https://codereview.chromium.org/98213004
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@242544 0039d316-1c4b-4281-b951-d872f2087c98
Diffstat (limited to 'base')
-rw-r--r-- | base/memory/discardable_memory_allocator_android.cc | 2 | ||||
-rw-r--r-- | base/memory/discardable_memory_allocator_android_unittest.cc | 15 | ||||
-rw-r--r-- | base/memory/discardable_memory_android.cc | 141 | ||||
-rw-r--r-- | base/memory/discardable_memory_android.h | 1 | ||||
-rw-r--r-- | base/memory/discardable_memory_unittest.cc | 14 |
5 files changed, 28 insertions, 145 deletions
diff --git a/base/memory/discardable_memory_allocator_android.cc b/base/memory/discardable_memory_allocator_android.cc index a0a3aff..ceb49151 100644 --- a/base/memory/discardable_memory_allocator_android.cc +++ b/base/memory/discardable_memory_allocator_android.cc @@ -383,6 +383,8 @@ DiscardableMemoryAllocator::~DiscardableMemoryAllocator() { scoped_ptr<DiscardableMemory> DiscardableMemoryAllocator::Allocate( size_t size) { const size_t aligned_size = internal::AlignToNextPage(size); + if (!aligned_size) + return scoped_ptr<DiscardableMemory>(); // TODO(pliard): make this function less naive by e.g. moving the free chunks // multiset to the allocator itself in order to decrease even more // fragmentation/speedup allocation. Note that there should not be more than a diff --git a/base/memory/discardable_memory_allocator_android_unittest.cc b/base/memory/discardable_memory_allocator_android_unittest.cc index ae95d5d..d800598 100644 --- a/base/memory/discardable_memory_allocator_android_unittest.cc +++ b/base/memory/discardable_memory_allocator_android_unittest.cc @@ -46,6 +46,21 @@ TEST_F(DiscardableMemoryAllocatorTest, Basic) { WriteToDiscardableMemory(memory.get(), size); } +TEST_F(DiscardableMemoryAllocatorTest, ZeroAllocationIsNotSupported) { + scoped_ptr<DiscardableMemory> memory(allocator_.Allocate(0)); + ASSERT_FALSE(memory); +} + +TEST_F(DiscardableMemoryAllocatorTest, TooLargeAllocationFails) { + const size_t max_allowed_allocation_size = + std::numeric_limits<size_t>::max() - kPageSize + 1; + scoped_ptr<DiscardableMemory> memory( + allocator_.Allocate(max_allowed_allocation_size + 1)); + // Page-alignment would have caused an overflow resulting in a small + // allocation if the input size wasn't checked correctly. 
+ ASSERT_FALSE(memory); +} + TEST_F(DiscardableMemoryAllocatorTest, AshmemRegionsAreNotSmallerThanRequestedSize) { const size_t size = std::numeric_limits<size_t>::max() - kPageSize + 1; diff --git a/base/memory/discardable_memory_android.cc b/base/memory/discardable_memory_android.cc index 519aa8d..9f4f159 100644 --- a/base/memory/discardable_memory_android.cc +++ b/base/memory/discardable_memory_android.cc @@ -5,8 +5,6 @@ #include "base/memory/discardable_memory_android.h" #include <sys/mman.h> -#include <sys/resource.h> -#include <sys/time.h> #include <unistd.h> #include <limits> @@ -19,52 +17,22 @@ #include "base/logging.h" #include "base/memory/discardable_memory.h" #include "base/memory/discardable_memory_allocator_android.h" -#include "base/synchronization/lock.h" #include "third_party/ashmem/ashmem.h" namespace base { namespace { -const size_t kPageSize = 4096; - const char kAshmemAllocatorName[] = "DiscardableMemoryAllocator"; -struct GlobalContext { - GlobalContext() - : ashmem_fd_limit(GetSoftFDLimit()), - allocator(kAshmemAllocatorName, - GetOptimalAshmemRegionSizeForAllocator()), - ashmem_fd_count_(0) { +struct DiscardableMemoryAllocatorWrapper { + DiscardableMemoryAllocatorWrapper() + : allocator(kAshmemAllocatorName, + GetOptimalAshmemRegionSizeForAllocator()) { } - const int ashmem_fd_limit; internal::DiscardableMemoryAllocator allocator; - Lock lock; - - int ashmem_fd_count() const { - lock.AssertAcquired(); - return ashmem_fd_count_; - } - - void decrement_ashmem_fd_count() { - lock.AssertAcquired(); - --ashmem_fd_count_; - } - - void increment_ashmem_fd_count() { - lock.AssertAcquired(); - ++ashmem_fd_count_; - } private: - static int GetSoftFDLimit() { - struct rlimit limit_info; - if (getrlimit(RLIMIT_NOFILE, &limit_info) != 0) - return 128; - // Allow 25% of file descriptor capacity for ashmem. - return limit_info.rlim_cur / 4; - } - // Returns 64 MBytes for a 512 MBytes device, 128 MBytes for 1024 MBytes... 
static size_t GetOptimalAshmemRegionSizeForAllocator() { // Note that this may do some I/O (without hitting the disk though) so it @@ -72,69 +40,20 @@ struct GlobalContext { return internal::AlignToNextPage( base::android::SysUtils::AmountOfPhysicalMemoryKB() * 1024 / 8); } - - int ashmem_fd_count_; -}; - -LazyInstance<GlobalContext>::Leaky g_context = LAZY_INSTANCE_INITIALIZER; - -// This is the default implementation of DiscardableMemory on Android which is -// used when file descriptor usage is under the soft limit. When file descriptor -// usage gets too high the discardable memory allocator is used instead. See -// ShouldUseAllocator() below for more details. -class DiscardableMemoryAndroidSimple : public DiscardableMemory { - public: - DiscardableMemoryAndroidSimple(int fd, void* address, size_t size) - : fd_(fd), - memory_(address), - size_(size) { - DCHECK_GE(fd_, 0); - DCHECK(memory_); - } - - virtual ~DiscardableMemoryAndroidSimple() { - internal::CloseAshmemRegion(fd_, size_, memory_); - } - - // DiscardableMemory: - virtual LockDiscardableMemoryStatus Lock() OVERRIDE { - return internal::LockAshmemRegion(fd_, 0, size_, memory_); - } - - virtual void Unlock() OVERRIDE { - internal::UnlockAshmemRegion(fd_, 0, size_, memory_); - } - - virtual void* Memory() const OVERRIDE { - return memory_; - } - - private: - const int fd_; - void* const memory_; - const size_t size_; - - DISALLOW_COPY_AND_ASSIGN(DiscardableMemoryAndroidSimple); }; -int GetCurrentNumberOfAshmemFDs() { - AutoLock lock(g_context.Get().lock); - return g_context.Get().ashmem_fd_count(); -} - -// Returns whether the provided size can be safely page-aligned (without causing -// an overflow). 
-bool CheckSizeCanBeAlignedToNextPage(size_t size) { - return size <= std::numeric_limits<size_t>::max() - kPageSize + 1; -} +LazyInstance<DiscardableMemoryAllocatorWrapper>::Leaky g_context = + LAZY_INSTANCE_INITIALIZER; } // namespace namespace internal { size_t AlignToNextPage(size_t size) { + const size_t kPageSize = 4096; DCHECK_EQ(static_cast<int>(kPageSize), getpagesize()); - DCHECK(CheckSizeCanBeAlignedToNextPage(size)); + if (size > std::numeric_limits<size_t>::max() - kPageSize + 1) + return 0; const size_t mask = ~(kPageSize - 1); return (size + kPageSize - 1) & mask; } @@ -143,9 +62,6 @@ bool CreateAshmemRegion(const char* name, size_t size, int* out_fd, void** out_address) { - AutoLock lock(g_context.Get().lock); - if (g_context.Get().ashmem_fd_count() + 1 > g_context.Get().ashmem_fd_limit) - return false; int fd = ashmem_create_region(name, size); if (fd < 0) { DLOG(ERROR) << "ashmem_create_region() failed"; @@ -170,15 +86,12 @@ bool CreateAshmemRegion(const char* name, } ignore_result(fd_closer.release()); - g_context.Get().increment_ashmem_fd_count(); *out_fd = fd; *out_address = address; return true; } bool CloseAshmemRegion(int fd, size_t size, void* address) { - AutoLock lock(g_context.Get().lock); - g_context.Get().decrement_ashmem_fd_count(); if (munmap(address, size) == -1) { DPLOG(ERROR) << "Failed to unmap memory."; close(fd); @@ -213,44 +126,10 @@ bool DiscardableMemory::SupportedNatively() { return true; } -// Allocation can happen in two ways: -// - Each client-requested allocation is backed by an individual ashmem region. -// This allows deleting ashmem regions individually by closing the ashmem file -// descriptor. This is the default path that is taken when file descriptor usage -// allows us to do so or when the allocation size would require and entire -// ashmem region. -// - Allocations are performed by the global allocator when file descriptor -// usage gets too high. 
This still allows unpinning but does not allow deleting -// (i.e. releasing the physical pages backing) individual regions. -// -// TODO(pliard): consider tuning the size threshold used below. For instance we -// might want to make it a fraction of kMinAshmemRegionSize and also -// systematically have small allocations go through the allocator to let big -// allocations systematically go through individual ashmem regions. -// // static scoped_ptr<DiscardableMemory> DiscardableMemory::CreateLockedMemory( size_t size) { - if (!CheckSizeCanBeAlignedToNextPage(size)) - return scoped_ptr<DiscardableMemory>(); - // Pinning & unpinning works with page granularity therefore align the size - // upfront. - const size_t aligned_size = internal::AlignToNextPage(size); - // Note that the following code is slightly racy. The worst that can happen in - // practice though is taking the wrong decision (e.g. using the allocator - // rather than DiscardableMemoryAndroidSimple). Moreover keeping the lock - // acquired for the whole allocation would cause a deadlock when the allocator - // tries to create an ashmem region. - GlobalContext* const global_context = g_context.Pointer(); - if (GetCurrentNumberOfAshmemFDs() < 0.9 * global_context->ashmem_fd_limit) { - int fd; - void* address; - if (internal::CreateAshmemRegion("", aligned_size, &fd, &address)) { - return scoped_ptr<DiscardableMemory>( - new DiscardableMemoryAndroidSimple(fd, address, aligned_size)); - } - } - return global_context->allocator.Allocate(size); + return g_context.Pointer()->allocator.Allocate(size); } // static diff --git a/base/memory/discardable_memory_android.h b/base/memory/discardable_memory_android.h index 9db78b3..0f25dff 100644 --- a/base/memory/discardable_memory_android.h +++ b/base/memory/discardable_memory_android.h @@ -15,6 +15,7 @@ namespace base { namespace internal { +// Returns 0 if the provided size is too high to be aligned. 
size_t AlignToNextPage(size_t size); bool CreateAshmemRegion(const char* name, size_t size, int* fd, void** address); diff --git a/base/memory/discardable_memory_unittest.cc b/base/memory/discardable_memory_unittest.cc index c9f67b2..0b67743 100644 --- a/base/memory/discardable_memory_unittest.cc +++ b/base/memory/discardable_memory_unittest.cc @@ -12,20 +12,6 @@ namespace base { const size_t kSize = 1024; -#if defined(OS_ANDROID) -TEST(DiscardableMemoryTest, TooLargeAllocationFails) { - const size_t kPageSize = 4096; - const size_t max_allowed_allocation_size = - std::numeric_limits<size_t>::max() - kPageSize + 1; - scoped_ptr<DiscardableMemory> memory( - DiscardableMemory::CreateLockedMemory(max_allowed_allocation_size + 1)); - // On certain platforms (e.g. Android), page-alignment would have caused an - // overflow resulting in a small allocation if the input size wasn't checked - // correctly. - ASSERT_FALSE(memory); -} -#endif - TEST(DiscardableMemoryTest, SupportedNatively) { #if defined(DISCARDABLE_MEMORY_ALWAYS_SUPPORTED_NATIVELY) ASSERT_TRUE(DiscardableMemory::SupportedNatively()); |