| author | pliard@chromium.org <pliard@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98> | 2014-01-16 15:54:41 +0000 |
| --- | --- | --- |
| committer | pliard@chromium.org <pliard@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98> | 2014-01-16 15:54:41 +0000 |
| commit | 55b5a578666805909fbbb62e5cc9a97954c27a8b (patch) | |
| tree | 93ef7cacefa9189d9ff091fd9276a319d61950c6 /base | |
| parent | 058548ba72d1c4464a2db0aca4213c0bc9969881 (diff) | |
Align ashmem region sizes to page size after ashmem creation failed.
When ashmem creation failed, the new (halved) size used for the retry wasn't
page-aligned (see the sketch of the alignment arithmetic below).
This also moves the page-alignment function into the allocator's anonymous
namespace, since it is now an implementation detail (the allocator is always
used).
BUG=331832
Review URL: https://codereview.chromium.org/136723005
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@245201 0039d316-1c4b-4281-b951-d872f2087c98
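For illustration, here is a minimal, self-contained C++ sketch of the arithmetic this change relies on. It is not the Chromium code itself: it re-implements the AlignToNextPage() helper added in the diff below (assuming the same hard-coded 4096-byte page size and the same overflow guard, and omitting the DCHECKs) and shows why a halved retry size has to be re-aligned.

```cpp
#include <stddef.h>

#include <cassert>
#include <cstdio>
#include <limits>

namespace {

// Mirror of the AlignToNextPage() helper added by this patch (hard-coded
// 4096-byte pages, same overflow guard): rounds |size| up to the next page
// boundary, or returns 0 if rounding up would overflow size_t.
size_t AlignToNextPage(size_t size) {
  const size_t kPageSize = 4096;
  if (size > std::numeric_limits<size_t>::max() - kPageSize + 1)
    return 0;
  const size_t mask = ~(kPageSize - 1);
  return (size + kPageSize - 1) & mask;
}

}  // namespace

int main() {
  const size_t kPageSize = 4096;

  // Before the fix: halving a page-aligned region size can yield a size that
  // is no longer page-aligned (3 pages / 2 = 1.5 pages).
  const size_t region_size = 3 * kPageSize;    // 12288 bytes, page-aligned.
  const size_t naive_retry = region_size / 2;  // 6144 bytes, NOT page-aligned.
  assert(naive_retry % kPageSize != 0);

  // After the fix: the retry size is re-aligned before being used, which is
  // what the new loop increment AlignToNextPage(region_size / 2) does.
  const size_t aligned_retry = AlignToNextPage(naive_retry);
  assert(aligned_retry == 2 * kPageSize);  // Rounded up to 8192 bytes.
  assert(aligned_retry % kPageSize == 0);

  // The overflow guard keeps huge requests from wrapping around to a tiny
  // allocation: anything above SIZE_MAX - kPageSize + 1 yields 0.
  assert(AlignToNextPage(std::numeric_limits<size_t>::max()) == 0);

  std::printf("naive retry: %zu, aligned retry: %zu\n", naive_retry,
              aligned_retry);
  return 0;
}
```

The key point is that halving a size spanning an odd number of pages produces a misaligned value, which is why the retry loop in the patch re-aligns after every division.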
Diffstat (limited to 'base')
-rw-r--r-- | base/memory/discardable_memory_allocator_android.cc | 27
-rw-r--r-- | base/memory/discardable_memory_allocator_android.h | 9
-rw-r--r-- | base/memory/discardable_memory_allocator_android_unittest.cc | 22
-rw-r--r-- | base/memory/discardable_memory_android.cc | 12
-rw-r--r-- | base/memory/discardable_memory_android.h | 3
5 files changed, 48 insertions, 25 deletions
diff --git a/base/memory/discardable_memory_allocator_android.cc b/base/memory/discardable_memory_allocator_android.cc
index a2b823f..7ca5e7d 100644
--- a/base/memory/discardable_memory_allocator_android.cc
+++ b/base/memory/discardable_memory_allocator_android.cc
@@ -46,6 +46,16 @@ const size_t kMaxChunkFragmentationBytes = 4096 - 1;
 
 const size_t kMinAshmemRegionSize = 32 * 1024 * 1024;
 
+// Returns 0 if the provided size is too high to be aligned.
+size_t AlignToNextPage(size_t size) {
+  const size_t kPageSize = 4096;
+  DCHECK_EQ(static_cast<int>(kPageSize), getpagesize());
+  if (size > std::numeric_limits<size_t>::max() - kPageSize + 1)
+    return 0;
+  const size_t mask = ~(kPageSize - 1);
+  return (size + kPageSize - 1) & mask;
+}
+
 }  // namespace
 
 namespace internal {
@@ -106,7 +116,7 @@
       size_t size,
       const std::string& name,
       DiscardableMemoryAllocator* allocator) {
-    DCHECK_EQ(size, internal::AlignToNextPage(size));
+    DCHECK_EQ(size, AlignToNextPage(size));
     int fd;
     void* base;
     if (!internal::CreateAshmemRegion(name.c_str(), size, &fd, &base))
@@ -371,7 +381,9 @@
 DiscardableMemoryAllocator::DiscardableMemoryAllocator(
     const std::string& name,
     size_t ashmem_region_size)
     : name_(name),
-      ashmem_region_size_(std::max(kMinAshmemRegionSize, ashmem_region_size)) {
+      ashmem_region_size_(
+          std::max(kMinAshmemRegionSize, AlignToNextPage(ashmem_region_size))),
+      last_ashmem_region_size_(0) {
   DCHECK_GE(ashmem_region_size_, kMinAshmemRegionSize);
 }
@@ -382,7 +394,7 @@ DiscardableMemoryAllocator::~DiscardableMemoryAllocator() {
 
 scoped_ptr<DiscardableMemory> DiscardableMemoryAllocator::Allocate(
     size_t size) {
-  const size_t aligned_size = internal::AlignToNextPage(size);
+  const size_t aligned_size = AlignToNextPage(size);
   if (!aligned_size)
     return scoped_ptr<DiscardableMemory>();
   // TODO(pliard): make this function less naive by e.g. moving the free chunks
@@ -403,11 +415,13 @@
   // repetitively dividing the size by 2.
   const size_t min_region_size = std::max(kMinAshmemRegionSize, aligned_size);
   for (size_t region_size = std::max(ashmem_region_size_, aligned_size);
-       region_size >= min_region_size; region_size /= 2) {
+       region_size >= min_region_size;
+       region_size = AlignToNextPage(region_size / 2)) {
     scoped_ptr<AshmemRegion> new_region(
         AshmemRegion::Create(region_size, name_.c_str(), this));
     if (!new_region)
       continue;
+    last_ashmem_region_size_ = region_size;
     ashmem_regions_.push_back(new_region.release());
     return ashmem_regions_.back()->Allocate_Locked(size, aligned_size);
   }
@@ -415,6 +429,11 @@
   return scoped_ptr<DiscardableMemory>();
 }
 
+size_t DiscardableMemoryAllocator::last_ashmem_region_size() const {
+  AutoLock auto_lock(lock_);
+  return last_ashmem_region_size_;
+}
+
 void DiscardableMemoryAllocator::DeleteAshmemRegion_Locked(
     AshmemRegion* region) {
   lock_.AssertAcquired();
diff --git a/base/memory/discardable_memory_allocator_android.h b/base/memory/discardable_memory_allocator_android.h
index 7ced405..eea57fb 100644
--- a/base/memory/discardable_memory_allocator_android.h
+++ b/base/memory/discardable_memory_allocator_android.h
@@ -44,16 +44,21 @@
   // instance.
   scoped_ptr<DiscardableMemory> Allocate(size_t size);
 
+  // Returns the size of the last ashmem region which was created. This is used
+  // for testing only.
+  size_t last_ashmem_region_size() const;
+
  private:
   class AshmemRegion;
   class DiscardableAshmemChunk;
 
   void DeleteAshmemRegion_Locked(AshmemRegion* region);
 
-  base::ThreadChecker thread_checker_;
+  ThreadChecker thread_checker_;
   const std::string name_;
   const size_t ashmem_region_size_;
-  base::Lock lock_;
+  mutable Lock lock_;
+  size_t last_ashmem_region_size_;
   ScopedVector<AshmemRegion> ashmem_regions_;
 
   DISALLOW_COPY_AND_ASSIGN(DiscardableMemoryAllocator);
diff --git a/base/memory/discardable_memory_allocator_android_unittest.cc b/base/memory/discardable_memory_allocator_android_unittest.cc
index d800598..1da510d 100644
--- a/base/memory/discardable_memory_allocator_android_unittest.cc
+++ b/base/memory/discardable_memory_allocator_android_unittest.cc
@@ -23,6 +23,9 @@ const char kAllocatorName[] = "allocator-for-testing";
 const size_t kAshmemRegionSizeForTesting = 32 * 1024 * 1024;
 const size_t kPageSize = 4096;
 
+const size_t kMaxAllowedAllocationSize =
+    std::numeric_limits<size_t>::max() - kPageSize + 1;
+
 class DiscardableMemoryAllocatorTest : public testing::Test {
  protected:
   DiscardableMemoryAllocatorTest()
@@ -52,10 +55,8 @@ TEST_F(DiscardableMemoryAllocatorTest, ZeroAllocationIsNotSupported) {
 }
 
 TEST_F(DiscardableMemoryAllocatorTest, TooLargeAllocationFails) {
-  const size_t max_allowed_allocation_size =
-      std::numeric_limits<size_t>::max() - kPageSize + 1;
   scoped_ptr<DiscardableMemory> memory(
-      allocator_.Allocate(max_allowed_allocation_size + 1));
+      allocator_.Allocate(kMaxAllowedAllocationSize + 1));
   // Page-alignment would have caused an overflow resulting in a small
   // allocation if the input size wasn't checked correctly.
   ASSERT_FALSE(memory);
@@ -63,17 +64,28 @@
 
 TEST_F(DiscardableMemoryAllocatorTest,
        AshmemRegionsAreNotSmallerThanRequestedSize) {
-  const size_t size = std::numeric_limits<size_t>::max() - kPageSize + 1;
   // The creation of the underlying ashmem region is expected to fail since
   // there should not be enough room in the address space. When ashmem creation
   // fails, the allocator repetitively retries by dividing the size by 2. This
   // size should not be smaller than the size the user requested so the
   // allocation here should just fail (and not succeed with the minimum ashmem
   // region size).
-  scoped_ptr<DiscardableMemory> memory(allocator_.Allocate(size));
+  scoped_ptr<DiscardableMemory> memory(
+      allocator_.Allocate(kMaxAllowedAllocationSize));
   ASSERT_FALSE(memory);
 }
 
+TEST_F(DiscardableMemoryAllocatorTest, AshmemRegionsAreAlwaysPageAligned) {
+  // Use a separate allocator here so that we can override the ashmem region
+  // size.
+  DiscardableMemoryAllocator allocator(
+      kAllocatorName, kMaxAllowedAllocationSize);
+  scoped_ptr<DiscardableMemory> memory(allocator.Allocate(kPageSize));
+  ASSERT_TRUE(memory);
+  EXPECT_GT(kMaxAllowedAllocationSize, allocator.last_ashmem_region_size());
+  ASSERT_TRUE(allocator.last_ashmem_region_size() % kPageSize == 0);
+}
+
 TEST_F(DiscardableMemoryAllocatorTest, LargeAllocation) {
   // Note that large allocations should just use DiscardableMemoryAndroidSimple
   // instead.
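One detail worth calling out from the header diff above: lock_ becomes a mutable Lock so that the new const, test-only accessor last_ashmem_region_size() can acquire it. The sketch below shows the same pattern in isolation; it is an illustration only, using std::mutex and std::lock_guard as stand-ins for Chromium's base::Lock and base::AutoLock, and the RegionSizeTracker class is hypothetical.

```cpp
#include <stddef.h>

#include <mutex>

// Hypothetical stand-in for the pattern in the header diff above: a const
// accessor that must take a lock forces the lock member to be mutable.
// std::mutex/std::lock_guard are used instead of base::Lock/base::AutoLock to
// keep the sketch self-contained.
class RegionSizeTracker {
 public:
  // Writer side, analogous to the allocation path recording
  // last_ashmem_region_size_ under the allocator lock.
  void set_last_region_size(size_t size) {
    std::lock_guard<std::mutex> auto_lock(lock_);
    last_region_size_ = size;
  }

  // Const, read-only accessor (test-only in the patch). Acquiring lock_ here
  // only compiles because lock_ is declared mutable.
  size_t last_region_size() const {
    std::lock_guard<std::mutex> auto_lock(lock_);
    return last_region_size_;
  }

 private:
  mutable std::mutex lock_;
  size_t last_region_size_ = 0;
};

int main() {
  RegionSizeTracker tracker;
  tracker.set_last_region_size(32 * 1024 * 1024);
  return tracker.last_region_size() == 32 * 1024 * 1024 ? 0 : 1;
}
```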
diff --git a/base/memory/discardable_memory_android.cc b/base/memory/discardable_memory_android.cc
index 6f53144..2004a36 100644
--- a/base/memory/discardable_memory_android.cc
+++ b/base/memory/discardable_memory_android.cc
@@ -37,8 +37,7 @@ struct DiscardableMemoryAllocatorWrapper {
   static size_t GetOptimalAshmemRegionSizeForAllocator() {
     // Note that this may do some I/O (without hitting the disk though) so it
     // should not be called on the critical path.
-    return internal::AlignToNextPage(
-        base::android::SysUtils::AmountOfPhysicalMemoryKB() * 1024 / 8);
+    return base::android::SysUtils::AmountOfPhysicalMemoryKB() * 1024 / 8;
   }
 };
 
@@ -49,15 +48,6 @@ LazyInstance<DiscardableMemoryAllocatorWrapper>::Leaky g_context =
 
 namespace internal {
 
-size_t AlignToNextPage(size_t size) {
-  const size_t kPageSize = 4096;
-  DCHECK_EQ(static_cast<int>(kPageSize), getpagesize());
-  if (size > std::numeric_limits<size_t>::max() - kPageSize + 1)
-    return 0;
-  const size_t mask = ~(kPageSize - 1);
-  return (size + kPageSize - 1) & mask;
-}
-
 bool CreateAshmemRegion(const char* name,
                         size_t size,
                         int* out_fd,
diff --git a/base/memory/discardable_memory_android.h b/base/memory/discardable_memory_android.h
index 07c2e37..28b256d 100644
--- a/base/memory/discardable_memory_android.h
+++ b/base/memory/discardable_memory_android.h
@@ -15,9 +15,6 @@
 namespace base {
 namespace internal {
 
-// Returns 0 if the provided size is too high to be aligned.
-size_t AlignToNextPage(size_t size);
-
 bool CreateAshmemRegion(const char* name, size_t size, int* fd, void** address);
 
 bool CloseAshmemRegion(int fd, size_t size, void* address);