author     pliard@chromium.org <pliard@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>  2014-04-30 10:54:38 +0000
committer  pliard@chromium.org <pliard@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>  2014-04-30 10:54:38 +0000
commit     6b7e9b2dcf8f11f009c9e30fd28819dbd93ad000 (patch)
tree       d69ce3302d5be1b70d10cd6797abfec0e550166c
parent     15d986badba741679d157658c0b6f2130095f35c (diff)
Use DiscardableMemoryManager on Android.

This allows userspace (DiscardableMemoryManager) to control eviction of
unlocked DiscardableMemory instances to prevent the process from running
out of address space in cases of heavy use of unlocked DiscardableMemory.
This also removes all the occurrences of 'Android' from the ashmem
allocator to allow it to be used later on ChromeOS.

BUG=327516, 334996
R=reveman@chromium.org, willchan@chromium.org

Review URL: https://codereview.chromium.org/195863005

git-svn-id: svn://svn.chromium.org/chrome/trunk/src@267170 0039d316-1c4b-4281-b951-d872f2087c98
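
A minimal usage sketch of the new ashmem-backed path from a client's point of view, written against the base/ interfaces as they appear in this patch (DiscardableMemory::CreateLockedMemoryWithType, DISCARDABLE_MEMORY_TYPE_ASHMEM, Lock/Unlock/Memory). The helper function name and the 64 KB size are illustrative assumptions only:

// Usage sketch, assuming the interfaces introduced in this patch.
#include "base/memory/discardable_memory.h"
#include "base/memory/scoped_ptr.h"

void UseAshmemBackedDiscardableMemory() {  // Illustrative helper name.
  // The memory is returned already locked; on Android it is backed by an
  // ashmem chunk and registered with the process-wide
  // DiscardableMemoryManager.
  scoped_ptr<base::DiscardableMemory> memory(
      base::DiscardableMemory::CreateLockedMemoryWithType(
          base::DISCARDABLE_MEMORY_TYPE_ASHMEM, 64 * 1024));
  if (!memory)
    return;

  static_cast<char*>(memory->Memory())[0] = 'a';

  // Unlocking makes the chunk eligible for userspace eviction once the
  // manager's address space limit is exceeded (and for kernel purging).
  memory->Unlock();

  // Relocking reports whether the contents survived.
  const base::DiscardableMemoryLockStatus status = memory->Lock();
  if (status == base::DISCARDABLE_MEMORY_LOCK_STATUS_PURGED) {
    // The contents were discarded and must be regenerated.
  }
  if (status != base::DISCARDABLE_MEMORY_LOCK_STATUS_FAILED)
    memory->Unlock();
}
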
-rw-r--r--  base/base.gyp                                                                                             6
-rw-r--r--  base/base.gypi                                                                                           10
-rw-r--r--  base/memory/discardable_memory.cc                                                                         2
-rw-r--r--  base/memory/discardable_memory.h                                                                          6
-rw-r--r--  base/memory/discardable_memory_allocator_android.h                                                       70
-rw-r--r--  base/memory/discardable_memory_android.cc                                                                42
-rw-r--r--  base/memory/discardable_memory_ashmem.cc                                                                 75
-rw-r--r--  base/memory/discardable_memory_ashmem.h                                                                  55
-rw-r--r--  base/memory/discardable_memory_ashmem_allocator.cc (renamed from base/memory/discardable_memory_allocator_android.cc)  152
-rw-r--r--  base/memory/discardable_memory_ashmem_allocator.h                                                        93
-rw-r--r--  base/memory/discardable_memory_ashmem_allocator_unittest.cc (renamed from base/memory/discardable_memory_allocator_android_unittest.cc)  154
-rw-r--r--  base/memory/discardable_memory_emulated.cc                                                               11
-rw-r--r--  base/memory/discardable_memory_emulated.h                                                                 5
-rw-r--r--  base/memory/discardable_memory_linux.cc                                                                   7
-rw-r--r--  base/memory/discardable_memory_mac.cc                                                                     7
-rw-r--r--  base/memory/discardable_memory_manager.cc                                                                 2
-rw-r--r--  base/memory/discardable_memory_manager.h                                                                  2
-rw-r--r--  base/memory/discardable_memory_manager_unittest.cc                                                        2
-rw-r--r--  base/memory/discardable_memory_unittest.cc                                                                6
-rw-r--r--  base/memory/discardable_memory_win.cc                                                                     7
20 files changed, 428 insertions, 286 deletions
diff --git a/base/base.gyp b/base/base.gyp
index f2151b3..06ad10b 100644
--- a/base/base.gyp
+++ b/base/base.gyp
@@ -474,7 +474,6 @@
'mac/scoped_sending_event_unittest.mm',
'md5_unittest.cc',
'memory/aligned_memory_unittest.cc',
- 'memory/discardable_memory_allocator_android_unittest.cc',
'memory/discardable_memory_manager_unittest.cc',
'memory/discardable_memory_unittest.cc',
'memory/linked_ptr_unittest.cc',
@@ -756,6 +755,11 @@
['include', '^sys_string_conversions_mac_unittest\\.mm$'],
],
}],
+ ['OS == "android" and _toolset == "target"', {
+ 'sources': [
+ 'memory/discardable_memory_ashmem_allocator_unittest.cc',
+ ],
+ }],
['OS == "android"', {
'sources/': [
['include', '^debug/proc_maps_linux_unittest\\.cc$'],
diff --git a/base/base.gypi b/base/base.gypi
index 09a3afa..1767014 100644
--- a/base/base.gypi
+++ b/base/base.gypi
@@ -304,8 +304,6 @@
'memory/aligned_memory.h',
'memory/discardable_memory.cc',
'memory/discardable_memory.h',
- 'memory/discardable_memory_allocator_android.cc',
- 'memory/discardable_memory_allocator_android.h',
'memory/discardable_memory_android.cc',
'memory/discardable_memory_emulated.cc',
'memory/discardable_memory_emulated.h',
@@ -770,6 +768,14 @@
['include', '^threading/platform_thread_linux\\.cc$'],
],
}],
+ ['OS == "android" and _toolset == "target"', {
+ 'sources': [
+ 'memory/discardable_memory_ashmem_allocator.cc',
+ 'memory/discardable_memory_ashmem_allocator.h',
+ 'memory/discardable_memory_ashmem.cc',
+ 'memory/discardable_memory_ashmem.h',
+ ],
+ }],
['OS == "android" and >(nacl_untrusted_build)==0', {
'sources!': [
'base_paths_posix.cc',
diff --git a/base/memory/discardable_memory.cc b/base/memory/discardable_memory.cc
index 02fc5a7..9ba47aa 100644
--- a/base/memory/discardable_memory.cc
+++ b/base/memory/discardable_memory.cc
@@ -14,7 +14,7 @@ const struct TypeNamePair {
DiscardableMemoryType type;
const char* name;
} kTypeNamePairs[] = {
- { DISCARDABLE_MEMORY_TYPE_ANDROID, "android" },
+ { DISCARDABLE_MEMORY_TYPE_ASHMEM, "ashmem" },
{ DISCARDABLE_MEMORY_TYPE_MAC, "mac" },
{ DISCARDABLE_MEMORY_TYPE_EMULATED, "emulated" },
{ DISCARDABLE_MEMORY_TYPE_MALLOC, "malloc" }
diff --git a/base/memory/discardable_memory.h b/base/memory/discardable_memory.h
index 53d8d0e..9189b5e 100644
--- a/base/memory/discardable_memory.h
+++ b/base/memory/discardable_memory.h
@@ -17,7 +17,7 @@ namespace base {
enum DiscardableMemoryType {
DISCARDABLE_MEMORY_TYPE_NONE,
- DISCARDABLE_MEMORY_TYPE_ANDROID,
+ DISCARDABLE_MEMORY_TYPE_ASHMEM,
DISCARDABLE_MEMORY_TYPE_MAC,
DISCARDABLE_MEMORY_TYPE_EMULATED,
DISCARDABLE_MEMORY_TYPE_MALLOC
@@ -115,10 +115,6 @@ class BASE_EXPORT DiscardableMemory {
// Testing utility calls.
- // Check whether a purge of all discardable memory in the system is supported.
- // Use only for testing!
- static bool PurgeForTestingSupported();
-
// Purge all discardable memory in the system. This call has global effects
// across all running processes, so it should only be used for testing!
static void PurgeForTesting();
diff --git a/base/memory/discardable_memory_allocator_android.h b/base/memory/discardable_memory_allocator_android.h
deleted file mode 100644
index eea57fb..0000000
--- a/base/memory/discardable_memory_allocator_android.h
+++ /dev/null
@@ -1,70 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_MEMORY_DISCARDABLE_MEMORY_ALLOCATOR_H_
-#define BASE_MEMORY_DISCARDABLE_MEMORY_ALLOCATOR_H_
-
-#include <string>
-
-#include "base/base_export.h"
-#include "base/basictypes.h"
-#include "base/memory/scoped_ptr.h"
-#include "base/memory/scoped_vector.h"
-#include "base/synchronization/lock.h"
-#include "base/threading/thread_checker.h"
-
-namespace base {
-
-class DiscardableMemory;
-
-namespace internal {
-
-// On Android ashmem is used to implement discardable memory. It is backed by a
-// file (descriptor) thus is a limited resource. This allocator minimizes the
-// problem by allocating large ashmem regions internally and returning smaller
-// chunks to the client.
-// Allocated chunks are systematically aligned on a page boundary therefore this
-// allocator should not be used for small allocations.
-//
-// Threading: The allocator must be deleted on the thread it was constructed on
-// although its Allocate() method can be invoked on any thread. See
-// discardable_memory.h for DiscardableMemory's threading guarantees.
-class BASE_EXPORT_PRIVATE DiscardableMemoryAllocator {
- public:
- // Note that |name| is only used for debugging/measurement purposes.
- // |ashmem_region_size| is the size that will be used to create the underlying
- // ashmem regions and is expected to be greater or equal than 32 MBytes.
- DiscardableMemoryAllocator(const std::string& name,
- size_t ashmem_region_size);
-
- ~DiscardableMemoryAllocator();
-
- // Note that the allocator must outlive the returned DiscardableMemory
- // instance.
- scoped_ptr<DiscardableMemory> Allocate(size_t size);
-
- // Returns the size of the last ashmem region which was created. This is used
- // for testing only.
- size_t last_ashmem_region_size() const;
-
- private:
- class AshmemRegion;
- class DiscardableAshmemChunk;
-
- void DeleteAshmemRegion_Locked(AshmemRegion* region);
-
- ThreadChecker thread_checker_;
- const std::string name_;
- const size_t ashmem_region_size_;
- mutable Lock lock_;
- size_t last_ashmem_region_size_;
- ScopedVector<AshmemRegion> ashmem_regions_;
-
- DISALLOW_COPY_AND_ASSIGN(DiscardableMemoryAllocator);
-};
-
-} // namespace internal
-} // namespace base
-
-#endif // BASE_MEMORY_DISCARDABLE_MEMORY_ALLOCATOR_H_
diff --git a/base/memory/discardable_memory_android.cc b/base/memory/discardable_memory_android.cc
index fa89c18..9043906 100644
--- a/base/memory/discardable_memory_android.cc
+++ b/base/memory/discardable_memory_android.cc
@@ -9,22 +9,30 @@
#include "base/compiler_specific.h"
#include "base/lazy_instance.h"
#include "base/logging.h"
-#include "base/memory/discardable_memory_allocator_android.h"
+#include "base/memory/discardable_memory_ashmem.h"
+#include "base/memory/discardable_memory_ashmem_allocator.h"
#include "base/memory/discardable_memory_emulated.h"
#include "base/memory/discardable_memory_malloc.h"
namespace base {
namespace {
-const char kAshmemAllocatorName[] = "DiscardableMemoryAllocator";
+const char kAshmemAllocatorName[] = "DiscardableMemoryAshmemAllocator";
-struct DiscardableMemoryAllocatorWrapper {
- DiscardableMemoryAllocatorWrapper()
+// When ashmem is used, have the DiscardableMemoryManager trigger userspace
+// eviction when address space usage gets too high (e.g. 512 MBytes).
+const size_t kAshmemMaxAddressSpaceUsage = 512 * 1024 * 1024;
+
+// Holds the state used for ashmem allocations.
+struct AshmemGlobalContext {
+ AshmemGlobalContext()
: allocator(kAshmemAllocatorName,
GetOptimalAshmemRegionSizeForAllocator()) {
+ manager.SetMemoryLimit(kAshmemMaxAddressSpaceUsage);
}
- internal::DiscardableMemoryAllocator allocator;
+ internal::DiscardableMemoryAshmemAllocator allocator;
+ internal::DiscardableMemoryManager manager;
private:
// Returns 64 MBytes for a 512 MBytes device, 128 MBytes for 1024 MBytes...
@@ -35,8 +43,7 @@ struct DiscardableMemoryAllocatorWrapper {
}
};
-LazyInstance<DiscardableMemoryAllocatorWrapper>::Leaky g_context =
- LAZY_INSTANCE_INITIALIZER;
+LazyInstance<AshmemGlobalContext>::Leaky g_context = LAZY_INSTANCE_INITIALIZER;
} // namespace
@@ -54,7 +61,7 @@ void DiscardableMemory::UnregisterMemoryPressureListeners() {
void DiscardableMemory::GetSupportedTypes(
std::vector<DiscardableMemoryType>* types) {
const DiscardableMemoryType supported_types[] = {
- DISCARDABLE_MEMORY_TYPE_ANDROID,
+ DISCARDABLE_MEMORY_TYPE_ASHMEM,
DISCARDABLE_MEMORY_TYPE_EMULATED,
DISCARDABLE_MEMORY_TYPE_MALLOC
};
@@ -68,8 +75,15 @@ scoped_ptr<DiscardableMemory> DiscardableMemory::CreateLockedMemoryWithType(
case DISCARDABLE_MEMORY_TYPE_NONE:
case DISCARDABLE_MEMORY_TYPE_MAC:
return scoped_ptr<DiscardableMemory>();
- case DISCARDABLE_MEMORY_TYPE_ANDROID: {
- return g_context.Pointer()->allocator.Allocate(size);
+ case DISCARDABLE_MEMORY_TYPE_ASHMEM: {
+ AshmemGlobalContext* const global_context = g_context.Pointer();
+ scoped_ptr<internal::DiscardableMemoryAshmem> memory(
+ new internal::DiscardableMemoryAshmem(
+ size, &global_context->allocator, &global_context->manager));
+ if (!memory->Initialize())
+ return scoped_ptr<DiscardableMemory>();
+
+ return memory.PassAs<DiscardableMemory>();
}
case DISCARDABLE_MEMORY_TYPE_EMULATED: {
scoped_ptr<internal::DiscardableMemoryEmulated> memory(
@@ -94,13 +108,9 @@ scoped_ptr<DiscardableMemory> DiscardableMemory::CreateLockedMemoryWithType(
}
// static
-bool DiscardableMemory::PurgeForTestingSupported() {
- return false;
-}
-
-// static
void DiscardableMemory::PurgeForTesting() {
- NOTIMPLEMENTED();
+ g_context.Pointer()->manager.PurgeAll();
+ internal::DiscardableMemoryEmulated::PurgeForTesting();
}
} // namespace base
diff --git a/base/memory/discardable_memory_ashmem.cc b/base/memory/discardable_memory_ashmem.cc
new file mode 100644
index 0000000..f830008
--- /dev/null
+++ b/base/memory/discardable_memory_ashmem.cc
@@ -0,0 +1,75 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/discardable_memory_ashmem.h"
+
+#include "base/memory/discardable_memory_ashmem_allocator.h"
+
+namespace base {
+namespace internal {
+
+DiscardableMemoryAshmem::DiscardableMemoryAshmem(
+ size_t bytes,
+ DiscardableMemoryAshmemAllocator* allocator,
+ DiscardableMemoryManager* manager)
+ : bytes_(bytes),
+ allocator_(allocator),
+ manager_(manager),
+ is_locked_(false) {
+ manager_->Register(this, bytes_);
+}
+
+DiscardableMemoryAshmem::~DiscardableMemoryAshmem() {
+ if (is_locked_)
+ manager_->ReleaseLock(this);
+
+ manager_->Unregister(this);
+}
+
+bool DiscardableMemoryAshmem::Initialize() {
+ return Lock() == DISCARDABLE_MEMORY_LOCK_STATUS_PURGED;
+}
+
+DiscardableMemoryLockStatus DiscardableMemoryAshmem::Lock() {
+ bool purged = false;
+ if (!manager_->AcquireLock(this, &purged))
+ return DISCARDABLE_MEMORY_LOCK_STATUS_FAILED;
+
+ return purged ? DISCARDABLE_MEMORY_LOCK_STATUS_PURGED
+ : DISCARDABLE_MEMORY_LOCK_STATUS_SUCCESS;
+}
+
+void DiscardableMemoryAshmem::Unlock() {
+ manager_->ReleaseLock(this);
+}
+
+void* DiscardableMemoryAshmem::Memory() const {
+ DCHECK(ashmem_chunk_);
+ return ashmem_chunk_->Memory();
+}
+
+bool DiscardableMemoryAshmem::AllocateAndAcquireLock() {
+ DCHECK(!is_locked_);
+ is_locked_ = true;
+
+ if (ashmem_chunk_)
+ return ashmem_chunk_->Lock();
+
+ ashmem_chunk_ = allocator_->Allocate(bytes_);
+ return false;
+}
+
+void DiscardableMemoryAshmem::ReleaseLock() {
+ DCHECK(is_locked_);
+ ashmem_chunk_->Unlock();
+ is_locked_ = false;
+}
+
+void DiscardableMemoryAshmem::Purge() {
+ DCHECK(!is_locked_);
+ ashmem_chunk_.reset();
+}
+
+} // namespace internal
+} // namespace base
diff --git a/base/memory/discardable_memory_ashmem.h b/base/memory/discardable_memory_ashmem.h
new file mode 100644
index 0000000..8436f5d
--- /dev/null
+++ b/base/memory/discardable_memory_ashmem.h
@@ -0,0 +1,55 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MEMORY_DISCARDABLE_MEMORY_ASHMEM_H_
+#define BASE_MEMORY_DISCARDABLE_MEMORY_ASHMEM_H_
+
+#include "base/memory/discardable_memory.h"
+
+#include "base/macros.h"
+#include "base/memory/discardable_memory_manager.h"
+
+namespace base {
+namespace internal {
+
+class DiscardableAshmemChunk;
+class DiscardableMemoryAshmemAllocator;
+class DiscardableMemoryManager;
+
+class DiscardableMemoryAshmem
+ : public DiscardableMemory,
+ public internal::DiscardableMemoryManagerAllocation {
+ public:
+ explicit DiscardableMemoryAshmem(size_t bytes,
+ DiscardableMemoryAshmemAllocator* allocator,
+ DiscardableMemoryManager* manager);
+
+ virtual ~DiscardableMemoryAshmem();
+
+ bool Initialize();
+
+ // Overridden from DiscardableMemory:
+ virtual DiscardableMemoryLockStatus Lock() OVERRIDE;
+ virtual void Unlock() OVERRIDE;
+ virtual void* Memory() const OVERRIDE;
+
+ // Overridden from internal::DiscardableMemoryManagerAllocation:
+ virtual bool AllocateAndAcquireLock() OVERRIDE;
+ virtual void ReleaseLock() OVERRIDE;
+ virtual void Purge() OVERRIDE;
+
+ private:
+ const size_t bytes_;
+ DiscardableMemoryAshmemAllocator* const allocator_;
+ DiscardableMemoryManager* const manager_;
+ bool is_locked_;
+ scoped_ptr<DiscardableAshmemChunk> ashmem_chunk_;
+
+ DISALLOW_COPY_AND_ASSIGN(DiscardableMemoryAshmem);
+};
+
+} // namespace internal
+} // namespace base
+
+#endif // BASE_MEMORY_DISCARDABLE_MEMORY_ASHMEM_H_
diff --git a/base/memory/discardable_memory_allocator_android.cc b/base/memory/discardable_memory_ashmem_allocator.cc
index 077a441..3c5a348 100644
--- a/base/memory/discardable_memory_allocator_android.cc
+++ b/base/memory/discardable_memory_ashmem_allocator.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "base/memory/discardable_memory_allocator_android.h"
+#include "base/memory/discardable_memory_ashmem_allocator.h"
#include <sys/mman.h>
#include <unistd.h>
@@ -18,22 +18,17 @@
#include "base/file_util.h"
#include "base/files/scoped_file.h"
#include "base/logging.h"
-#include "base/memory/discardable_memory.h"
#include "base/memory/scoped_vector.h"
-#include "base/synchronization/lock.h"
-#include "base/threading/thread_checker.h"
#include "third_party/ashmem/ashmem.h"
// The allocator consists of three parts (classes):
-// - DiscardableMemoryAllocator: entry point of all allocations (through its
-// Allocate() method) that are dispatched to the AshmemRegion instances (which
-// it owns).
+// - DiscardableMemoryAshmemAllocator: entry point of all allocations (through
+// its Allocate() method) that are dispatched to the AshmemRegion instances
+// (which it owns).
// - AshmemRegion: manages allocations and destructions inside a single large
// (e.g. 32 MBytes) ashmem region.
-// - DiscardableAshmemChunk: class implementing the DiscardableMemory interface
-// whose instances are returned to the client. DiscardableAshmemChunk lets the
-// client seamlessly operate on a subrange of the ashmem region managed by
-// AshmemRegion.
+// - DiscardableAshmemChunk: class mimicking the DiscardableMemory interface
+// whose instances are returned to the client.
namespace base {
namespace {
@@ -102,10 +97,8 @@ bool CloseAshmemRegion(int fd, size_t size, void* address) {
return close(fd) == 0;
}
-DiscardableMemoryLockStatus LockAshmemRegion(int fd, size_t off, size_t size) {
- const int result = ashmem_pin_region(fd, off, size);
- return result == ASHMEM_WAS_PURGED ? DISCARDABLE_MEMORY_LOCK_STATUS_PURGED
- : DISCARDABLE_MEMORY_LOCK_STATUS_SUCCESS;
+bool LockAshmemRegion(int fd, size_t off, size_t size) {
+ return ashmem_pin_region(fd, off, size) != ASHMEM_WAS_PURGED;
}
bool UnlockAshmemRegion(int fd, size_t off, size_t size) {
@@ -119,62 +112,13 @@ bool UnlockAshmemRegion(int fd, size_t off, size_t size) {
namespace internal {
-class DiscardableMemoryAllocator::DiscardableAshmemChunk
- : public DiscardableMemory {
- public:
- // Note that |ashmem_region| must outlive |this|.
- DiscardableAshmemChunk(AshmemRegion* ashmem_region,
- int fd,
- void* address,
- size_t offset,
- size_t size)
- : ashmem_region_(ashmem_region),
- fd_(fd),
- address_(address),
- offset_(offset),
- size_(size),
- locked_(true) {
- }
-
- // Implemented below AshmemRegion since this requires the full definition of
- // AshmemRegion.
- virtual ~DiscardableAshmemChunk();
-
- // DiscardableMemory:
- virtual DiscardableMemoryLockStatus Lock() OVERRIDE {
- DCHECK(!locked_);
- locked_ = true;
- return LockAshmemRegion(fd_, offset_, size_);
- }
-
- virtual void Unlock() OVERRIDE {
- DCHECK(locked_);
- locked_ = false;
- UnlockAshmemRegion(fd_, offset_, size_);
- }
-
- virtual void* Memory() const OVERRIDE {
- return address_;
- }
-
- private:
- AshmemRegion* const ashmem_region_;
- const int fd_;
- void* const address_;
- const size_t offset_;
- const size_t size_;
- bool locked_;
-
- DISALLOW_COPY_AND_ASSIGN(DiscardableAshmemChunk);
-};
-
-class DiscardableMemoryAllocator::AshmemRegion {
+class AshmemRegion {
public:
// Note that |allocator| must outlive |this|.
static scoped_ptr<AshmemRegion> Create(
size_t size,
const std::string& name,
- DiscardableMemoryAllocator* allocator) {
+ DiscardableMemoryAshmemAllocator* allocator) {
DCHECK_EQ(size, AlignToNextPage(size));
int fd;
void* base;
@@ -189,8 +133,8 @@ class DiscardableMemoryAllocator::AshmemRegion {
DCHECK(!highest_allocated_chunk_);
}
- // Returns a new instance of DiscardableMemory whose size is greater or equal
- // than |actual_size| (which is expected to be greater or equal than
+ // Returns a new instance of DiscardableAshmemChunk whose size is greater or
+ // equal than |actual_size| (which is expected to be greater or equal than
// |client_requested_size|).
// Allocation works as follows:
// 1) Reuse a previously freed chunk and return it if it succeeded. See
@@ -200,8 +144,9 @@ class DiscardableMemoryAllocator::AshmemRegion {
// 3) If there is enough room in the ashmem region then a new chunk is
// returned. This new chunk starts at |offset_| which is the end of the
// previously highest chunk in the region.
- scoped_ptr<DiscardableMemory> Allocate_Locked(size_t client_requested_size,
- size_t actual_size) {
+ scoped_ptr<DiscardableAshmemChunk> Allocate_Locked(
+ size_t client_requested_size,
+ size_t actual_size) {
DCHECK_LE(client_requested_size, actual_size);
allocator_->lock_.AssertAcquired();
@@ -213,14 +158,14 @@ class DiscardableMemoryAllocator::AshmemRegion {
used_to_previous_chunk_map_.find(highest_allocated_chunk_) !=
used_to_previous_chunk_map_.end());
- scoped_ptr<DiscardableMemory> memory = ReuseFreeChunk_Locked(
+ scoped_ptr<DiscardableAshmemChunk> memory = ReuseFreeChunk_Locked(
client_requested_size, actual_size);
if (memory)
return memory.Pass();
if (size_ - offset_ < actual_size) {
// This region does not have enough space left to hold the requested size.
- return scoped_ptr<DiscardableMemory>();
+ return scoped_ptr<DiscardableAshmemChunk>();
}
void* const address = static_cast<char*>(base_) + offset_;
@@ -273,7 +218,7 @@ class DiscardableMemoryAllocator::AshmemRegion {
AshmemRegion(int fd,
size_t size,
void* base,
- DiscardableMemoryAllocator* allocator)
+ DiscardableMemoryAshmemAllocator* allocator)
: fd_(fd),
size_(size),
base_(base),
@@ -287,14 +232,14 @@ class DiscardableMemoryAllocator::AshmemRegion {
}
// Tries to reuse a previously freed chunk by doing a closest size match.
- scoped_ptr<DiscardableMemory> ReuseFreeChunk_Locked(
+ scoped_ptr<DiscardableAshmemChunk> ReuseFreeChunk_Locked(
size_t client_requested_size,
size_t actual_size) {
allocator_->lock_.AssertAcquired();
const FreeChunk reused_chunk = RemoveFreeChunkFromIterator_Locked(
free_chunks_.lower_bound(FreeChunk(actual_size)));
if (reused_chunk.is_null())
- return scoped_ptr<DiscardableMemory>();
+ return scoped_ptr<DiscardableAshmemChunk>();
used_to_previous_chunk_map_.insert(
std::make_pair(reused_chunk.start, reused_chunk.previous_chunk));
@@ -330,9 +275,9 @@ class DiscardableMemoryAllocator::AshmemRegion {
const size_t offset =
static_cast<char*>(reused_chunk.start) - static_cast<char*>(base_);
LockAshmemRegion(fd_, offset, reused_chunk_size);
- scoped_ptr<DiscardableMemory> memory(
- new DiscardableAshmemChunk(this, fd_, reused_chunk.start, offset,
- reused_chunk_size));
+ scoped_ptr<DiscardableAshmemChunk> memory(
+ new DiscardableAshmemChunk(
+ this, fd_, reused_chunk.start, offset, reused_chunk_size));
return memory.Pass();
}
@@ -449,7 +394,7 @@ class DiscardableMemoryAllocator::AshmemRegion {
const int fd_;
const size_t size_;
void* const base_;
- DiscardableMemoryAllocator* const allocator_;
+ DiscardableMemoryAshmemAllocator* const allocator_;
// Points to the chunk with the highest address in the region. This pointer
// needs to be carefully updated when chunks are merged/split.
void* highest_allocated_chunk_;
@@ -473,13 +418,43 @@ class DiscardableMemoryAllocator::AshmemRegion {
DISALLOW_COPY_AND_ASSIGN(AshmemRegion);
};
-DiscardableMemoryAllocator::DiscardableAshmemChunk::~DiscardableAshmemChunk() {
+DiscardableAshmemChunk::~DiscardableAshmemChunk() {
if (locked_)
UnlockAshmemRegion(fd_, offset_, size_);
ashmem_region_->OnChunkDeletion(address_, size_);
}
-DiscardableMemoryAllocator::DiscardableMemoryAllocator(
+bool DiscardableAshmemChunk::Lock() {
+ DCHECK(!locked_);
+ locked_ = true;
+ return LockAshmemRegion(fd_, offset_, size_);
+}
+
+void DiscardableAshmemChunk::Unlock() {
+ DCHECK(locked_);
+ locked_ = false;
+ UnlockAshmemRegion(fd_, offset_, size_);
+}
+
+void* DiscardableAshmemChunk::Memory() const {
+ return address_;
+}
+
+// Note that |ashmem_region| must outlive |this|.
+DiscardableAshmemChunk::DiscardableAshmemChunk(AshmemRegion* ashmem_region,
+ int fd,
+ void* address,
+ size_t offset,
+ size_t size)
+ : ashmem_region_(ashmem_region),
+ fd_(fd),
+ address_(address),
+ offset_(offset),
+ size_(size),
+ locked_(true) {
+}
+
+DiscardableMemoryAshmemAllocator::DiscardableMemoryAshmemAllocator(
const std::string& name,
size_t ashmem_region_size)
: name_(name),
@@ -489,16 +464,15 @@ DiscardableMemoryAllocator::DiscardableMemoryAllocator(
DCHECK_GE(ashmem_region_size_, kMinAshmemRegionSize);
}
-DiscardableMemoryAllocator::~DiscardableMemoryAllocator() {
- DCHECK(thread_checker_.CalledOnValidThread());
+DiscardableMemoryAshmemAllocator::~DiscardableMemoryAshmemAllocator() {
DCHECK(ashmem_regions_.empty());
}
-scoped_ptr<DiscardableMemory> DiscardableMemoryAllocator::Allocate(
+scoped_ptr<DiscardableAshmemChunk> DiscardableMemoryAshmemAllocator::Allocate(
size_t size) {
const size_t aligned_size = AlignToNextPage(size);
if (!aligned_size)
- return scoped_ptr<DiscardableMemory>();
+ return scoped_ptr<DiscardableAshmemChunk>();
// TODO(pliard): make this function less naive by e.g. moving the free chunks
// multiset to the allocator itself in order to decrease even more
// fragmentation/speedup allocation. Note that there should not be more than a
@@ -507,7 +481,7 @@ scoped_ptr<DiscardableMemory> DiscardableMemoryAllocator::Allocate(
DCHECK_LE(ashmem_regions_.size(), 5U);
for (ScopedVector<AshmemRegion>::iterator it = ashmem_regions_.begin();
it != ashmem_regions_.end(); ++it) {
- scoped_ptr<DiscardableMemory> memory(
+ scoped_ptr<DiscardableAshmemChunk> memory(
(*it)->Allocate_Locked(size, aligned_size));
if (memory)
return memory.Pass();
@@ -528,15 +502,15 @@ scoped_ptr<DiscardableMemory> DiscardableMemoryAllocator::Allocate(
return ashmem_regions_.back()->Allocate_Locked(size, aligned_size);
}
// TODO(pliard): consider adding an histogram to see how often this happens.
- return scoped_ptr<DiscardableMemory>();
+ return scoped_ptr<DiscardableAshmemChunk>();
}
-size_t DiscardableMemoryAllocator::last_ashmem_region_size() const {
+size_t DiscardableMemoryAshmemAllocator::last_ashmem_region_size() const {
AutoLock auto_lock(lock_);
return last_ashmem_region_size_;
}
-void DiscardableMemoryAllocator::DeleteAshmemRegion_Locked(
+void DiscardableMemoryAshmemAllocator::DeleteAshmemRegion_Locked(
AshmemRegion* region) {
lock_.AssertAcquired();
// Note that there should not be more than a couple of ashmem region instances
diff --git a/base/memory/discardable_memory_ashmem_allocator.h b/base/memory/discardable_memory_ashmem_allocator.h
new file mode 100644
index 0000000..5106e1a
--- /dev/null
+++ b/base/memory/discardable_memory_ashmem_allocator.h
@@ -0,0 +1,93 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MEMORY_DISCARDABLE_MEMORY_ASHMEM_ALLOCATOR_H_
+#define BASE_MEMORY_DISCARDABLE_MEMORY_ASHMEM_ALLOCATOR_H_
+
+#include <string>
+
+#include "base/base_export.h"
+#include "base/basictypes.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/memory/scoped_vector.h"
+#include "base/synchronization/lock.h"
+
+namespace base {
+namespace internal {
+
+class AshmemRegion;
+
+// Internal class, whose instances are returned to the client of the allocator
+// (e.g. DiscardableMemoryAshmem), that mimicks the DiscardableMemory interface.
+class BASE_EXPORT_PRIVATE DiscardableAshmemChunk {
+ public:
+ ~DiscardableAshmemChunk();
+
+ // Returns whether the memory is still resident.
+ bool Lock();
+
+ void Unlock();
+
+ void* Memory() const;
+
+ private:
+ friend class AshmemRegion;
+
+ DiscardableAshmemChunk(AshmemRegion* ashmem_region,
+ int fd,
+ void* address,
+ size_t offset,
+ size_t size);
+
+ AshmemRegion* const ashmem_region_;
+ const int fd_;
+ void* const address_;
+ const size_t offset_;
+ const size_t size_;
+ bool locked_;
+
+ DISALLOW_COPY_AND_ASSIGN(DiscardableAshmemChunk);
+};
+
+// Ashmem regions are backed by a file (descriptor) therefore they are a limited
+// resource. This allocator minimizes the problem by allocating large ashmem
+// regions internally and returning smaller chunks to the client.
+// Allocated chunks are systematically aligned on a page boundary therefore this
+// allocator should not be used for small allocations.
+class BASE_EXPORT_PRIVATE DiscardableMemoryAshmemAllocator {
+ public:
+ // Note that |name| is only used for debugging/measurement purposes.
+ // |ashmem_region_size| is the size that will be used to create the underlying
+ // ashmem regions and is expected to be greater or equal than 32 MBytes.
+ DiscardableMemoryAshmemAllocator(const std::string& name,
+ size_t ashmem_region_size);
+
+ ~DiscardableMemoryAshmemAllocator();
+
+ // Note that the allocator must outlive the returned DiscardableAshmemChunk
+ // instance.
+ scoped_ptr<DiscardableAshmemChunk> Allocate(size_t size);
+
+ // Returns the size of the last ashmem region which was created. This is used
+ // for testing only.
+ size_t last_ashmem_region_size() const;
+
+ private:
+ friend class AshmemRegion;
+
+ void DeleteAshmemRegion_Locked(AshmemRegion* region);
+
+ const std::string name_;
+ const size_t ashmem_region_size_;
+ mutable Lock lock_;
+ size_t last_ashmem_region_size_;
+ ScopedVector<AshmemRegion> ashmem_regions_;
+
+ DISALLOW_COPY_AND_ASSIGN(DiscardableMemoryAshmemAllocator);
+};
+
+} // namespace internal
+} // namespace base
+
+#endif // BASE_MEMORY_DISCARDABLE_MEMORY_ASHMEM_ALLOCATOR_H_
diff --git a/base/memory/discardable_memory_allocator_android_unittest.cc b/base/memory/discardable_memory_ashmem_allocator_unittest.cc
index b2e8d55..a20a418 100644
--- a/base/memory/discardable_memory_allocator_android_unittest.cc
+++ b/base/memory/discardable_memory_ashmem_allocator_unittest.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "base/memory/discardable_memory_allocator_android.h"
+#include "base/memory/discardable_memory_ashmem_allocator.h"
#include <sys/types.h>
#include <unistd.h>
@@ -26,43 +26,44 @@ const size_t kPageSize = 4096;
const size_t kMaxAllowedAllocationSize =
std::numeric_limits<size_t>::max() - kPageSize + 1;
-class DiscardableMemoryAllocatorTest : public testing::Test {
+class DiscardableMemoryAshmemAllocatorTest : public testing::Test {
protected:
- DiscardableMemoryAllocatorTest()
+ DiscardableMemoryAshmemAllocatorTest()
: allocator_(kAllocatorName, kAshmemRegionSizeForTesting) {
}
- DiscardableMemoryAllocator allocator_;
+ DiscardableMemoryAshmemAllocator allocator_;
};
-void WriteToDiscardableMemory(DiscardableMemory* memory, size_t size) {
+void WriteToDiscardableAshmemChunk(DiscardableAshmemChunk* memory,
+ size_t size) {
// Write to the first and the last pages only to avoid paging in up to 64
// MBytes.
static_cast<char*>(memory->Memory())[0] = 'a';
static_cast<char*>(memory->Memory())[size - 1] = 'a';
}
-TEST_F(DiscardableMemoryAllocatorTest, Basic) {
+TEST_F(DiscardableMemoryAshmemAllocatorTest, Basic) {
const size_t size = 128;
- scoped_ptr<DiscardableMemory> memory(allocator_.Allocate(size));
+ scoped_ptr<DiscardableAshmemChunk> memory(allocator_.Allocate(size));
ASSERT_TRUE(memory);
- WriteToDiscardableMemory(memory.get(), size);
+ WriteToDiscardableAshmemChunk(memory.get(), size);
}
-TEST_F(DiscardableMemoryAllocatorTest, ZeroAllocationIsNotSupported) {
- scoped_ptr<DiscardableMemory> memory(allocator_.Allocate(0));
+TEST_F(DiscardableMemoryAshmemAllocatorTest, ZeroAllocationIsNotSupported) {
+ scoped_ptr<DiscardableAshmemChunk> memory(allocator_.Allocate(0));
ASSERT_FALSE(memory);
}
-TEST_F(DiscardableMemoryAllocatorTest, TooLargeAllocationFails) {
- scoped_ptr<DiscardableMemory> memory(
+TEST_F(DiscardableMemoryAshmemAllocatorTest, TooLargeAllocationFails) {
+ scoped_ptr<DiscardableAshmemChunk> memory(
allocator_.Allocate(kMaxAllowedAllocationSize + 1));
// Page-alignment would have caused an overflow resulting in a small
// allocation if the input size wasn't checked correctly.
ASSERT_FALSE(memory);
}
-TEST_F(DiscardableMemoryAllocatorTest,
+TEST_F(DiscardableMemoryAshmemAllocatorTest,
AshmemRegionsAreNotSmallerThanRequestedSize) {
// The creation of the underlying ashmem region is expected to fail since
// there should not be enough room in the address space. When ashmem creation
@@ -70,43 +71,43 @@ TEST_F(DiscardableMemoryAllocatorTest,
// size should not be smaller than the size the user requested so the
// allocation here should just fail (and not succeed with the minimum ashmem
// region size).
- scoped_ptr<DiscardableMemory> memory(
+ scoped_ptr<DiscardableAshmemChunk> memory(
allocator_.Allocate(kMaxAllowedAllocationSize));
ASSERT_FALSE(memory);
}
-TEST_F(DiscardableMemoryAllocatorTest, AshmemRegionsAreAlwaysPageAligned) {
+TEST_F(DiscardableMemoryAshmemAllocatorTest,
+ AshmemRegionsAreAlwaysPageAligned) {
// Use a separate allocator here so that we can override the ashmem region
// size.
- DiscardableMemoryAllocator allocator(
+ DiscardableMemoryAshmemAllocator allocator(
kAllocatorName, kMaxAllowedAllocationSize);
- scoped_ptr<DiscardableMemory> memory(allocator.Allocate(kPageSize));
+ scoped_ptr<DiscardableAshmemChunk> memory(allocator.Allocate(kPageSize));
ASSERT_TRUE(memory);
EXPECT_GT(kMaxAllowedAllocationSize, allocator.last_ashmem_region_size());
ASSERT_TRUE(allocator.last_ashmem_region_size() % kPageSize == 0);
}
-TEST_F(DiscardableMemoryAllocatorTest, LargeAllocation) {
- // Note that large allocations should just use DiscardableMemoryAndroidSimple
- // instead.
+TEST_F(DiscardableMemoryAshmemAllocatorTest, LargeAllocation) {
const size_t size = 64 * 1024 * 1024;
- scoped_ptr<DiscardableMemory> memory(allocator_.Allocate(size));
+ scoped_ptr<DiscardableAshmemChunk> memory(allocator_.Allocate(size));
ASSERT_TRUE(memory);
- WriteToDiscardableMemory(memory.get(), size);
+ WriteToDiscardableAshmemChunk(memory.get(), size);
}
-TEST_F(DiscardableMemoryAllocatorTest, ChunksArePageAligned) {
- scoped_ptr<DiscardableMemory> memory(allocator_.Allocate(kPageSize));
+TEST_F(DiscardableMemoryAshmemAllocatorTest, ChunksArePageAligned) {
+ scoped_ptr<DiscardableAshmemChunk> memory(allocator_.Allocate(kPageSize));
ASSERT_TRUE(memory);
EXPECT_EQ(0U, reinterpret_cast<uint64_t>(memory->Memory()) % kPageSize);
- WriteToDiscardableMemory(memory.get(), kPageSize);
+ WriteToDiscardableAshmemChunk(memory.get(), kPageSize);
}
-TEST_F(DiscardableMemoryAllocatorTest, AllocateFreeAllocate) {
- scoped_ptr<DiscardableMemory> memory(allocator_.Allocate(kPageSize));
+TEST_F(DiscardableMemoryAshmemAllocatorTest, AllocateFreeAllocate) {
+ scoped_ptr<DiscardableAshmemChunk> memory(allocator_.Allocate(kPageSize));
// Extra allocation that prevents the region from being deleted when |memory|
// gets deleted.
- scoped_ptr<DiscardableMemory> memory_lock(allocator_.Allocate(kPageSize));
+ scoped_ptr<DiscardableAshmemChunk> memory_lock(
+ allocator_.Allocate(kPageSize));
ASSERT_TRUE(memory);
void* const address = memory->Memory();
memory->Unlock(); // Tests that the reused chunk is being locked correctly.
@@ -115,11 +116,12 @@ TEST_F(DiscardableMemoryAllocatorTest, AllocateFreeAllocate) {
ASSERT_TRUE(memory);
// The previously freed chunk should be reused.
EXPECT_EQ(address, memory->Memory());
- WriteToDiscardableMemory(memory.get(), kPageSize);
+ WriteToDiscardableAshmemChunk(memory.get(), kPageSize);
}
-TEST_F(DiscardableMemoryAllocatorTest, FreeingWholeAshmemRegionClosesAshmem) {
- scoped_ptr<DiscardableMemory> memory(allocator_.Allocate(kPageSize));
+TEST_F(DiscardableMemoryAshmemAllocatorTest,
+ FreeingWholeAshmemRegionClosesAshmem) {
+ scoped_ptr<DiscardableAshmemChunk> memory(allocator_.Allocate(kPageSize));
ASSERT_TRUE(memory);
const int kMagic = 0xdeadbeef;
*static_cast<int*>(memory->Memory()) = kMagic;
@@ -131,12 +133,15 @@ TEST_F(DiscardableMemoryAllocatorTest, FreeingWholeAshmemRegionClosesAshmem) {
EXPECT_NE(kMagic, *static_cast<const int*>(memory->Memory()));
}
-TEST_F(DiscardableMemoryAllocatorTest, AllocateUsesBestFitAlgorithm) {
- scoped_ptr<DiscardableMemory> memory1(allocator_.Allocate(3 * kPageSize));
+TEST_F(DiscardableMemoryAshmemAllocatorTest, AllocateUsesBestFitAlgorithm) {
+ scoped_ptr<DiscardableAshmemChunk> memory1(
+ allocator_.Allocate(3 * kPageSize));
ASSERT_TRUE(memory1);
- scoped_ptr<DiscardableMemory> memory2(allocator_.Allocate(2 * kPageSize));
+ scoped_ptr<DiscardableAshmemChunk> memory2(
+ allocator_.Allocate(2 * kPageSize));
ASSERT_TRUE(memory2);
- scoped_ptr<DiscardableMemory> memory3(allocator_.Allocate(1 * kPageSize));
+ scoped_ptr<DiscardableAshmemChunk> memory3(
+ allocator_.Allocate(1 * kPageSize));
ASSERT_TRUE(memory3);
void* const address_3 = memory3->Memory();
memory1.reset();
@@ -146,17 +151,17 @@ TEST_F(DiscardableMemoryAllocatorTest, AllocateUsesBestFitAlgorithm) {
ASSERT_TRUE(memory1);
// The chunk whose size is closest to the requested size should be reused.
EXPECT_EQ(address_3, memory1->Memory());
- WriteToDiscardableMemory(memory1.get(), kPageSize);
+ WriteToDiscardableAshmemChunk(memory1.get(), kPageSize);
}
-TEST_F(DiscardableMemoryAllocatorTest, MergeFreeChunks) {
- scoped_ptr<DiscardableMemory> memory1(allocator_.Allocate(kPageSize));
+TEST_F(DiscardableMemoryAshmemAllocatorTest, MergeFreeChunks) {
+ scoped_ptr<DiscardableAshmemChunk> memory1(allocator_.Allocate(kPageSize));
ASSERT_TRUE(memory1);
- scoped_ptr<DiscardableMemory> memory2(allocator_.Allocate(kPageSize));
+ scoped_ptr<DiscardableAshmemChunk> memory2(allocator_.Allocate(kPageSize));
ASSERT_TRUE(memory2);
- scoped_ptr<DiscardableMemory> memory3(allocator_.Allocate(kPageSize));
+ scoped_ptr<DiscardableAshmemChunk> memory3(allocator_.Allocate(kPageSize));
ASSERT_TRUE(memory3);
- scoped_ptr<DiscardableMemory> memory4(allocator_.Allocate(kPageSize));
+ scoped_ptr<DiscardableAshmemChunk> memory4(allocator_.Allocate(kPageSize));
ASSERT_TRUE(memory4);
void* const memory1_address = memory1->Memory();
memory1.reset();
@@ -168,10 +173,12 @@ TEST_F(DiscardableMemoryAllocatorTest, MergeFreeChunks) {
EXPECT_EQ(memory1_address, memory1->Memory());
}
-TEST_F(DiscardableMemoryAllocatorTest, MergeFreeChunksAdvanced) {
- scoped_ptr<DiscardableMemory> memory1(allocator_.Allocate(4 * kPageSize));
+TEST_F(DiscardableMemoryAshmemAllocatorTest, MergeFreeChunksAdvanced) {
+ scoped_ptr<DiscardableAshmemChunk> memory1(
+ allocator_.Allocate(4 * kPageSize));
ASSERT_TRUE(memory1);
- scoped_ptr<DiscardableMemory> memory2(allocator_.Allocate(4 * kPageSize));
+ scoped_ptr<DiscardableAshmemChunk> memory2(
+ allocator_.Allocate(4 * kPageSize));
ASSERT_TRUE(memory2);
void* const memory1_address = memory1->Memory();
memory1.reset();
@@ -185,15 +192,18 @@ TEST_F(DiscardableMemoryAllocatorTest, MergeFreeChunksAdvanced) {
static_cast<const char*>(memory1_address) + 2 * kPageSize);
}
-TEST_F(DiscardableMemoryAllocatorTest, MergeFreeChunksAdvanced2) {
- scoped_ptr<DiscardableMemory> memory1(allocator_.Allocate(4 * kPageSize));
+TEST_F(DiscardableMemoryAshmemAllocatorTest, MergeFreeChunksAdvanced2) {
+ scoped_ptr<DiscardableAshmemChunk> memory1(
+ allocator_.Allocate(4 * kPageSize));
ASSERT_TRUE(memory1);
- scoped_ptr<DiscardableMemory> memory2(allocator_.Allocate(4 * kPageSize));
+ scoped_ptr<DiscardableAshmemChunk> memory2(
+ allocator_.Allocate(4 * kPageSize));
ASSERT_TRUE(memory2);
void* const memory1_address = memory1->Memory();
memory1.reset();
memory1 = allocator_.Allocate(2 * kPageSize);
- scoped_ptr<DiscardableMemory> memory3(allocator_.Allocate(2 * kPageSize));
+ scoped_ptr<DiscardableAshmemChunk> memory3(
+ allocator_.Allocate(2 * kPageSize));
// At this point, the region should be in this state:
// 8 KBytes (used), 8 KBytes (used), 16 KBytes (used).
memory3.reset();
@@ -206,14 +216,18 @@ TEST_F(DiscardableMemoryAllocatorTest, MergeFreeChunksAdvanced2) {
static_cast<const char*>(memory1_address) + 2 * kPageSize);
}
-TEST_F(DiscardableMemoryAllocatorTest, MergeFreeChunksAndDeleteAshmemRegion) {
- scoped_ptr<DiscardableMemory> memory1(allocator_.Allocate(4 * kPageSize));
+TEST_F(DiscardableMemoryAshmemAllocatorTest,
+ MergeFreeChunksAndDeleteAshmemRegion) {
+ scoped_ptr<DiscardableAshmemChunk> memory1(
+ allocator_.Allocate(4 * kPageSize));
ASSERT_TRUE(memory1);
- scoped_ptr<DiscardableMemory> memory2(allocator_.Allocate(4 * kPageSize));
+ scoped_ptr<DiscardableAshmemChunk> memory2(
+ allocator_.Allocate(4 * kPageSize));
ASSERT_TRUE(memory2);
memory1.reset();
memory1 = allocator_.Allocate(2 * kPageSize);
- scoped_ptr<DiscardableMemory> memory3(allocator_.Allocate(2 * kPageSize));
+ scoped_ptr<DiscardableAshmemChunk> memory3(
+ allocator_.Allocate(2 * kPageSize));
// At this point, the region should be in this state:
// 8 KBytes (used), 8 KBytes (used), 16 KBytes (used).
memory1.reset();
@@ -228,13 +242,13 @@ TEST_F(DiscardableMemoryAllocatorTest, MergeFreeChunksAndDeleteAshmemRegion) {
EXPECT_NE(kMagic, *static_cast<int*>(memory2->Memory()));
}
-TEST_F(DiscardableMemoryAllocatorTest,
+TEST_F(DiscardableMemoryAshmemAllocatorTest,
TooLargeFreeChunksDontCauseTooMuchFragmentationWhenRecycled) {
// Keep |memory_1| below allocated so that the ashmem region doesn't get
// closed when |memory_2| is deleted.
- scoped_ptr<DiscardableMemory> memory_1(allocator_.Allocate(64 * 1024));
+ scoped_ptr<DiscardableAshmemChunk> memory_1(allocator_.Allocate(64 * 1024));
ASSERT_TRUE(memory_1);
- scoped_ptr<DiscardableMemory> memory_2(allocator_.Allocate(32 * 1024));
+ scoped_ptr<DiscardableAshmemChunk> memory_2(allocator_.Allocate(32 * 1024));
ASSERT_TRUE(memory_2);
void* const address = memory_2->Memory();
memory_2.reset();
@@ -242,40 +256,41 @@ TEST_F(DiscardableMemoryAllocatorTest,
memory_2 = allocator_.Allocate(size);
ASSERT_TRUE(memory_2);
EXPECT_EQ(address, memory_2->Memory());
- WriteToDiscardableMemory(memory_2.get(), size);
- scoped_ptr<DiscardableMemory> memory_3(allocator_.Allocate(size));
+ WriteToDiscardableAshmemChunk(memory_2.get(), size);
+ scoped_ptr<DiscardableAshmemChunk> memory_3(allocator_.Allocate(size));
// The unused tail (16 KBytes large) of the previously freed chunk should be
// reused.
EXPECT_EQ(static_cast<char*>(address) + size, memory_3->Memory());
- WriteToDiscardableMemory(memory_3.get(), size);
+ WriteToDiscardableAshmemChunk(memory_3.get(), size);
}
-TEST_F(DiscardableMemoryAllocatorTest, UseMultipleAshmemRegions) {
+TEST_F(DiscardableMemoryAshmemAllocatorTest, UseMultipleAshmemRegions) {
// Leave one page untouched at the end of the ashmem region.
const size_t size = kAshmemRegionSizeForTesting - kPageSize;
- scoped_ptr<DiscardableMemory> memory1(allocator_.Allocate(size));
+ scoped_ptr<DiscardableAshmemChunk> memory1(allocator_.Allocate(size));
ASSERT_TRUE(memory1);
- WriteToDiscardableMemory(memory1.get(), size);
+ WriteToDiscardableAshmemChunk(memory1.get(), size);
- scoped_ptr<DiscardableMemory> memory2(
+ scoped_ptr<DiscardableAshmemChunk> memory2(
allocator_.Allocate(kAshmemRegionSizeForTesting));
ASSERT_TRUE(memory2);
- WriteToDiscardableMemory(memory2.get(), kAshmemRegionSizeForTesting);
+ WriteToDiscardableAshmemChunk(memory2.get(), kAshmemRegionSizeForTesting);
// The last page of the first ashmem region should be used for this
// allocation.
- scoped_ptr<DiscardableMemory> memory3(allocator_.Allocate(kPageSize));
+ scoped_ptr<DiscardableAshmemChunk> memory3(allocator_.Allocate(kPageSize));
ASSERT_TRUE(memory3);
- WriteToDiscardableMemory(memory3.get(), kPageSize);
+ WriteToDiscardableAshmemChunk(memory3.get(), kPageSize);
EXPECT_EQ(memory3->Memory(), static_cast<char*>(memory1->Memory()) + size);
}
-TEST_F(DiscardableMemoryAllocatorTest,
+TEST_F(DiscardableMemoryAshmemAllocatorTest,
HighestAllocatedChunkPointerIsUpdatedWhenHighestChunkGetsSplit) {
// Prevents the ashmem region from getting closed when |memory2| gets freed.
- scoped_ptr<DiscardableMemory> memory1(allocator_.Allocate(kPageSize));
+ scoped_ptr<DiscardableAshmemChunk> memory1(allocator_.Allocate(kPageSize));
ASSERT_TRUE(memory1);
- scoped_ptr<DiscardableMemory> memory2(allocator_.Allocate(4 * kPageSize));
+ scoped_ptr<DiscardableAshmemChunk> memory2(
+ allocator_.Allocate(4 * kPageSize));
ASSERT_TRUE(memory2);
memory2.reset();
@@ -288,7 +303,8 @@ TEST_F(DiscardableMemoryAllocatorTest,
// Allocate more than 3 * |kPageSize| so that the free chunk of size 3 *
// |kPageSize| is not reused and |highest_allocated_chunk_| gets used instead.
- scoped_ptr<DiscardableMemory> memory3(allocator_.Allocate(4 * kPageSize));
+ scoped_ptr<DiscardableAshmemChunk> memory3(
+ allocator_.Allocate(4 * kPageSize));
ASSERT_TRUE(memory3);
// Deleting |memory3| (whose size is 4 * |kPageSize|) should result in a merge
diff --git a/base/memory/discardable_memory_emulated.cc b/base/memory/discardable_memory_emulated.cc
index c9effe6..415a451 100644
--- a/base/memory/discardable_memory_emulated.cc
+++ b/base/memory/discardable_memory_emulated.cc
@@ -17,9 +17,10 @@ base::LazyInstance<internal::DiscardableMemoryManager>::Leaky g_manager =
namespace internal {
-DiscardableMemoryEmulated::DiscardableMemoryEmulated(size_t size)
- : is_locked_(false) {
- g_manager.Pointer()->Register(this, size);
+DiscardableMemoryEmulated::DiscardableMemoryEmulated(size_t bytes)
+ : bytes_(bytes),
+ is_locked_(false) {
+ g_manager.Pointer()->Register(this, bytes);
}
DiscardableMemoryEmulated::~DiscardableMemoryEmulated() {
@@ -71,11 +72,11 @@ void* DiscardableMemoryEmulated::Memory() const {
return memory_.get();
}
-bool DiscardableMemoryEmulated::AllocateAndAcquireLock(size_t bytes) {
+bool DiscardableMemoryEmulated::AllocateAndAcquireLock() {
if (memory_)
return true;
- memory_.reset(new uint8[bytes]);
+ memory_.reset(new uint8[bytes_]);
return false;
}
diff --git a/base/memory/discardable_memory_emulated.h b/base/memory/discardable_memory_emulated.h
index 35ce08e..3242245 100644
--- a/base/memory/discardable_memory_emulated.h
+++ b/base/memory/discardable_memory_emulated.h
@@ -16,7 +16,7 @@ class DiscardableMemoryEmulated
: public DiscardableMemory,
public internal::DiscardableMemoryManagerAllocation {
public:
- explicit DiscardableMemoryEmulated(size_t size);
+ explicit DiscardableMemoryEmulated(size_t bytes);
virtual ~DiscardableMemoryEmulated();
static void RegisterMemoryPressureListeners();
@@ -32,11 +32,12 @@ class DiscardableMemoryEmulated
virtual void* Memory() const OVERRIDE;
// Overridden from internal::DiscardableMemoryManagerAllocation:
- virtual bool AllocateAndAcquireLock(size_t bytes) OVERRIDE;
+ virtual bool AllocateAndAcquireLock() OVERRIDE;
virtual void ReleaseLock() OVERRIDE {}
virtual void Purge() OVERRIDE;
private:
+ const size_t bytes_;
scoped_ptr<uint8[]> memory_;
bool is_locked_;
diff --git a/base/memory/discardable_memory_linux.cc b/base/memory/discardable_memory_linux.cc
index e44b7c2..fbd496d 100644
--- a/base/memory/discardable_memory_linux.cc
+++ b/base/memory/discardable_memory_linux.cc
@@ -35,7 +35,7 @@ scoped_ptr<DiscardableMemory> DiscardableMemory::CreateLockedMemoryWithType(
DiscardableMemoryType type, size_t size) {
switch (type) {
case DISCARDABLE_MEMORY_TYPE_NONE:
- case DISCARDABLE_MEMORY_TYPE_ANDROID:
+ case DISCARDABLE_MEMORY_TYPE_ASHMEM:
case DISCARDABLE_MEMORY_TYPE_MAC:
return scoped_ptr<DiscardableMemory>();
case DISCARDABLE_MEMORY_TYPE_EMULATED: {
@@ -61,11 +61,6 @@ scoped_ptr<DiscardableMemory> DiscardableMemory::CreateLockedMemoryWithType(
}
// static
-bool DiscardableMemory::PurgeForTestingSupported() {
- return true;
-}
-
-// static
void DiscardableMemory::PurgeForTesting() {
internal::DiscardableMemoryEmulated::PurgeForTesting();
}
diff --git a/base/memory/discardable_memory_mac.cc b/base/memory/discardable_memory_mac.cc
index 40a9739..8cd5905 100644
--- a/base/memory/discardable_memory_mac.cc
+++ b/base/memory/discardable_memory_mac.cc
@@ -115,7 +115,7 @@ scoped_ptr<DiscardableMemory> DiscardableMemory::CreateLockedMemoryWithType(
DiscardableMemoryType type, size_t size) {
switch (type) {
case DISCARDABLE_MEMORY_TYPE_NONE:
- case DISCARDABLE_MEMORY_TYPE_ANDROID:
+ case DISCARDABLE_MEMORY_TYPE_ASHMEM:
return scoped_ptr<DiscardableMemory>();
case DISCARDABLE_MEMORY_TYPE_MAC: {
scoped_ptr<DiscardableMemoryMac> memory(new DiscardableMemoryMac(size));
@@ -147,11 +147,6 @@ scoped_ptr<DiscardableMemory> DiscardableMemory::CreateLockedMemoryWithType(
}
// static
-bool DiscardableMemory::PurgeForTestingSupported() {
- return true;
-}
-
-// static
void DiscardableMemory::PurgeForTesting() {
int state = 0;
vm_purgable_control(mach_task_self(), 0, VM_PURGABLE_PURGE_ALL, &state);
diff --git a/base/memory/discardable_memory_manager.cc b/base/memory/discardable_memory_manager.cc
index 1dff3e4d..6ede8a3 100644
--- a/base/memory/discardable_memory_manager.cc
+++ b/base/memory/discardable_memory_manager.cc
@@ -114,7 +114,7 @@ bool DiscardableMemoryManager::AcquireLock(Allocation* allocation,
if (std::numeric_limits<size_t>::max() - bytes_required < bytes_allocated_)
return false;
- *purged = !allocation->AllocateAndAcquireLock(info->bytes);
+ *purged = !allocation->AllocateAndAcquireLock();
info->purgable = false;
if (bytes_required) {
bytes_allocated_ += bytes_required;
diff --git a/base/memory/discardable_memory_manager.h b/base/memory/discardable_memory_manager.h
index 8447521..3c9bd61 100644
--- a/base/memory/discardable_memory_manager.h
+++ b/base/memory/discardable_memory_manager.h
@@ -21,7 +21,7 @@ class DiscardableMemoryManagerAllocation {
// Allocate and acquire a lock that prevents the allocation from being purged
// by the system. Returns true if memory was previously allocated and is still
// resident.
- virtual bool AllocateAndAcquireLock(size_t bytes) = 0;
+ virtual bool AllocateAndAcquireLock() = 0;
// Release a previously acquired lock on the allocation so that it can be
// purged by the system.
diff --git a/base/memory/discardable_memory_manager_unittest.cc b/base/memory/discardable_memory_manager_unittest.cc
index 58a9603..c59d82e 100644
--- a/base/memory/discardable_memory_manager_unittest.cc
+++ b/base/memory/discardable_memory_manager_unittest.cc
@@ -19,7 +19,7 @@ class TestAllocationImpl : public internal::DiscardableMemoryManagerAllocation {
virtual ~TestAllocationImpl() { DCHECK(!is_locked_); }
// Overridden from internal::DiscardableMemoryManagerAllocation:
- virtual bool AllocateAndAcquireLock(size_t bytes) OVERRIDE {
+ virtual bool AllocateAndAcquireLock() OVERRIDE {
bool was_allocated = is_allocated_;
is_allocated_ = true;
DCHECK(!is_locked_);
diff --git a/base/memory/discardable_memory_unittest.cc b/base/memory/discardable_memory_unittest.cc
index d4c2955..dc0e2cd 100644
--- a/base/memory/discardable_memory_unittest.cc
+++ b/base/memory/discardable_memory_unittest.cc
@@ -47,7 +47,7 @@ TEST_P(DiscardableMemoryTest, IsNamed) {
bool IsNativeType(DiscardableMemoryType type) {
return
- type == DISCARDABLE_MEMORY_TYPE_ANDROID ||
+ type == DISCARDABLE_MEMORY_TYPE_ASHMEM ||
type == DISCARDABLE_MEMORY_TYPE_MAC;
}
@@ -91,11 +91,8 @@ TEST_P(DiscardableMemoryTest, DeleteWhileLocked) {
ASSERT_TRUE(memory);
}
-#if !defined(OS_ANDROID)
// Test forced purging.
TEST_P(DiscardableMemoryTest, Purge) {
- ASSERT_TRUE(DiscardableMemory::PurgeForTestingSupported());
-
const scoped_ptr<DiscardableMemory> memory(CreateLockedMemory(kSize));
ASSERT_TRUE(memory);
memory->Unlock();
@@ -103,7 +100,6 @@ TEST_P(DiscardableMemoryTest, Purge) {
DiscardableMemory::PurgeForTesting();
EXPECT_EQ(DISCARDABLE_MEMORY_LOCK_STATUS_PURGED, memory->Lock());
}
-#endif // !OS_ANDROID
#if !defined(NDEBUG) && !defined(OS_ANDROID)
// Death tests are not supported with Android APKs.
diff --git a/base/memory/discardable_memory_win.cc b/base/memory/discardable_memory_win.cc
index e44b7c2..fbd496d 100644
--- a/base/memory/discardable_memory_win.cc
+++ b/base/memory/discardable_memory_win.cc
@@ -35,7 +35,7 @@ scoped_ptr<DiscardableMemory> DiscardableMemory::CreateLockedMemoryWithType(
DiscardableMemoryType type, size_t size) {
switch (type) {
case DISCARDABLE_MEMORY_TYPE_NONE:
- case DISCARDABLE_MEMORY_TYPE_ANDROID:
+ case DISCARDABLE_MEMORY_TYPE_ASHMEM:
case DISCARDABLE_MEMORY_TYPE_MAC:
return scoped_ptr<DiscardableMemory>();
case DISCARDABLE_MEMORY_TYPE_EMULATED: {
@@ -61,11 +61,6 @@ scoped_ptr<DiscardableMemory> DiscardableMemory::CreateLockedMemoryWithType(
}
// static
-bool DiscardableMemory::PurgeForTestingSupported() {
- return true;
-}
-
-// static
void DiscardableMemory::PurgeForTesting() {
internal::DiscardableMemoryEmulated::PurgeForTesting();
}