author     reveman <reveman@chromium.org>        2015-03-11 07:27:45 -0700
committer  Commit bot <commit-bot@chromium.org>  2015-03-11 14:28:23 +0000
commit     fe90db0f1f0351a24e6a5e1f9b7e15299c831408 (patch)
tree       96648bb5131ad9880250d68706be0ea730fa0ba0 /base/memory
parent     01fe080d5c19d358c10f88673ccdad4a594d2b8a (diff)
download   chromium_src-fe90db0f1f0351a24e6a5e1f9b7e15299c831408.zip
           chromium_src-fe90db0f1f0351a24e6a5e1f9b7e15299c831408.tar.gz
           chromium_src-fe90db0f1f0351a24e6a5e1f9b7e15299c831408.tar.bz2
base: Remove DiscardableMemoryAshmem.
Remove unused DiscardableMemory implementation.

BUG=442945,422953

Review URL: https://codereview.chromium.org/963883002

Cr-Commit-Position: refs/heads/master@{#320081}
Diffstat (limited to 'base/memory')
-rw-r--r--  base/memory/BUILD.gn                                            9
-rw-r--r--  base/memory/discardable_memory.cc                               1
-rw-r--r--  base/memory/discardable_memory.h                                1
-rw-r--r--  base/memory/discardable_memory_android.cc                      46
-rw-r--r--  base/memory/discardable_memory_ashmem.cc                       75
-rw-r--r--  base/memory/discardable_memory_ashmem.h                        55
-rw-r--r--  base/memory/discardable_memory_ashmem_allocator.cc            528
-rw-r--r--  base/memory/discardable_memory_ashmem_allocator.h              93
-rw-r--r--  base/memory/discardable_memory_ashmem_allocator_unittest.cc   319
-rw-r--r--  base/memory/discardable_memory_linux.cc                         1
-rw-r--r--  base/memory/discardable_memory_mac.cc                           1
-rw-r--r--  base/memory/discardable_memory_unittest.cc                      8
-rw-r--r--  base/memory/discardable_memory_win.cc                           1
13 files changed, 7 insertions, 1131 deletions
diff --git a/base/memory/BUILD.gn b/base/memory/BUILD.gn
index 83ce4dd..535202a 100644
--- a/base/memory/BUILD.gn
+++ b/base/memory/BUILD.gn
@@ -44,15 +44,6 @@ source_set("memory") {
"weak_ptr.h",
]
- if (is_android) {
- sources += [
- "discardable_memory_ashmem.cc",
- "discardable_memory_ashmem.h",
- "discardable_memory_ashmem_allocator.cc",
- "discardable_memory_ashmem_allocator.h",
- ]
- }
-
if (is_nacl) {
sources -= [
"discardable_memory.cc",
diff --git a/base/memory/discardable_memory.cc b/base/memory/discardable_memory.cc
index 0772698..c4dcccd 100644
--- a/base/memory/discardable_memory.cc
+++ b/base/memory/discardable_memory.cc
@@ -14,7 +14,6 @@ const struct TypeNamePair {
DiscardableMemoryType type;
const char* name;
} kTypeNamePairs[] = {
- { DISCARDABLE_MEMORY_TYPE_ASHMEM, "ashmem" },
{ DISCARDABLE_MEMORY_TYPE_MACH, "mach" },
{ DISCARDABLE_MEMORY_TYPE_SHMEM, "shmem" }
};
diff --git a/base/memory/discardable_memory.h b/base/memory/discardable_memory.h
index fc1fad2..3e58bde 100644
--- a/base/memory/discardable_memory.h
+++ b/base/memory/discardable_memory.h
@@ -17,7 +17,6 @@ namespace base {
enum DiscardableMemoryType {
DISCARDABLE_MEMORY_TYPE_NONE,
- DISCARDABLE_MEMORY_TYPE_ASHMEM,
DISCARDABLE_MEMORY_TYPE_MACH,
DISCARDABLE_MEMORY_TYPE_SHMEM
};
diff --git a/base/memory/discardable_memory_android.cc b/base/memory/discardable_memory_android.cc
index d58e05c..f502c53 100644
--- a/base/memory/discardable_memory_android.cc
+++ b/base/memory/discardable_memory_android.cc
@@ -4,50 +4,16 @@
#include "base/memory/discardable_memory.h"
-#include "base/basictypes.h"
-#include "base/compiler_specific.h"
-#include "base/lazy_instance.h"
#include "base/logging.h"
-#include "base/memory/discardable_memory_ashmem.h"
-#include "base/memory/discardable_memory_ashmem_allocator.h"
#include "base/memory/discardable_memory_shmem.h"
-#include "base/sys_info.h"
namespace base {
-namespace {
-
-const char kAshmemAllocatorName[] = "DiscardableMemoryAshmemAllocator";
-
-// For Ashmem, have the DiscardableMemoryManager trigger userspace eviction
-// when address space usage gets too high (e.g. 512 MBytes).
-const size_t kAshmemMemoryLimit = 512 * 1024 * 1024;
-
-size_t GetOptimalAshmemRegionSizeForAllocator() {
- // Note that this may do some I/O (without hitting the disk though) so it
- // should not be called on the critical path.
- return base::SysInfo::AmountOfPhysicalMemory() / 8;
-}
-
-// Holds the shared state used for allocations.
-struct SharedState {
- SharedState()
- : manager(kAshmemMemoryLimit, kAshmemMemoryLimit, TimeDelta::Max()),
- allocator(kAshmemAllocatorName,
- GetOptimalAshmemRegionSizeForAllocator()) {}
-
- internal::DiscardableMemoryManager manager;
- internal::DiscardableMemoryAshmemAllocator allocator;
-};
-LazyInstance<SharedState>::Leaky g_shared_state = LAZY_INSTANCE_INITIALIZER;
-
-} // namespace
// static
void DiscardableMemory::GetSupportedTypes(
std::vector<DiscardableMemoryType>* types) {
const DiscardableMemoryType supported_types[] = {
- DISCARDABLE_MEMORY_TYPE_SHMEM,
- DISCARDABLE_MEMORY_TYPE_ASHMEM
+ DISCARDABLE_MEMORY_TYPE_SHMEM
};
types->assign(supported_types, supported_types + arraysize(supported_types));
}
@@ -56,16 +22,6 @@ void DiscardableMemory::GetSupportedTypes(
scoped_ptr<DiscardableMemory> DiscardableMemory::CreateLockedMemoryWithType(
DiscardableMemoryType type, size_t size) {
switch (type) {
- case DISCARDABLE_MEMORY_TYPE_ASHMEM: {
- SharedState* const shared_state = g_shared_state.Pointer();
- scoped_ptr<internal::DiscardableMemoryAshmem> memory(
- new internal::DiscardableMemoryAshmem(
- size, &shared_state->allocator, &shared_state->manager));
- if (!memory->Initialize())
- return nullptr;
-
- return memory.Pass();
- }
case DISCARDABLE_MEMORY_TYPE_SHMEM: {
scoped_ptr<internal::DiscardableMemoryShmem> memory(
new internal::DiscardableMemoryShmem(size));
diff --git a/base/memory/discardable_memory_ashmem.cc b/base/memory/discardable_memory_ashmem.cc
deleted file mode 100644
index a590e53..0000000
--- a/base/memory/discardable_memory_ashmem.cc
+++ /dev/null
@@ -1,75 +0,0 @@
-// Copyright 2014 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/memory/discardable_memory_ashmem.h"
-
-#include "base/memory/discardable_memory_ashmem_allocator.h"
-
-namespace base {
-namespace internal {
-
-DiscardableMemoryAshmem::DiscardableMemoryAshmem(
- size_t bytes,
- DiscardableMemoryAshmemAllocator* allocator,
- DiscardableMemoryManager* manager)
- : bytes_(bytes),
- allocator_(allocator),
- manager_(manager),
- is_locked_(false) {
- manager_->Register(this, bytes_);
-}
-
-DiscardableMemoryAshmem::~DiscardableMemoryAshmem() {
- if (is_locked_)
- Unlock();
-
- manager_->Unregister(this);
-}
-
-bool DiscardableMemoryAshmem::Initialize() {
- return Lock() != DISCARDABLE_MEMORY_LOCK_STATUS_FAILED;
-}
-
-DiscardableMemoryLockStatus DiscardableMemoryAshmem::Lock() {
- DCHECK(!is_locked_);
-
- bool purged = false;
- if (!manager_->AcquireLock(this, &purged))
- return DISCARDABLE_MEMORY_LOCK_STATUS_FAILED;
-
- is_locked_ = true;
- return purged ? DISCARDABLE_MEMORY_LOCK_STATUS_PURGED
- : DISCARDABLE_MEMORY_LOCK_STATUS_SUCCESS;
-}
-
-void DiscardableMemoryAshmem::Unlock() {
- DCHECK(is_locked_);
- manager_->ReleaseLock(this);
- is_locked_ = false;
-}
-
-void* DiscardableMemoryAshmem::Memory() const {
- DCHECK(is_locked_);
- DCHECK(ashmem_chunk_);
- return ashmem_chunk_->Memory();
-}
-
-bool DiscardableMemoryAshmem::AllocateAndAcquireLock() {
- if (ashmem_chunk_)
- return ashmem_chunk_->Lock();
-
- ashmem_chunk_ = allocator_->Allocate(bytes_);
- return false;
-}
-
-void DiscardableMemoryAshmem::ReleaseLock() {
- ashmem_chunk_->Unlock();
-}
-
-void DiscardableMemoryAshmem::Purge() {
- ashmem_chunk_.reset();
-}
-
-} // namespace internal
-} // namespace base
diff --git a/base/memory/discardable_memory_ashmem.h b/base/memory/discardable_memory_ashmem.h
deleted file mode 100644
index 1269cc2..0000000
--- a/base/memory/discardable_memory_ashmem.h
+++ /dev/null
@@ -1,55 +0,0 @@
-// Copyright 2014 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_MEMORY_DISCARDABLE_MEMORY_ASHMEM_H_
-#define BASE_MEMORY_DISCARDABLE_MEMORY_ASHMEM_H_
-
-#include "base/memory/discardable_memory.h"
-
-#include "base/macros.h"
-#include "base/memory/discardable_memory_manager.h"
-
-namespace base {
-namespace internal {
-
-class DiscardableAshmemChunk;
-class DiscardableMemoryAshmemAllocator;
-class DiscardableMemoryManager;
-
-class DiscardableMemoryAshmem
- : public DiscardableMemory,
- public internal::DiscardableMemoryManagerAllocation {
- public:
- explicit DiscardableMemoryAshmem(size_t bytes,
- DiscardableMemoryAshmemAllocator* allocator,
- DiscardableMemoryManager* manager);
-
- ~DiscardableMemoryAshmem() override;
-
- bool Initialize();
-
- // Overridden from DiscardableMemory:
- DiscardableMemoryLockStatus Lock() override;
- void Unlock() override;
- void* Memory() const override;
-
- // Overridden from internal::DiscardableMemoryManagerAllocation:
- bool AllocateAndAcquireLock() override;
- void ReleaseLock() override;
- void Purge() override;
-
- private:
- const size_t bytes_;
- DiscardableMemoryAshmemAllocator* const allocator_;
- DiscardableMemoryManager* const manager_;
- bool is_locked_;
- scoped_ptr<DiscardableAshmemChunk> ashmem_chunk_;
-
- DISALLOW_COPY_AND_ASSIGN(DiscardableMemoryAshmem);
-};
-
-} // namespace internal
-} // namespace base
-
-#endif // BASE_MEMORY_DISCARDABLE_MEMORY_ASHMEM_H_
diff --git a/base/memory/discardable_memory_ashmem_allocator.cc b/base/memory/discardable_memory_ashmem_allocator.cc
deleted file mode 100644
index 3d4af92..0000000
--- a/base/memory/discardable_memory_ashmem_allocator.cc
+++ /dev/null
@@ -1,528 +0,0 @@
-// Copyright 2014 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/memory/discardable_memory_ashmem_allocator.h"
-
-#include <sys/mman.h>
-#include <unistd.h>
-
-#include <algorithm>
-#include <cmath>
-#include <limits>
-#include <set>
-#include <utility>
-
-#include "base/basictypes.h"
-#include "base/containers/hash_tables.h"
-#include "base/files/file_util.h"
-#include "base/files/scoped_file.h"
-#include "base/logging.h"
-#include "base/memory/scoped_vector.h"
-#include "third_party/ashmem/ashmem.h"
-
-// The allocator consists of three parts (classes):
-// - DiscardableMemoryAshmemAllocator: entry point of all allocations (through
-// its Allocate() method) that are dispatched to the AshmemRegion instances
-// (which it owns).
-// - AshmemRegion: manages allocations and destructions inside a single large
-// (e.g. 32 MBytes) ashmem region.
-// - DiscardableAshmemChunk: class mimicking the DiscardableMemory interface
-// whose instances are returned to the client.
-
-namespace base {
-namespace {
-
-// Only tolerate fragmentation in used chunks *caused by the client* (as opposed
-// to the allocator when a free chunk is reused). The client can cause such
-// fragmentation by e.g. requesting 4097 bytes. This size would be rounded up to
-// 8192 by the allocator which would cause 4095 bytes of fragmentation (which is
-// currently the maximum allowed). If the client requests 4096 bytes and a free
-// chunk of 8192 bytes is available then the free chunk gets splitted into two
-// pieces to minimize fragmentation (since 8192 - 4096 = 4096 which is greater
-// than 4095).
-// TODO(pliard): tune this if splitting chunks too often leads to performance
-// issues.
-const size_t kMaxChunkFragmentationBytes = 4096 - 1;
-
-const size_t kMinAshmemRegionSize = 32 * 1024 * 1024;
-
-// Returns 0 if the provided size is too high to be aligned.
-size_t AlignToNextPage(size_t size) {
- const size_t kPageSize = 4096;
- DCHECK_EQ(static_cast<int>(kPageSize), getpagesize());
- if (size > std::numeric_limits<size_t>::max() - kPageSize + 1)
- return 0;
- const size_t mask = ~(kPageSize - 1);
- return (size + kPageSize - 1) & mask;
-}
-
-bool CreateAshmemRegion(const char* name,
- size_t size,
- int* out_fd,
- uintptr_t* out_address) {
- base::ScopedFD fd(ashmem_create_region(name, size));
- if (!fd.is_valid()) {
- DLOG(ERROR) << "ashmem_create_region() failed";
- return false;
- }
-
- const int err = ashmem_set_prot_region(fd.get(), PROT_READ | PROT_WRITE);
- if (err < 0) {
- DLOG(ERROR) << "Error " << err << " when setting protection of ashmem";
- return false;
- }
-
- // There is a problem using MAP_PRIVATE here. As we are constantly calling
- // Lock() and Unlock(), data could get lost if they are not written to the
- // underlying file when Unlock() gets called.
- void* const address = mmap(
- NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd.get(), 0);
- if (address == MAP_FAILED) {
- DPLOG(ERROR) << "Failed to map memory.";
- return false;
- }
-
- *out_fd = fd.release();
- *out_address = reinterpret_cast<uintptr_t>(address);
- return true;
-}
-
-bool CloseAshmemRegion(int fd, size_t size, void* address) {
- if (munmap(address, size) == -1) {
- DPLOG(ERROR) << "Failed to unmap memory.";
- close(fd);
- return false;
- }
- return close(fd) == 0;
-}
-
-bool LockAshmemRegion(int fd, size_t off, size_t size) {
- return ashmem_pin_region(fd, off, size) != ASHMEM_WAS_PURGED;
-}
-
-bool UnlockAshmemRegion(int fd, size_t off, size_t size) {
- const int failed = ashmem_unpin_region(fd, off, size);
- if (failed)
- DLOG(ERROR) << "Failed to unpin memory.";
- return !failed;
-}
-
-} // namespace
-
-namespace internal {
-
-class AshmemRegion {
- public:
- // Note that |allocator| must outlive |this|.
- static scoped_ptr<AshmemRegion> Create(
- size_t size,
- const std::string& name,
- DiscardableMemoryAshmemAllocator* allocator) {
- DCHECK_EQ(size, AlignToNextPage(size));
- int fd;
- uintptr_t base;
- if (!CreateAshmemRegion(name.c_str(), size, &fd, &base))
- return scoped_ptr<AshmemRegion>();
- return make_scoped_ptr(new AshmemRegion(fd, size, base, allocator));
- }
-
- ~AshmemRegion() {
- const bool result = CloseAshmemRegion(
- fd_, size_, reinterpret_cast<void*>(base_));
- DCHECK(result);
- DCHECK(!highest_allocated_chunk_);
- }
-
- // Returns a new instance of DiscardableAshmemChunk whose size is greater or
- // equal than |actual_size| (which is expected to be greater or equal than
- // |client_requested_size|).
- // Allocation works as follows:
- // 1) Reuse a previously freed chunk and return it if it succeeded. See
- // ReuseFreeChunk_Locked() below for more information.
- // 2) If no free chunk could be reused and the region is not big enough for
- // the requested size then NULL is returned.
- // 3) If there is enough room in the ashmem region then a new chunk is
- // returned. This new chunk starts at |offset_| which is the end of the
- // previously highest chunk in the region.
- scoped_ptr<DiscardableAshmemChunk> Allocate_Locked(
- size_t client_requested_size,
- size_t actual_size) {
- DCHECK_LE(client_requested_size, actual_size);
- allocator_->lock_.AssertAcquired();
-
- // Check that the |highest_allocated_chunk_| field doesn't contain a stale
- // pointer. It should point to either a free chunk or a used chunk.
- DCHECK(!highest_allocated_chunk_ ||
- address_to_free_chunk_map_.find(highest_allocated_chunk_) !=
- address_to_free_chunk_map_.end() ||
- used_to_previous_chunk_map_.find(highest_allocated_chunk_) !=
- used_to_previous_chunk_map_.end());
-
- scoped_ptr<DiscardableAshmemChunk> memory = ReuseFreeChunk_Locked(
- client_requested_size, actual_size);
- if (memory)
- return memory.Pass();
-
- if (size_ - offset_ < actual_size) {
- // This region does not have enough space left to hold the requested size.
- return scoped_ptr<DiscardableAshmemChunk>();
- }
-
- uintptr_t const address = base_ + offset_;
- memory.reset(
- new DiscardableAshmemChunk(this, fd_, reinterpret_cast<void*>(address),
- offset_, actual_size));
-
- used_to_previous_chunk_map_.insert(
- std::make_pair(address, highest_allocated_chunk_));
- highest_allocated_chunk_ = reinterpret_cast<uintptr_t>(address);
- offset_ += actual_size;
- DCHECK_LE(offset_, size_);
- return memory.Pass();
- }
-
- void OnChunkDeletion(uintptr_t chunk, size_t size) {
- AutoLock auto_lock(allocator_->lock_);
- MergeAndAddFreeChunk_Locked(chunk, size);
- // Note that |this| might be deleted beyond this point.
- }
-
- private:
- struct FreeChunk {
- FreeChunk() : previous_chunk(0), start(0), size(0) {}
-
- explicit FreeChunk(size_t size)
- : previous_chunk(0),
- start(0),
- size(size) {
- }
-
- FreeChunk(uintptr_t previous_chunk, uintptr_t start, size_t size)
- : previous_chunk(previous_chunk),
- start(start),
- size(size) {
- DCHECK_LT(previous_chunk, start);
- }
-
- uintptr_t const previous_chunk;
- uintptr_t const start;
- const size_t size;
-
- bool is_null() const { return !start; }
-
- bool operator<(const FreeChunk& other) const {
- return size < other.size;
- }
- };
-
- // Note that |allocator| must outlive |this|.
- AshmemRegion(int fd,
- size_t size,
- uintptr_t base,
- DiscardableMemoryAshmemAllocator* allocator)
- : fd_(fd),
- size_(size),
- base_(base),
- allocator_(allocator),
- highest_allocated_chunk_(0),
- offset_(0) {
- DCHECK_GE(fd_, 0);
- DCHECK_GE(size, kMinAshmemRegionSize);
- DCHECK(base);
- DCHECK(allocator);
- }
-
- // Tries to reuse a previously freed chunk by doing a closest size match.
- scoped_ptr<DiscardableAshmemChunk> ReuseFreeChunk_Locked(
- size_t client_requested_size,
- size_t actual_size) {
- allocator_->lock_.AssertAcquired();
- const FreeChunk reused_chunk = RemoveFreeChunkFromIterator_Locked(
- free_chunks_.lower_bound(FreeChunk(actual_size)));
- if (reused_chunk.is_null())
- return scoped_ptr<DiscardableAshmemChunk>();
-
- used_to_previous_chunk_map_.insert(
- std::make_pair(reused_chunk.start, reused_chunk.previous_chunk));
- size_t reused_chunk_size = reused_chunk.size;
- // |client_requested_size| is used below rather than |actual_size| to
- // reflect the amount of bytes that would not be usable by the client (i.e.
- // wasted). Using |actual_size| instead would not allow us to detect
- // fragmentation caused by the client if he did misaligned allocations.
- DCHECK_GE(reused_chunk.size, client_requested_size);
- const size_t fragmentation_bytes =
- reused_chunk.size - client_requested_size;
-
- if (fragmentation_bytes > kMaxChunkFragmentationBytes) {
- // Split the free chunk being recycled so that its unused tail doesn't get
- // reused (i.e. locked) which would prevent it from being evicted under
- // memory pressure.
- reused_chunk_size = actual_size;
- uintptr_t const new_chunk_start = reused_chunk.start + actual_size;
- if (reused_chunk.start == highest_allocated_chunk_) {
- // We also need to update the pointer to the highest allocated chunk in
- // case we are splitting the highest chunk.
- highest_allocated_chunk_ = new_chunk_start;
- }
- DCHECK_GT(reused_chunk.size, actual_size);
- const size_t new_chunk_size = reused_chunk.size - actual_size;
- // Note that merging is not needed here since there can't be contiguous
- // free chunks at this point.
- AddFreeChunk_Locked(
- FreeChunk(reused_chunk.start, new_chunk_start, new_chunk_size));
- }
-
- const size_t offset = reused_chunk.start - base_;
- LockAshmemRegion(fd_, offset, reused_chunk_size);
- scoped_ptr<DiscardableAshmemChunk> memory(
- new DiscardableAshmemChunk(this, fd_,
- reinterpret_cast<void*>(reused_chunk.start),
- offset, reused_chunk_size));
- return memory.Pass();
- }
-
- // Makes the chunk identified with the provided arguments free and possibly
- // merges this chunk with the previous and next contiguous ones.
- // If the provided chunk is the only one used (and going to be freed) in the
- // region then the internal ashmem region is closed so that the underlying
- // physical pages are immediately released.
- // Note that free chunks are unlocked therefore they can be reclaimed by the
- // kernel if needed (under memory pressure) but they are not immediately
- // released unfortunately since madvise(MADV_REMOVE) and
- // fallocate(FALLOC_FL_PUNCH_HOLE) don't seem to work on ashmem. This might
- // change in versions of kernel >=3.5 though. The fact that free chunks are
- // not immediately released is the reason why we are trying to minimize
- // fragmentation in order not to cause "artificial" memory pressure.
- void MergeAndAddFreeChunk_Locked(uintptr_t chunk, size_t size) {
- allocator_->lock_.AssertAcquired();
- size_t new_free_chunk_size = size;
- // Merge with the previous chunk.
- uintptr_t first_free_chunk = chunk;
- DCHECK(!used_to_previous_chunk_map_.empty());
- const hash_map<uintptr_t, uintptr_t>::iterator previous_chunk_it =
- used_to_previous_chunk_map_.find(chunk);
- DCHECK(previous_chunk_it != used_to_previous_chunk_map_.end());
- uintptr_t previous_chunk = previous_chunk_it->second;
- used_to_previous_chunk_map_.erase(previous_chunk_it);
-
- if (previous_chunk) {
- const FreeChunk free_chunk = RemoveFreeChunk_Locked(previous_chunk);
- if (!free_chunk.is_null()) {
- new_free_chunk_size += free_chunk.size;
- first_free_chunk = previous_chunk;
- if (chunk == highest_allocated_chunk_)
- highest_allocated_chunk_ = previous_chunk;
-
- // There should not be more contiguous previous free chunks.
- previous_chunk = free_chunk.previous_chunk;
- DCHECK(!address_to_free_chunk_map_.count(previous_chunk));
- }
- }
-
- // Merge with the next chunk if free and present.
- uintptr_t next_chunk = chunk + size;
- const FreeChunk next_free_chunk = RemoveFreeChunk_Locked(next_chunk);
- if (!next_free_chunk.is_null()) {
- new_free_chunk_size += next_free_chunk.size;
- if (next_free_chunk.start == highest_allocated_chunk_)
- highest_allocated_chunk_ = first_free_chunk;
-
- // Same as above.
- DCHECK(
- !address_to_free_chunk_map_.count(next_chunk + next_free_chunk.size));
- }
-
- const bool whole_ashmem_region_is_free =
- used_to_previous_chunk_map_.empty();
- if (!whole_ashmem_region_is_free) {
- AddFreeChunk_Locked(
- FreeChunk(previous_chunk, first_free_chunk, new_free_chunk_size));
- return;
- }
-
- // The whole ashmem region is free thus it can be deleted.
- DCHECK_EQ(base_, first_free_chunk);
- DCHECK_EQ(base_, highest_allocated_chunk_);
- DCHECK(free_chunks_.empty());
- DCHECK(address_to_free_chunk_map_.empty());
- DCHECK(used_to_previous_chunk_map_.empty());
- highest_allocated_chunk_ = 0;
- allocator_->DeleteAshmemRegion_Locked(this); // Deletes |this|.
- }
-
- void AddFreeChunk_Locked(const FreeChunk& free_chunk) {
- allocator_->lock_.AssertAcquired();
- const std::multiset<FreeChunk>::iterator it = free_chunks_.insert(
- free_chunk);
- address_to_free_chunk_map_.insert(std::make_pair(free_chunk.start, it));
- // Update the next used contiguous chunk, if any, since its previous chunk
- // may have changed due to free chunks merging/splitting.
- uintptr_t const next_used_contiguous_chunk =
- free_chunk.start + free_chunk.size;
- hash_map<uintptr_t, uintptr_t>::iterator previous_it =
- used_to_previous_chunk_map_.find(next_used_contiguous_chunk);
- if (previous_it != used_to_previous_chunk_map_.end())
- previous_it->second = free_chunk.start;
- }
-
- // Finds and removes the free chunk, if any, whose start address is
- // |chunk_start|. Returns a copy of the unlinked free chunk or a free chunk
- // whose content is null if it was not found.
- FreeChunk RemoveFreeChunk_Locked(uintptr_t chunk_start) {
- allocator_->lock_.AssertAcquired();
- const hash_map<
- uintptr_t, std::multiset<FreeChunk>::iterator>::iterator it =
- address_to_free_chunk_map_.find(chunk_start);
- if (it == address_to_free_chunk_map_.end())
- return FreeChunk();
- return RemoveFreeChunkFromIterator_Locked(it->second);
- }
-
- // Same as above but takes an iterator in.
- FreeChunk RemoveFreeChunkFromIterator_Locked(
- std::multiset<FreeChunk>::iterator free_chunk_it) {
- allocator_->lock_.AssertAcquired();
- if (free_chunk_it == free_chunks_.end())
- return FreeChunk();
- DCHECK(free_chunk_it != free_chunks_.end());
- const FreeChunk free_chunk(*free_chunk_it);
- address_to_free_chunk_map_.erase(free_chunk_it->start);
- free_chunks_.erase(free_chunk_it);
- return free_chunk;
- }
-
- const int fd_;
- const size_t size_;
- uintptr_t const base_;
- DiscardableMemoryAshmemAllocator* const allocator_;
- // Points to the chunk with the highest address in the region. This pointer
- // needs to be carefully updated when chunks are merged/split.
- uintptr_t highest_allocated_chunk_;
- // Points to the end of |highest_allocated_chunk_|.
- size_t offset_;
- // Allows free chunks recycling (lookup, insertion and removal) in O(log N).
- // Note that FreeChunk values are indexed by their size and also note that
- // multiple free chunks can have the same size (which is why multiset<> is
- // used instead of e.g. set<>).
- std::multiset<FreeChunk> free_chunks_;
- // Used while merging free contiguous chunks to erase free chunks (from their
- // start address) in constant time. Note that multiset<>::{insert,erase}()
- // don't invalidate iterators (except the one for the element being removed
- // obviously).
- hash_map<
- uintptr_t, std::multiset<FreeChunk>::iterator> address_to_free_chunk_map_;
- // Maps the address of *used* chunks to the address of their previous
- // contiguous chunk.
- hash_map<uintptr_t, uintptr_t> used_to_previous_chunk_map_;
-
- DISALLOW_COPY_AND_ASSIGN(AshmemRegion);
-};
-
-DiscardableAshmemChunk::~DiscardableAshmemChunk() {
- if (locked_)
- UnlockAshmemRegion(fd_, offset_, size_);
- ashmem_region_->OnChunkDeletion(reinterpret_cast<uintptr_t>(address_), size_);
-}
-
-bool DiscardableAshmemChunk::Lock() {
- DCHECK(!locked_);
- locked_ = true;
- return LockAshmemRegion(fd_, offset_, size_);
-}
-
-void DiscardableAshmemChunk::Unlock() {
- DCHECK(locked_);
- locked_ = false;
- UnlockAshmemRegion(fd_, offset_, size_);
-}
-
-void* DiscardableAshmemChunk::Memory() const {
- return address_;
-}
-
-// Note that |ashmem_region| must outlive |this|.
-DiscardableAshmemChunk::DiscardableAshmemChunk(AshmemRegion* ashmem_region,
- int fd,
- void* address,
- size_t offset,
- size_t size)
- : ashmem_region_(ashmem_region),
- fd_(fd),
- address_(address),
- offset_(offset),
- size_(size),
- locked_(true) {
-}
-
-DiscardableMemoryAshmemAllocator::DiscardableMemoryAshmemAllocator(
- const std::string& name,
- size_t ashmem_region_size)
- : name_(name),
- ashmem_region_size_(
- std::max(kMinAshmemRegionSize, AlignToNextPage(ashmem_region_size))),
- last_ashmem_region_size_(0) {
- DCHECK_GE(ashmem_region_size_, kMinAshmemRegionSize);
-}
-
-DiscardableMemoryAshmemAllocator::~DiscardableMemoryAshmemAllocator() {
- DCHECK(ashmem_regions_.empty());
-}
-
-scoped_ptr<DiscardableAshmemChunk> DiscardableMemoryAshmemAllocator::Allocate(
- size_t size) {
- const size_t aligned_size = AlignToNextPage(size);
- if (!aligned_size)
- return scoped_ptr<DiscardableAshmemChunk>();
- // TODO(pliard): make this function less naive by e.g. moving the free chunks
- // multiset to the allocator itself in order to decrease even more
- // fragmentation/speedup allocation. Note that there should not be more than a
- // couple (=5) of AshmemRegion instances in practice though.
- AutoLock auto_lock(lock_);
- DCHECK_LE(ashmem_regions_.size(), 5U);
- for (ScopedVector<AshmemRegion>::iterator it = ashmem_regions_.begin();
- it != ashmem_regions_.end(); ++it) {
- scoped_ptr<DiscardableAshmemChunk> memory(
- (*it)->Allocate_Locked(size, aligned_size));
- if (memory)
- return memory.Pass();
- }
- // The creation of the (large) ashmem region might fail if the address space
- // is too fragmented. In case creation fails the allocator retries by
- // repetitively dividing the size by 2.
- const size_t min_region_size = std::max(kMinAshmemRegionSize, aligned_size);
- for (size_t region_size = std::max(ashmem_region_size_, aligned_size);
- region_size >= min_region_size;
- region_size = AlignToNextPage(region_size / 2)) {
- scoped_ptr<AshmemRegion> new_region(
- AshmemRegion::Create(region_size, name_.c_str(), this));
- if (!new_region)
- continue;
- last_ashmem_region_size_ = region_size;
- ashmem_regions_.push_back(new_region.release());
- return ashmem_regions_.back()->Allocate_Locked(size, aligned_size);
- }
- // TODO(pliard): consider adding an histogram to see how often this happens.
- return scoped_ptr<DiscardableAshmemChunk>();
-}
-
-size_t DiscardableMemoryAshmemAllocator::last_ashmem_region_size() const {
- AutoLock auto_lock(lock_);
- return last_ashmem_region_size_;
-}
-
-void DiscardableMemoryAshmemAllocator::DeleteAshmemRegion_Locked(
- AshmemRegion* region) {
- lock_.AssertAcquired();
- // Note that there should not be more than a couple of ashmem region instances
- // in |ashmem_regions_|.
- DCHECK_LE(ashmem_regions_.size(), 5U);
- const ScopedVector<AshmemRegion>::iterator it = std::find(
- ashmem_regions_.begin(), ashmem_regions_.end(), region);
- DCHECK(ashmem_regions_.end() != it);
- std::swap(*it, ashmem_regions_.back());
- ashmem_regions_.pop_back();
-}
-
-} // namespace internal
-} // namespace base
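The allocator deleted above rounds every allocation request up to a whole 4 KB page and rejects sizes whose rounding would overflow. A minimal standalone sketch of that rounding rule, assuming the fixed 4096-byte page size the deleted code asserted against getpagesize():

    #include <cstddef>
    #include <limits>

    // Rounds |size| up to the next 4 KB page boundary, mirroring the
    // AlignToNextPage() helper deleted above. Returns 0 when rounding would
    // overflow size_t, which callers treat as an invalid request.
    size_t AlignToNextPage(size_t size) {
      const size_t kPageSize = 4096;
      if (size > std::numeric_limits<size_t>::max() - kPageSize + 1)
        return 0;
      return (size + kPageSize - 1) & ~(kPageSize - 1);
    }

Under this rule a 4097-byte request rounds up to 8192 bytes, which is where the 4095-byte bound in the deleted kMaxChunkFragmentationBytes comment comes from.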
diff --git a/base/memory/discardable_memory_ashmem_allocator.h b/base/memory/discardable_memory_ashmem_allocator.h
deleted file mode 100644
index 996dde9..0000000
--- a/base/memory/discardable_memory_ashmem_allocator.h
+++ /dev/null
@@ -1,93 +0,0 @@
-// Copyright 2014 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_MEMORY_DISCARDABLE_MEMORY_ASHMEM_ALLOCATOR_H_
-#define BASE_MEMORY_DISCARDABLE_MEMORY_ASHMEM_ALLOCATOR_H_
-
-#include <string>
-
-#include "base/base_export.h"
-#include "base/basictypes.h"
-#include "base/memory/scoped_ptr.h"
-#include "base/memory/scoped_vector.h"
-#include "base/synchronization/lock.h"
-
-namespace base {
-namespace internal {
-
-class AshmemRegion;
-
-// Internal class, whose instances are returned to the client of the allocator
-// (e.g. DiscardableMemoryAshmem), that mimicks the DiscardableMemory interface.
-class BASE_EXPORT_PRIVATE DiscardableAshmemChunk {
- public:
- ~DiscardableAshmemChunk();
-
- // Returns whether the memory is still resident.
- bool Lock();
-
- void Unlock();
-
- void* Memory() const;
-
- private:
- friend class AshmemRegion;
-
- DiscardableAshmemChunk(AshmemRegion* ashmem_region,
- int fd,
- void* address,
- size_t offset,
- size_t size);
-
- AshmemRegion* const ashmem_region_;
- const int fd_;
- void* const address_;
- const size_t offset_;
- const size_t size_;
- bool locked_;
-
- DISALLOW_COPY_AND_ASSIGN(DiscardableAshmemChunk);
-};
-
-// Ashmem regions are backed by a file (descriptor) therefore they are a limited
-// resource. This allocator minimizes the problem by allocating large ashmem
-// regions internally and returning smaller chunks to the client.
-// Allocated chunks are systematically aligned on a page boundary therefore this
-// allocator should not be used for small allocations.
-class BASE_EXPORT_PRIVATE DiscardableMemoryAshmemAllocator {
- public:
- // Note that |name| is only used for debugging/measurement purposes.
- // |ashmem_region_size| is the size that will be used to create the underlying
- // ashmem regions and is expected to be greater or equal than 32 MBytes.
- DiscardableMemoryAshmemAllocator(const std::string& name,
- size_t ashmem_region_size);
-
- ~DiscardableMemoryAshmemAllocator();
-
- // Note that the allocator must outlive the returned DiscardableAshmemChunk
- // instance.
- scoped_ptr<DiscardableAshmemChunk> Allocate(size_t size);
-
- // Returns the size of the last ashmem region which was created. This is used
- // for testing only.
- size_t last_ashmem_region_size() const;
-
- private:
- friend class AshmemRegion;
-
- void DeleteAshmemRegion_Locked(AshmemRegion* region);
-
- const std::string name_;
- const size_t ashmem_region_size_;
- mutable Lock lock_;
- size_t last_ashmem_region_size_;
- ScopedVector<AshmemRegion> ashmem_regions_;
-
- DISALLOW_COPY_AND_ASSIGN(DiscardableMemoryAshmemAllocator);
-};
-
-} // namespace internal
-} // namespace base
-
-#endif // BASE_MEMORY_DISCARDABLE_MEMORY_ASHMEM_ALLOCATOR_H_
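The removed AshmemRegion recycled freed chunks with a best-fit match: free chunks were indexed by size in a std::multiset, and lower_bound() returned the smallest chunk able to hold the request. A reduced sketch of that lookup, where FreeChunk and FindBestFit are simplified stand-ins rather than the deleted types:

    #include <cstddef>
    #include <set>

    // Simplified stand-in for the removed FreeChunk: ordering by size lets a
    // multiset act as a best-fit index over the free list.
    struct FreeChunk {
      size_t size;
      bool operator<(const FreeChunk& other) const { return size < other.size; }
    };

    // Returns the smallest free chunk that can hold |needed| bytes, or null if
    // none is large enough (the caller then carves a new chunk instead).
    const FreeChunk* FindBestFit(const std::multiset<FreeChunk>& free_chunks,
                                 size_t needed) {
      std::multiset<FreeChunk>::const_iterator it =
          free_chunks.lower_bound(FreeChunk{needed});
      return it == free_chunks.end() ? nullptr : &*it;
    }

When no chunk qualifies, allocation falls back to carving new space from the end of the region, matching the Allocate_Locked() flow in the deleted implementation.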
diff --git a/base/memory/discardable_memory_ashmem_allocator_unittest.cc b/base/memory/discardable_memory_ashmem_allocator_unittest.cc
deleted file mode 100644
index e6f2fea..0000000
--- a/base/memory/discardable_memory_ashmem_allocator_unittest.cc
+++ /dev/null
@@ -1,319 +0,0 @@
-// Copyright 2014 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/memory/discardable_memory_ashmem_allocator.h"
-
-#include <sys/types.h>
-#include <unistd.h>
-
-#include "base/memory/discardable_memory.h"
-#include "base/memory/scoped_ptr.h"
-#include "base/strings/string_number_conversions.h"
-#include "base/strings/string_split.h"
-#include "base/strings/stringprintf.h"
-#include "build/build_config.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace base {
-namespace internal {
-
-const char kAllocatorName[] = "allocator-for-testing";
-
-const size_t kAshmemRegionSizeForTesting = 32 * 1024 * 1024;
-const size_t kPageSize = 4096;
-
-const size_t kMaxAllowedAllocationSize =
- std::numeric_limits<size_t>::max() - kPageSize + 1;
-
-class DiscardableMemoryAshmemAllocatorTest : public testing::Test {
- protected:
- DiscardableMemoryAshmemAllocatorTest()
- : allocator_(kAllocatorName, kAshmemRegionSizeForTesting) {
- }
-
- DiscardableMemoryAshmemAllocator allocator_;
-};
-
-void WriteToDiscardableAshmemChunk(DiscardableAshmemChunk* memory,
- size_t size) {
- // Write to the first and the last pages only to avoid paging in up to 64
- // MBytes.
- static_cast<char*>(memory->Memory())[0] = 'a';
- static_cast<char*>(memory->Memory())[size - 1] = 'a';
-}
-
-TEST_F(DiscardableMemoryAshmemAllocatorTest, Basic) {
- const size_t size = 128;
- scoped_ptr<DiscardableAshmemChunk> memory(allocator_.Allocate(size));
- ASSERT_TRUE(memory);
- WriteToDiscardableAshmemChunk(memory.get(), size);
-}
-
-TEST_F(DiscardableMemoryAshmemAllocatorTest, ZeroAllocationIsNotSupported) {
- scoped_ptr<DiscardableAshmemChunk> memory(allocator_.Allocate(0));
- ASSERT_FALSE(memory);
-}
-
-TEST_F(DiscardableMemoryAshmemAllocatorTest, TooLargeAllocationFails) {
- scoped_ptr<DiscardableAshmemChunk> memory(
- allocator_.Allocate(kMaxAllowedAllocationSize + 1));
- // Page-alignment would have caused an overflow resulting in a small
- // allocation if the input size wasn't checked correctly.
- ASSERT_FALSE(memory);
-}
-
-TEST_F(DiscardableMemoryAshmemAllocatorTest,
- AshmemRegionsAreNotSmallerThanRequestedSize) {
- // The creation of the underlying ashmem region is expected to fail since
- // there should not be enough room in the address space. When ashmem creation
- // fails, the allocator repetitively retries by dividing the size by 2. This
- // size should not be smaller than the size the user requested so the
- // allocation here should just fail (and not succeed with the minimum ashmem
- // region size).
- scoped_ptr<DiscardableAshmemChunk> memory(
- allocator_.Allocate(kMaxAllowedAllocationSize));
- ASSERT_FALSE(memory);
-}
-
-TEST_F(DiscardableMemoryAshmemAllocatorTest,
- AshmemRegionsAreAlwaysPageAligned) {
- // Use a separate allocator here so that we can override the ashmem region
- // size.
- DiscardableMemoryAshmemAllocator allocator(
- kAllocatorName, kMaxAllowedAllocationSize);
- scoped_ptr<DiscardableAshmemChunk> memory(allocator.Allocate(kPageSize));
- ASSERT_TRUE(memory);
- EXPECT_GT(kMaxAllowedAllocationSize, allocator.last_ashmem_region_size());
- ASSERT_EQ(allocator.last_ashmem_region_size() % kPageSize, 0u);
-}
-
-TEST_F(DiscardableMemoryAshmemAllocatorTest, LargeAllocation) {
- const size_t size = 64 * 1024 * 1024;
- scoped_ptr<DiscardableAshmemChunk> memory(allocator_.Allocate(size));
- ASSERT_TRUE(memory);
- WriteToDiscardableAshmemChunk(memory.get(), size);
-}
-
-TEST_F(DiscardableMemoryAshmemAllocatorTest, ChunksArePageAligned) {
- scoped_ptr<DiscardableAshmemChunk> memory(allocator_.Allocate(kPageSize));
- ASSERT_TRUE(memory);
- EXPECT_EQ(0U, reinterpret_cast<uint64_t>(memory->Memory()) % kPageSize);
- WriteToDiscardableAshmemChunk(memory.get(), kPageSize);
-}
-
-TEST_F(DiscardableMemoryAshmemAllocatorTest, AllocateFreeAllocate) {
- scoped_ptr<DiscardableAshmemChunk> memory(allocator_.Allocate(kPageSize));
- // Extra allocation that prevents the region from being deleted when |memory|
- // gets deleted.
- scoped_ptr<DiscardableAshmemChunk> memory_lock(
- allocator_.Allocate(kPageSize));
- ASSERT_TRUE(memory);
- void* const address = memory->Memory();
- memory->Unlock(); // Tests that the reused chunk is being locked correctly.
- memory.reset();
- memory = allocator_.Allocate(kPageSize);
- ASSERT_TRUE(memory);
- // The previously freed chunk should be reused.
- EXPECT_EQ(address, memory->Memory());
- WriteToDiscardableAshmemChunk(memory.get(), kPageSize);
-}
-
-TEST_F(DiscardableMemoryAshmemAllocatorTest,
- FreeingWholeAshmemRegionClosesAshmem) {
- scoped_ptr<DiscardableAshmemChunk> memory(allocator_.Allocate(kPageSize));
- ASSERT_TRUE(memory);
- const int kMagic = 0xdeadbeef;
- *static_cast<int*>(memory->Memory()) = kMagic;
- memory.reset();
- // The previous ashmem region should have been closed thus it should not be
- // reused.
- memory = allocator_.Allocate(kPageSize);
- ASSERT_TRUE(memory);
- EXPECT_NE(kMagic, *static_cast<const int*>(memory->Memory()));
-}
-
-TEST_F(DiscardableMemoryAshmemAllocatorTest, AllocateUsesBestFitAlgorithm) {
- scoped_ptr<DiscardableAshmemChunk> memory1(
- allocator_.Allocate(3 * kPageSize));
- ASSERT_TRUE(memory1);
- scoped_ptr<DiscardableAshmemChunk> memory2(
- allocator_.Allocate(2 * kPageSize));
- ASSERT_TRUE(memory2);
- scoped_ptr<DiscardableAshmemChunk> memory3(
- allocator_.Allocate(1 * kPageSize));
- ASSERT_TRUE(memory3);
- void* const address_3 = memory3->Memory();
- memory1.reset();
- // Don't free |memory2| to avoid merging the 3 blocks together.
- memory3.reset();
- memory1 = allocator_.Allocate(1 * kPageSize);
- ASSERT_TRUE(memory1);
- // The chunk whose size is closest to the requested size should be reused.
- EXPECT_EQ(address_3, memory1->Memory());
- WriteToDiscardableAshmemChunk(memory1.get(), kPageSize);
-}
-
-TEST_F(DiscardableMemoryAshmemAllocatorTest, MergeFreeChunks) {
- scoped_ptr<DiscardableAshmemChunk> memory1(allocator_.Allocate(kPageSize));
- ASSERT_TRUE(memory1);
- scoped_ptr<DiscardableAshmemChunk> memory2(allocator_.Allocate(kPageSize));
- ASSERT_TRUE(memory2);
- scoped_ptr<DiscardableAshmemChunk> memory3(allocator_.Allocate(kPageSize));
- ASSERT_TRUE(memory3);
- scoped_ptr<DiscardableAshmemChunk> memory4(allocator_.Allocate(kPageSize));
- ASSERT_TRUE(memory4);
- void* const memory1_address = memory1->Memory();
- memory1.reset();
- memory3.reset();
- // Freeing |memory2| (located between memory1 and memory3) should merge the
- // three free blocks together.
- memory2.reset();
- memory1 = allocator_.Allocate(3 * kPageSize);
- EXPECT_EQ(memory1_address, memory1->Memory());
-}
-
-TEST_F(DiscardableMemoryAshmemAllocatorTest, MergeFreeChunksAdvanced) {
- scoped_ptr<DiscardableAshmemChunk> memory1(
- allocator_.Allocate(4 * kPageSize));
- ASSERT_TRUE(memory1);
- scoped_ptr<DiscardableAshmemChunk> memory2(
- allocator_.Allocate(4 * kPageSize));
- ASSERT_TRUE(memory2);
- void* const memory1_address = memory1->Memory();
- memory1.reset();
- memory1 = allocator_.Allocate(2 * kPageSize);
- memory2.reset();
- // At this point, the region should be in this state:
- // 8 KBytes (used), 24 KBytes (free).
- memory2 = allocator_.Allocate(6 * kPageSize);
- EXPECT_EQ(
- static_cast<const char*>(memory2->Memory()),
- static_cast<const char*>(memory1_address) + 2 * kPageSize);
-}
-
-TEST_F(DiscardableMemoryAshmemAllocatorTest, MergeFreeChunksAdvanced2) {
- scoped_ptr<DiscardableAshmemChunk> memory1(
- allocator_.Allocate(4 * kPageSize));
- ASSERT_TRUE(memory1);
- scoped_ptr<DiscardableAshmemChunk> memory2(
- allocator_.Allocate(4 * kPageSize));
- ASSERT_TRUE(memory2);
- void* const memory1_address = memory1->Memory();
- memory1.reset();
- memory1 = allocator_.Allocate(2 * kPageSize);
- scoped_ptr<DiscardableAshmemChunk> memory3(
- allocator_.Allocate(2 * kPageSize));
- // At this point, the region should be in this state:
- // 8 KBytes (used), 8 KBytes (used), 16 KBytes (used).
- memory3.reset();
- memory2.reset();
- // At this point, the region should be in this state:
- // 8 KBytes (used), 24 KBytes (free).
- memory2 = allocator_.Allocate(6 * kPageSize);
- EXPECT_EQ(
- static_cast<const char*>(memory2->Memory()),
- static_cast<const char*>(memory1_address) + 2 * kPageSize);
-}
-
-TEST_F(DiscardableMemoryAshmemAllocatorTest,
- MergeFreeChunksAndDeleteAshmemRegion) {
- scoped_ptr<DiscardableAshmemChunk> memory1(
- allocator_.Allocate(4 * kPageSize));
- ASSERT_TRUE(memory1);
- scoped_ptr<DiscardableAshmemChunk> memory2(
- allocator_.Allocate(4 * kPageSize));
- ASSERT_TRUE(memory2);
- memory1.reset();
- memory1 = allocator_.Allocate(2 * kPageSize);
- scoped_ptr<DiscardableAshmemChunk> memory3(
- allocator_.Allocate(2 * kPageSize));
- // At this point, the region should be in this state:
- // 8 KBytes (used), 8 KBytes (used), 16 KBytes (used).
- memory1.reset();
- memory3.reset();
- // At this point, the region should be in this state:
- // 8 KBytes (free), 8 KBytes (used), 8 KBytes (free).
- const int kMagic = 0xdeadbeef;
- *static_cast<int*>(memory2->Memory()) = kMagic;
- memory2.reset();
- // The whole region should have been deleted.
- memory2 = allocator_.Allocate(2 * kPageSize);
- EXPECT_NE(kMagic, *static_cast<int*>(memory2->Memory()));
-}
-
-TEST_F(DiscardableMemoryAshmemAllocatorTest,
- TooLargeFreeChunksDontCauseTooMuchFragmentationWhenRecycled) {
- // Keep |memory_1| below allocated so that the ashmem region doesn't get
- // closed when |memory_2| is deleted.
- scoped_ptr<DiscardableAshmemChunk> memory_1(allocator_.Allocate(64 * 1024));
- ASSERT_TRUE(memory_1);
- scoped_ptr<DiscardableAshmemChunk> memory_2(allocator_.Allocate(32 * 1024));
- ASSERT_TRUE(memory_2);
- void* const address = memory_2->Memory();
- memory_2.reset();
- const size_t size = 16 * 1024;
- memory_2 = allocator_.Allocate(size);
- ASSERT_TRUE(memory_2);
- EXPECT_EQ(address, memory_2->Memory());
- WriteToDiscardableAshmemChunk(memory_2.get(), size);
- scoped_ptr<DiscardableAshmemChunk> memory_3(allocator_.Allocate(size));
- // The unused tail (16 KBytes large) of the previously freed chunk should be
- // reused.
- EXPECT_EQ(static_cast<char*>(address) + size, memory_3->Memory());
- WriteToDiscardableAshmemChunk(memory_3.get(), size);
-}
-
-TEST_F(DiscardableMemoryAshmemAllocatorTest, UseMultipleAshmemRegions) {
- // Leave one page untouched at the end of the ashmem region.
- const size_t size = kAshmemRegionSizeForTesting - kPageSize;
- scoped_ptr<DiscardableAshmemChunk> memory1(allocator_.Allocate(size));
- ASSERT_TRUE(memory1);
- WriteToDiscardableAshmemChunk(memory1.get(), size);
-
- scoped_ptr<DiscardableAshmemChunk> memory2(
- allocator_.Allocate(kAshmemRegionSizeForTesting));
- ASSERT_TRUE(memory2);
- WriteToDiscardableAshmemChunk(memory2.get(), kAshmemRegionSizeForTesting);
- // The last page of the first ashmem region should be used for this
- // allocation.
- scoped_ptr<DiscardableAshmemChunk> memory3(allocator_.Allocate(kPageSize));
- ASSERT_TRUE(memory3);
- WriteToDiscardableAshmemChunk(memory3.get(), kPageSize);
- EXPECT_EQ(memory3->Memory(), static_cast<char*>(memory1->Memory()) + size);
-}
-
-TEST_F(DiscardableMemoryAshmemAllocatorTest,
- HighestAllocatedChunkPointerIsUpdatedWhenHighestChunkGetsSplit) {
- // Prevents the ashmem region from getting closed when |memory2| gets freed.
- scoped_ptr<DiscardableAshmemChunk> memory1(allocator_.Allocate(kPageSize));
- ASSERT_TRUE(memory1);
-
- scoped_ptr<DiscardableAshmemChunk> memory2(
- allocator_.Allocate(4 * kPageSize));
- ASSERT_TRUE(memory2);
-
- memory2.reset();
- memory2 = allocator_.Allocate(kPageSize);
- // There should now be a free chunk of size 3 * |kPageSize| starting at offset
- // 2 * |kPageSize| and the pointer to the highest allocated chunk should have
- // also been updated to |base_| + 2 * |kPageSize|. This pointer is used to
- // maintain the container mapping a chunk address to its previous chunk and
- // this map is in turn used while merging previous contiguous chunks.
-
- // Allocate more than 3 * |kPageSize| so that the free chunk of size 3 *
- // |kPageSize| is not reused and |highest_allocated_chunk_| gets used instead.
- scoped_ptr<DiscardableAshmemChunk> memory3(
- allocator_.Allocate(4 * kPageSize));
- ASSERT_TRUE(memory3);
-
- // Deleting |memory3| (whose size is 4 * |kPageSize|) should result in a merge
- // with its previous chunk which is the free chunk of size |3 * kPageSize|.
- memory3.reset();
- memory3 = allocator_.Allocate((3 + 4) * kPageSize);
- EXPECT_EQ(memory3->Memory(),
- static_cast<const char*>(memory2->Memory()) + kPageSize);
-}
-
-} // namespace internal
-} // namespace base
diff --git a/base/memory/discardable_memory_linux.cc b/base/memory/discardable_memory_linux.cc
index 9a7ea12..005f956 100644
--- a/base/memory/discardable_memory_linux.cc
+++ b/base/memory/discardable_memory_linux.cc
@@ -31,7 +31,6 @@ scoped_ptr<DiscardableMemory> DiscardableMemory::CreateLockedMemoryWithType(
return memory.Pass();
}
case DISCARDABLE_MEMORY_TYPE_NONE:
- case DISCARDABLE_MEMORY_TYPE_ASHMEM:
case DISCARDABLE_MEMORY_TYPE_MACH:
NOTREACHED();
return nullptr;
diff --git a/base/memory/discardable_memory_mac.cc b/base/memory/discardable_memory_mac.cc
index cc21562..71d4802 100644
--- a/base/memory/discardable_memory_mac.cc
+++ b/base/memory/discardable_memory_mac.cc
@@ -43,7 +43,6 @@ scoped_ptr<DiscardableMemory> DiscardableMemory::CreateLockedMemoryWithType(
return memory.Pass();
}
case DISCARDABLE_MEMORY_TYPE_NONE:
- case DISCARDABLE_MEMORY_TYPE_ASHMEM:
NOTREACHED();
return nullptr;
}
diff --git a/base/memory/discardable_memory_unittest.cc b/base/memory/discardable_memory_unittest.cc
index fb1eba6..2bee010 100644
--- a/base/memory/discardable_memory_unittest.cc
+++ b/base/memory/discardable_memory_unittest.cc
@@ -38,8 +38,12 @@ TEST_P(DiscardableMemoryTest, IsNamed) {
}
bool IsNativeType(DiscardableMemoryType type) {
- return type == DISCARDABLE_MEMORY_TYPE_ASHMEM ||
- type == DISCARDABLE_MEMORY_TYPE_MACH;
+ return
+#if defined(OS_ANDROID)
+ // SHMEM is backed by native discardable memory on Android.
+ type == DISCARDABLE_MEMORY_TYPE_SHMEM ||
+#endif
+ type == DISCARDABLE_MEMORY_TYPE_MACH;
}
TEST_P(DiscardableMemoryTest, SupportedNatively) {
diff --git a/base/memory/discardable_memory_win.cc b/base/memory/discardable_memory_win.cc
index 9a7ea12..005f956 100644
--- a/base/memory/discardable_memory_win.cc
+++ b/base/memory/discardable_memory_win.cc
@@ -31,7 +31,6 @@ scoped_ptr<DiscardableMemory> DiscardableMemory::CreateLockedMemoryWithType(
return memory.Pass();
}
case DISCARDABLE_MEMORY_TYPE_NONE:
- case DISCARDABLE_MEMORY_TYPE_ASHMEM:
case DISCARDABLE_MEMORY_TYPE_MACH:
NOTREACHED();
return nullptr;