author     kuan <kuan@chromium.org>    2015-10-06 14:08:23 -0700
committer  Commit bot <commit-bot@chromium.org>    2015-10-06 21:11:32 +0000
commit     1cd5fee938b55399ff68538699e453c110021d8c (patch)
tree       bf5e23ddb8a5059f977cdcc86264e95d3e4ab867
parent     ab825853d8130f2caa7d5fd49ba5224dfe7ce297 (diff)
Revert of [Tracing] Add allocation register for heap profiling (patchset #7 id:120001 of https://codereview.chromium.org/1371053002/ )
Reason for revert: broke builds:
https://build.chromium.org/p/chromium.linux/builders/Linux%20Builder%20%28dbg%29/builds/94444/steps/compile/logs/stdio
https://build.chromium.org/p/chromium.linux/builders/Linux%20GN%20%28dbg%29/builds/37877/steps/compile/logs/stdio

Original issue's description:
> [Tracing] Add allocation register for heap profiling
>
> Malloc and PartitionAlloc dumpers will use this allocation register (a
> hash table) to keep track of all outstanding allocations. The hash table
> is tailored for this purpose. It handles its own memory management, to
> avoid allocation reentrancy issues when doing the bookkeeping. This is
> part of the heap profiler in chrome://tracing.
>
> BUG=524631
>
> Committed: https://crrev.com/fd577b4f6c1f24709c274d8970d4f8e6369bb1ff
> Cr-Commit-Position: refs/heads/master@{#352664}

TBR=primiano@chromium.org,ruuda@google.com
NOPRESUBMIT=true
NOTREECHECKS=true
NOTRY=true
BUG=524631

Review URL: https://codereview.chromium.org/1387483006

Cr-Commit-Position: refs/heads/master@{#352685}
-rw-r--r--  base/trace_event/BUILD.gn                                         |   5
-rw-r--r--  base/trace_event/memory_profiler_allocation_register.cc          | 166
-rw-r--r--  base/trace_event/memory_profiler_allocation_register.h           | 159
-rw-r--r--  base/trace_event/memory_profiler_allocation_register_posix.cc    |  59
-rw-r--r--  base/trace_event/memory_profiler_allocation_register_unittest.cc | 228
-rw-r--r--  base/trace_event/memory_profiler_allocation_register_win.cc      |  62
-rw-r--r--  base/trace_event/trace_event.gypi                                 |   5
7 files changed, 0 insertions(+), 684 deletions(-)
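For reference, the reverted class exposed a small insert/remove/iterate interface (see the deleted header further down). The sketch below is illustrative only: it assumes the deleted API exactly as it appears in this diff, so it does not build against the tree after this revert, and the helper names are hypothetical.

#include "base/trace_event/memory_profiler_allocation_register.h"

namespace {

// Record an allocation; Insert() overwrites the entry if |address| is
// already present, as documented in the deleted header.
void RecordAllocation(base::trace_event::AllocationRegister* reg,
                      void* address,
                      size_t size,
                      base::trace_event::AllocationContext context) {
  reg->Insert(address, size, context);
}

// Sum the sizes of all outstanding allocations. Iteration order is
// unspecified; free cells are skipped because their address is null.
size_t TotalOutstandingBytes(
    const base::trace_event::AllocationRegister& reg) {
  size_t total = 0;
  for (const auto& allocation : reg)
    total += allocation.size;
  return total;
}

}  // namespace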
diff --git a/base/trace_event/BUILD.gn b/base/trace_event/BUILD.gn
index 9d9b467..a55c55d 100644
--- a/base/trace_event/BUILD.gn
+++ b/base/trace_event/BUILD.gn
@@ -19,10 +19,6 @@ source_set("trace_event") {
"memory_dump_session_state.h",
"memory_profiler_allocation_context.cc",
"memory_profiler_allocation_context.h",
- "memory_profiler_allocation_register.cc",
- "memory_profiler_allocation_register.h",
- "memory_profiler_allocation_register_posix.cc",
- "memory_profiler_allocation_register_win.cc",
"process_memory_dump.cc",
"process_memory_dump.h",
"process_memory_maps.cc",
@@ -109,7 +105,6 @@ source_set("trace_event_unittests") {
"memory_allocator_dump_unittest.cc",
"memory_dump_manager_unittest.cc",
"memory_profiler_allocation_context_unittest.cc",
- "memory_profiler_allocation_register_unittest.cc",
"process_memory_dump_unittest.cc",
"process_memory_maps_dump_provider_unittest.cc",
"process_memory_totals_dump_provider_unittest.cc",
diff --git a/base/trace_event/memory_profiler_allocation_register.cc b/base/trace_event/memory_profiler_allocation_register.cc
deleted file mode 100644
index 662e86f..0000000
--- a/base/trace_event/memory_profiler_allocation_register.cc
+++ /dev/null
@@ -1,166 +0,0 @@
-// Copyright 2015 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/trace_event/memory_profiler_allocation_register.h"
-
-namespace base {
-namespace trace_event {
-
-AllocationRegister::AllocationRegister()
- // Reserve enough address space to store |kNumCells| entries if necessary,
- // with a guard page after it to crash the program when attempting to store
- // more entries.
- : cells_(static_cast<Cell*>(AllocateVirtualMemory(kNumCells *
- sizeof(Cell)))),
- buckets_(static_cast<CellIndex*>(
- AllocateVirtualMemory(kNumBuckets * sizeof(CellIndex)))),
-
- // The free list is empty. The first unused cell is cell 1, because index
- // 0 is used as list terminator.
- free_list_(0),
- next_unused_cell_(1) {}
-
-AllocationRegister::~AllocationRegister() {
- FreeVirtualMemory(buckets_, kNumBuckets * sizeof(CellIndex));
- FreeVirtualMemory(cells_, kNumCells * sizeof(Cell));
-}
-
-void AllocationRegister::Insert(void* address,
- size_t size,
- AllocationContext context) {
- DCHECK(address != nullptr);
-
- CellIndex* idx_ptr = Lookup(address);
-
- // If the index is 0, the address is not yet present, so insert it.
- if (*idx_ptr == 0) {
- *idx_ptr = GetFreeCell();
-
- cells_[*idx_ptr].allocation.address = address;
- cells_[*idx_ptr].next = 0;
- }
-
- cells_[*idx_ptr].allocation.size = size;
- cells_[*idx_ptr].allocation.context = context;
-}
-
-void AllocationRegister::Remove(void* address) {
- // Get a pointer to the index of the cell that stores |address|. The index can
- // be an element of |buckets_| or the |next| member of a cell.
- CellIndex* idx_ptr = Lookup(address);
- CellIndex freed_idx = *idx_ptr;
-
- // If the index is 0, the address was not there in the first place.
- if (freed_idx == 0)
- return;
-
- // The cell at the index is now free, remove it from the linked list for
- // |Hash(address)|.
- Cell* freed_cell = &cells_[freed_idx];
- *idx_ptr = freed_cell->next;
-
- // Put the free cell at the front of the free list.
- freed_cell->next = free_list_;
- free_list_ = freed_idx;
-
- // Reset the address, so that on iteration the free cell is ignored.
- freed_cell->allocation.address = nullptr;
-}
-
-AllocationRegister::ConstIterator AllocationRegister::begin() const {
- // Initialize the iterator's index to 0. Cell 0 never stores an entry.
- ConstIterator iterator(*this, 0);
- // Incrementing will advance the iterator to the first used cell.
- ++iterator;
- return iterator;
-}
-
-AllocationRegister::ConstIterator AllocationRegister::end() const {
- // Cell |next_unused_cell_ - 1| is the last cell that could contain an entry,
- // so index |next_unused_cell_| is an iterator past the last element, in line
- // with the STL iterator conventions.
- return ConstIterator(*this, next_unused_cell_);
-}
-
-AllocationRegister::ConstIterator::ConstIterator(
- const AllocationRegister& alloc_register,
- CellIndex index)
- : register_(alloc_register), index_(index) {}
-
-void AllocationRegister::ConstIterator::operator++() {
- // Find the next cell with a non-null address until all cells that could
- // possibly be used have been iterated. A null address indicates a free cell.
- do {
- index_++;
- } while (index_ < register_.next_unused_cell_ &&
- register_.cells_[index_].allocation.address == nullptr);
-}
-
-bool AllocationRegister::ConstIterator::operator!=(
- const ConstIterator& other) const {
- return index_ != other.index_;
-}
-
-const AllocationRegister::Allocation& AllocationRegister::ConstIterator::
-operator*() const {
- return register_.cells_[index_].allocation;
-}
-
-AllocationRegister::CellIndex* AllocationRegister::Lookup(void* address) {
- // The list head is in |buckets_| at the hash offset.
- CellIndex* idx_ptr = &buckets_[Hash(address)];
-
- // Chase down the list until the cell that holds |address| is found,
- // or until the list ends.
- while (*idx_ptr != 0 && cells_[*idx_ptr].allocation.address != address)
- idx_ptr = &cells_[*idx_ptr].next;
-
- return idx_ptr;
-}
-
-AllocationRegister::CellIndex AllocationRegister::GetFreeCell() {
- // First try to re-use a cell from the freelist.
- if (free_list_) {
- CellIndex idx = free_list_;
- free_list_ = cells_[idx].next;
- return idx;
- }
-
- // Otherwise pick the next cell that has not been touched before.
- CellIndex idx = next_unused_cell_;
- next_unused_cell_++;
-
- // If the hash table has too little capacity (when too little address space
- // was reserved for |cells_|), |next_unused_cell_| can be an index outside of
- // the allocated storage. A guard page is allocated there to crash the
- // program in that case. There are alternative solutions:
- // - Deal with it, increase capacity by reallocating |cells_|.
- // - Refuse to insert and let the caller deal with it.
- // Because free cells are re-used before accessing fresh cells with a higher
- // index, and because reserving address space without touching it is cheap,
- // the simplest solution is to just allocate a humongous chunk of address
- // space.
-
- DCHECK_LT(next_unused_cell_, kNumCells + 1);
-
- return idx;
-}
-
-// static
-uint32_t AllocationRegister::Hash(void* address) {
- // The multiplicative hashing scheme from [Knuth 1998]. The value of |a| has
- // been chosen carefully based on measurements with real-world data (addresses
- // recorded from a Chrome trace run). It is the first prime after 2^17. For
- // |shift|, 13, 14 and 15 yield good results. These values are tuned to 2^18
- // buckets. Microbenchmarks show that this simple scheme outperforms fancy
- // hashes like Murmur3 by 20 to 40 percent.
- const uintptr_t key = reinterpret_cast<uintptr_t>(address);
- const uintptr_t a = 131101;
- const uintptr_t shift = 14;
- const uintptr_t h = (key * a) >> shift;
- return static_cast<uint32_t>(h) & kNumBucketsMask;
-}
-
-} // namespace trace_event
-} // namespace base
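As the comment in Hash() notes, the bucket count is a power of two, so the final modulo reduction can be done with a bitwise and. A minimal standalone check of that equivalence (the constants are copied from the deleted code above; everything else is hypothetical):

#include <cassert>
#include <cstdint>

int main() {
  const uint32_t kNumBuckets = 0x40000;            // 2^18, as in the deleted code.
  const uint32_t kNumBucketsMask = kNumBuckets - 1;
  for (uint32_t h : {0u, 1u, 0x3ffffu, 0x40000u, 0x12345678u}) {
    // For a power-of-two bucket count, masking with (count - 1) keeps exactly
    // the low bits, which is the same as reducing modulo the count.
    assert((h & kNumBucketsMask) == (h % kNumBuckets));
  }
  return 0;
}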
diff --git a/base/trace_event/memory_profiler_allocation_register.h b/base/trace_event/memory_profiler_allocation_register.h
deleted file mode 100644
index db8eb03..0000000
--- a/base/trace_event/memory_profiler_allocation_register.h
+++ /dev/null
@@ -1,159 +0,0 @@
-// Copyright 2015 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_TRACE_EVENT_MEMORY_PROFILER_ALLOCATION_REGISTER_H_
-#define BASE_TRACE_EVENT_MEMORY_PROFILER_ALLOCATION_REGISTER_H_
-
-#include <stdint.h>
-
-#include "base/logging.h"
-#include "base/trace_event/memory_profiler_allocation_context.h"
-
-namespace base {
-namespace trace_event {
-
-// The allocation register keeps track of all allocations that have not been
-// freed. It is a memory map-backed hash table that stores size and context
-// indexed by address. The hash table is tailored specifically for this use
-// case. The common case is that an entry is inserted and removed after a
-// while, lookup without modifying the table is not an intended use case. The
-// hash table is implemented as an array of linked lists. The size of this
-// array is fixed, but it does not limit the amount of entries that can be
-// stored.
-//
-// Replaying a recording of Chrome's allocations and frees against this hash
-// table takes about 15% of the time that it takes to replay them against
-// |std::map|.
-class BASE_EXPORT AllocationRegister {
- public:
- // The data stored in the hash table; it
- // contains the details about an allocation.
- struct Allocation {
- void* address;
- size_t size;
- AllocationContext context;
- };
-
- // An iterator that iterates entries in the hash table efficiently, but in no
- // particular order. It can do this by iterating the cells and ignoring the
- // linked lists altogether. Instead of checking whether a cell is in the free
- // list to see if it should be skipped, a null address is used to indicate
- // that a cell is free.
- class BASE_EXPORT ConstIterator {
- public:
- void operator++();
- bool operator!=(const ConstIterator& other) const;
- const Allocation& operator*() const;
-
- private:
- friend class AllocationRegister;
- using CellIndex = uint32_t;
-
- ConstIterator(const AllocationRegister& alloc_register, CellIndex index);
-
- const AllocationRegister& register_;
- CellIndex index_;
- };
-
- AllocationRegister();
- ~AllocationRegister();
-
- // Inserts allocation details into the table. If the address was present
- // already, its details are updated. |address| must not be null. (This is
- // because null is used to mark free cells, to allow efficient iteration of
- // the hash table.)
- void Insert(void* address, size_t size, AllocationContext context);
-
- // Removes the address from the table if it is present. It is ok to call this
- // with a null pointer.
- void Remove(void* address);
-
- ConstIterator begin() const;
- ConstIterator end() const;
-
- private:
- friend class AllocationRegisterTest;
- using CellIndex = uint32_t;
-
- // A cell can store allocation details (size and context) by address. Cells
- // are part of a linked list via the |next| member. This list is either the
- // list for a particular hash, or the free list. All cells are contiguous in
- // memory in one big array. Therefore, on 64-bit systems, space can be saved
- // by storing 32-bit indices instead of pointers as links. Index 0 is used as
- // the list terminator.
- struct Cell {
- CellIndex next;
- Allocation allocation;
- };
-
- // The number of buckets, 2^18, approximately 260 000, has been tuned for
- // Chrome's typical number of outstanding allocations. (This number varies
- // between processes. Most processes have a sustained load of ~30k unfreed
- // allocations, but some processes have peaks around 100k-400k allocations.)
- // Because of the size of the table, it is likely that every |buckets_|
- // access and every |cells_| access will incur a cache miss. Microbenchmarks
- // suggest that it is worthwhile to use more memory for the table to avoid
- // chasing down the linked list, until the size is 2^18. The number of buckets
- // is a power of two so modular indexing can be done with bitwise and.
- static const uint32_t kNumBuckets = 0x40000;
- static const uint32_t kNumBucketsMask = kNumBuckets - 1;
-
- // Reserve address space to store at most this number of entries. High
- // capacity does not imply high memory usage due to the access pattern. The
- // only constraint on the number of cells is that on 32-bit systems address
- // space is scarce (i.e. reserving 2GiB of address space for the entries is
- // not an option). A value of ~3M entries is large enough to handle spikes in
- // the number of allocations, and modest enough to require no more than a few
- // dozens of MiB of address space.
- static const uint32_t kNumCells = kNumBuckets * 10;
-
- // Returns a value in the range [0, kNumBuckets - 1] (inclusive).
- static uint32_t Hash(void* address);
-
- // Allocates a region of virtual address space of |size| rounded up to the
- // system page size. The memory is zeroed by the system. A guard page is added
- // after the end.
- static void* AllocateVirtualMemory(size_t size);
-
- // Frees a region of virtual address space allocated by a call to
- // |AllocateVirtualMemory|.
- static void FreeVirtualMemory(void* address, size_t allocated_size);
-
- // Returns a pointer to the variable that contains or should contain the
- // index of the cell that stores the entry for |address|. The pointer may
- // point at an element of |buckets_| or at the |next| member of an element of
- // |cells_|. If the value pointed at is 0, |address| is not in the table.
- CellIndex* Lookup(void* address);
-
- // Takes a cell that is not being used to store an entry (either by recycling
- // from the free list or by taking a fresh cell) and returns its index.
- CellIndex GetFreeCell();
-
- // The array of cells. This array is backed by mmapped memory. Lower indices
- // are accessed first, higher indices are only accessed when required. In
- // this way, even if a huge amount of address space has been mmapped, only
- // the cells that are actually used will be backed by physical memory.
- Cell* const cells_;
-
- // The array of indices into |cells_|. |buckets_[Hash(address)]| will contain
- // the index of the head of the linked list for |Hash(address)|. A value of 0
- // indicates an empty list. This array is backed by mmapped memory.
- CellIndex* const buckets_;
-
- // The head of the free list. This is the index of the cell. A value of 0
- // means that the free list is empty.
- CellIndex free_list_;
-
- // The index of the first element of |cells_| that has not been used before.
- // If the free list is empty and a new cell is needed, the cell at this index
- // is used. This is the high water mark for the number of entries stored.
- CellIndex next_unused_cell_;
-
- DISALLOW_COPY_AND_ASSIGN(AllocationRegister);
-};
-
-} // namespace trace_event
-} // namespace base
-
-#endif // BASE_TRACE_EVENT_MEMORY_PROFILER_ALLOCATION_REGISTER_H_
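The address-space cost implied by these constants can be worked out directly. The sketch below is a back-of-the-envelope calculation, not code from this patch; sizeof(Cell) depends on AllocationContext, which is defined elsewhere, so the 24-byte figure is an assumption chosen to land in the "few dozen MiB" range the comment describes.

#include <cstdint>
#include <cstdio>

int main() {
  const uint64_t kNumBuckets = 0x40000;           // 2^18 buckets.
  const uint64_t kNumCells = kNumBuckets * 10;    // ~2.6M cells.

  // |buckets_| holds one 32-bit CellIndex per bucket: 2^18 * 4 B = 1 MiB.
  const uint64_t bucket_bytes = kNumBuckets * sizeof(uint32_t);

  // |cells_| holds a CellIndex plus an Allocation per cell. Assume 24 bytes
  // per cell for illustration: ~2.6M * 24 B = 60 MiB of reserved (but mostly
  // untouched) address space.
  const uint64_t assumed_cell_size = 24;
  const uint64_t cell_bytes = kNumCells * assumed_cell_size;

  std::printf("buckets: %llu MiB, cells: ~%llu MiB\n",
              static_cast<unsigned long long>(bucket_bytes >> 20),
              static_cast<unsigned long long>(cell_bytes >> 20));
  return 0;
}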
diff --git a/base/trace_event/memory_profiler_allocation_register_posix.cc b/base/trace_event/memory_profiler_allocation_register_posix.cc
deleted file mode 100644
index e2231a8..0000000
--- a/base/trace_event/memory_profiler_allocation_register_posix.cc
+++ /dev/null
@@ -1,59 +0,0 @@
-// Copyright 2015 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/trace_event/memory_profiler_allocation_register.h"
-
-#include <sys/mman.h>
-#include <unistd.h>
-
-#include "base/basictypes.h"
-#include "base/bits.h"
-#include "base/logging.h"
-#include "base/process/process_metrics.h"
-
-#ifndef MAP_ANONYMOUS
-#define MAP_ANONYMOUS MAP_ANON
-#endif
-
-namespace base {
-namespace trace_event {
-
-namespace {
-size_t GetGuardSize() {
- return GetPageSize();
-}
-}
-
-// static
-void* AllocationRegister::AllocateVirtualMemory(size_t size) {
- size = bits::Align(size, GetPageSize());
-
- // Add space for a guard page at the end.
- size_t map_size = size + GetGuardSize();
-
- void* addr = mmap(nullptr, map_size, PROT_READ | PROT_WRITE,
- MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
-
- PCHECK(addr != MAP_FAILED);
-
- // Mark the last page of the allocated address space as inaccessible
- // (PROT_NONE). The read/write accessible space is still at least |size|
- // bytes.
- void* guard_addr =
- reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(addr) + size);
- int result = mprotect(guard_addr, GetGuardSize(), PROT_NONE);
- PCHECK(result == 0);
-
- return addr;
-}
-
-// static
-void AllocationRegister::FreeVirtualMemory(void* address,
- size_t allocated_size) {
- size_t size = bits::Align(allocated_size, GetPageSize()) + GetGuardSize();
- munmap(address, size);
-}
-
-} // namespace trace_event
-} // namespace base
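The reserve-then-guard pattern above is simple enough to sketch in isolation. The snippet below is a minimal illustration of the same technique, not the deleted file: it rounds the usable region up to whole pages, maps one extra page, and makes that trailing page inaccessible so an out-of-bounds store faults immediately. The function name is hypothetical.

#include <sys/mman.h>
#include <unistd.h>

#include <cstddef>
#include <cstdlib>

#ifndef MAP_ANONYMOUS
#define MAP_ANONYMOUS MAP_ANON
#endif

void* ReserveWithGuardPage(size_t min_size) {
  const size_t page = static_cast<size_t>(sysconf(_SC_PAGESIZE));

  // Round the usable region up to a whole number of pages, then add one page
  // that will serve as the guard.
  const size_t usable = (min_size + page - 1) & ~(page - 1);
  const size_t map_size = usable + page;

  void* addr = mmap(nullptr, map_size, PROT_READ | PROT_WRITE,
                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (addr == MAP_FAILED)
    std::abort();

  // Any access beyond |usable| bytes now raises a fault instead of silently
  // corrupting whatever happens to be mapped next.
  void* guard = static_cast<char*>(addr) + usable;
  if (mprotect(guard, page, PROT_NONE) != 0)
    std::abort();

  return addr;
}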
diff --git a/base/trace_event/memory_profiler_allocation_register_unittest.cc b/base/trace_event/memory_profiler_allocation_register_unittest.cc
deleted file mode 100644
index 0c976c4..0000000
--- a/base/trace_event/memory_profiler_allocation_register_unittest.cc
+++ /dev/null
@@ -1,228 +0,0 @@
-// Copyright 2015 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/trace_event/memory_profiler_allocation_register.h"
-
-#include "base/process/process_metrics.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace base {
-namespace trace_event {
-
-class AllocationRegisterTest : public testing::Test {
- public:
- static const uint32_t kNumBuckets = AllocationRegister::kNumBuckets;
- static const uint32_t kNumCells = AllocationRegister::kNumCells;
-
- // Returns the number of cells that the |AllocationRegister| can store per
- // system page.
- size_t GetNumCellsPerPage() {
- return GetPageSize() / sizeof(AllocationRegister::Cell);
- }
-
- uint32_t GetHighWaterMark(const AllocationRegister& reg) {
- return reg.next_unused_cell_;
- }
-};
-
-// Iterates over all entries in the allocation register and returns the bitwise
-// or of all addresses stored in it.
-uintptr_t OrAllAddresses(const AllocationRegister& reg) {
- uintptr_t acc = 0;
-
- for (auto i : reg)
- acc |= reinterpret_cast<uintptr_t>(i.address);
-
- return acc;
-}
-
-// Iterates over all entries in the allocation register and returns the sum of
-// the sizes of the entries.
-size_t SumAllSizes(const AllocationRegister& reg) {
- size_t sum = 0;
-
- for (auto i : reg)
- sum += i.size;
-
- return sum;
-}
-
-TEST_F(AllocationRegisterTest, InsertRemove) {
- AllocationRegister reg;
- AllocationContext ctx;
-
- EXPECT_EQ(0u, OrAllAddresses(reg));
-
- reg.Insert(reinterpret_cast<void*>(1), 0, ctx);
-
- EXPECT_EQ(1u, OrAllAddresses(reg));
-
- reg.Insert(reinterpret_cast<void*>(2), 0, ctx);
-
- EXPECT_EQ(3u, OrAllAddresses(reg));
-
- reg.Insert(reinterpret_cast<void*>(4), 0, ctx);
-
- EXPECT_EQ(7u, OrAllAddresses(reg));
-
- reg.Remove(reinterpret_cast<void*>(2));
-
- EXPECT_EQ(5u, OrAllAddresses(reg));
-
- reg.Remove(reinterpret_cast<void*>(4));
-
- EXPECT_EQ(1u, OrAllAddresses(reg));
-
- reg.Remove(reinterpret_cast<void*>(1));
-
- EXPECT_EQ(0u, OrAllAddresses(reg));
-}
-
-TEST_F(AllocationRegisterTest, DoubleFreeIsAllowed) {
- AllocationRegister reg;
- AllocationContext ctx;
-
- reg.Insert(reinterpret_cast<void*>(1), 0, ctx);
- reg.Insert(reinterpret_cast<void*>(2), 0, ctx);
- reg.Remove(reinterpret_cast<void*>(1));
- reg.Remove(reinterpret_cast<void*>(1)); // Remove for the second time.
- reg.Remove(reinterpret_cast<void*>(4)); // Remove never inserted address.
-
- EXPECT_EQ(2u, OrAllAddresses(reg));
-}
-
-TEST_F(AllocationRegisterTest, DoubleInsertOverwrites) {
- // TODO(ruuda): Although double insert happens in practice, it should not.
- // Find out the cause and ban double insert if possible.
- AllocationRegister reg;
- AllocationContext ctx;
- StackFrame frame1 = "Foo";
- StackFrame frame2 = "Bar";
-
- ctx.backtrace.frames[0] = frame1;
- reg.Insert(reinterpret_cast<void*>(1), 11, ctx);
-
- auto elem = *reg.begin();
-
- EXPECT_EQ(frame1, elem.context.backtrace.frames[0]);
- EXPECT_EQ(11u, elem.size);
- EXPECT_EQ(reinterpret_cast<void*>(1), elem.address);
-
- ctx.backtrace.frames[0] = frame2;
- reg.Insert(reinterpret_cast<void*>(1), 13, ctx);
-
- elem = *reg.begin();
-
- EXPECT_EQ(frame2, elem.context.backtrace.frames[0]);
- EXPECT_EQ(13u, elem.size);
- EXPECT_EQ(reinterpret_cast<void*>(1), elem.address);
-}
-
-// Check that even if more entries than the number of buckets are inserted, the
-// register still behaves correctly.
-TEST_F(AllocationRegisterTest, InsertRemoveCollisions) {
- size_t expected_sum = 0;
- AllocationRegister reg;
- AllocationContext ctx;
-
- // By inserting 100 more entries than the number of buckets, there will be at
- // least 100 collisions.
- for (uintptr_t i = 1; i <= kNumBuckets + 100; i++) {
- size_t size = i % 31;
- expected_sum += size;
- reg.Insert(reinterpret_cast<void*>(i), size, ctx);
-
- // Don't check the sum on every iteration to keep the test fast.
- if (i % (1 << 14) == 0)
- EXPECT_EQ(expected_sum, SumAllSizes(reg));
- }
-
- EXPECT_EQ(expected_sum, SumAllSizes(reg));
-
- for (uintptr_t i = 1; i <= kNumBuckets + 100; i++) {
- size_t size = i % 31;
- expected_sum -= size;
- reg.Remove(reinterpret_cast<void*>(i));
-
- if (i % (1 << 14) == 0)
- EXPECT_EQ(expected_sum, SumAllSizes(reg));
- }
-
- EXPECT_EQ(expected_sum, SumAllSizes(reg));
-}
-
-// The previous tests are not particularly good for testing iterators, because
-// elements are removed and inserted in the same order, meaning that the cells
-// fill up from low to high index, and are then freed from low to high index.
-// This test removes entries in a different order, to ensure that the iterator
-// skips over the freed cells properly. Then insert again to ensure that the
-// free list is utilised properly.
-TEST_F(AllocationRegisterTest, InsertRemoveRandomOrder) {
- size_t expected_sum = 0;
- AllocationRegister reg;
- AllocationContext ctx;
-
- uintptr_t generator = 3;
- uintptr_t prime = 1013;
- uint32_t initial_water_mark = GetHighWaterMark(reg);
-
- for (uintptr_t i = 2; i < prime; i++) {
- size_t size = i % 31;
- expected_sum += size;
- reg.Insert(reinterpret_cast<void*>(i), size, ctx);
- }
-
- // This should have used a fresh slot for each of the |prime - 2| inserts.
- ASSERT_EQ(prime - 2, GetHighWaterMark(reg) - initial_water_mark);
-
- // Iterate the numbers 2, 3, ..., prime - 1 in pseudorandom order.
- for (uintptr_t i = generator; i != 1; i = (i * generator) % prime) {
- size_t size = i % 31;
- expected_sum -= size;
- reg.Remove(reinterpret_cast<void*>(i));
- EXPECT_EQ(expected_sum, SumAllSizes(reg));
- }
-
- ASSERT_EQ(0u, expected_sum);
-
- // Insert |prime - 2| entries again. This should use cells from the free list,
- // so the |next_unused_cell_| index should not change.
- for (uintptr_t i = 2; i < prime; i++)
- reg.Insert(reinterpret_cast<void*>(i), 0, ctx);
-
- ASSERT_EQ(prime - 2, GetHighWaterMark(reg) - initial_water_mark);
-
- // Inserting one more entry should use a fresh cell again.
- reg.Insert(reinterpret_cast<void*>(prime), 0, ctx);
- ASSERT_EQ(prime - 1, GetHighWaterMark(reg) - initial_water_mark);
-}
-
-// Check that the process aborts due to hitting the guard page when inserting
-// too many elements.
-#if GTEST_HAS_DEATH_TEST
-TEST_F(AllocationRegisterTest, OverflowDeathTest) {
- AllocationRegister reg;
- AllocationContext ctx;
- uintptr_t i;
-
- // Fill up all of the memory allocated for the register. |kNumCells| minus 1
- // elements are inserted, because cell 0 is unused, so this should fill up
- // the available cells exactly.
- for (i = 1; i < kNumCells; i++) {
- reg.Insert(reinterpret_cast<void*>(i), 0, ctx);
- }
-
- // Adding just one extra element might still work because the allocated memory
- // is rounded up to the page size. Adding a page full of elements should cause
- // overflow.
- const size_t cells_per_page = GetNumCellsPerPage();
-
- ASSERT_DEATH(for (size_t j = 0; j < cells_per_page; j++) {
- reg.Insert(reinterpret_cast<void*>(i + j), 0, ctx);
- }, "");
-}
-#endif
-
-} // namespace trace_event
-} // namespace base
diff --git a/base/trace_event/memory_profiler_allocation_register_win.cc b/base/trace_event/memory_profiler_allocation_register_win.cc
deleted file mode 100644
index b94c75e..0000000
--- a/base/trace_event/memory_profiler_allocation_register_win.cc
+++ /dev/null
@@ -1,62 +0,0 @@
-// Copyright 2015 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/trace_event/memory_profiler_allocation_register.h"
-
-#include <windows.h>
-
-#include "base/bits.h"
-#include "base/logging.h"
-#include "base/process/process_metrics.h"
-
-namespace base {
-namespace trace_event {
-
-namespace {
-size_t GetGuardSize() {
- return GetPageSize();
-}
-}
-
-// static
-void* AllocationRegister::AllocateVirtualMemory(size_t size) {
- size = bits::Align(size, GetPageSize());
-
- // Add space for a guard page at the end.
- size_t map_size = size + GetGuardSize();
-
- // Reserve the address space. This does not make the memory usable yet.
- void* addr = VirtualAlloc(nullptr, map_size, MEM_RESERVE, PAGE_NOACCESS);
-
- PCHECK(addr != nullptr);
-
- // Commit the non-guard pages as read-write memory.
- void* result = VirtualAlloc(addr, size, MEM_COMMIT, PAGE_READWRITE);
-
- PCHECK(result != nullptr);
-
- // Mark the last page of the allocated address space as guard page. (NB: The
- // |PAGE_GUARD| flag is not the flag to use here, that flag can be used to
- // detect and intercept access to a certain memory region. Accessing a
- // |PAGE_NOACCESS| page will raise a general protection fault.) The
- // read/write accessible space is still at least |size| bytes.
- void* guard_addr =
- reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(addr) + size);
- result = VirtualAlloc(guard_addr, GetGuardSize(), MEM_COMMIT, PAGE_NOACCESS);
- PCHECK(result != nullptr);
-
- return addr;
-}
-
-// static
-void AllocationRegister::FreeVirtualMemory(void* address,
- size_t allocated_size) {
- // For |VirtualFree|, the size passed with |MEM_RELEASE| must be 0. Windows
- // automatically frees the entire region that was reserved by the
- // |VirtualAlloc| with flag |MEM_RESERVE|.
- VirtualFree(address, 0, MEM_RELEASE);
-}
-
-} // namespace trace_event
-} // namespace base
diff --git a/base/trace_event/trace_event.gypi b/base/trace_event/trace_event.gypi
index 2718418..d7ec391 100644
--- a/base/trace_event/trace_event.gypi
+++ b/base/trace_event/trace_event.gypi
@@ -19,10 +19,6 @@
'trace_event/memory_dump_session_state.h',
'trace_event/memory_profiler_allocation_context.cc',
'trace_event/memory_profiler_allocation_context.h',
- 'trace_event/memory_profiler_allocation_register.cc',
- 'trace_event/memory_profiler_allocation_register_posix.cc',
- 'trace_event/memory_profiler_allocation_register_win.cc',
- 'trace_event/memory_profiler_allocation_register.h',
'trace_event/process_memory_dump.cc',
'trace_event/process_memory_dump.h',
'trace_event/process_memory_maps.cc',
@@ -77,7 +73,6 @@
'trace_event/memory_allocator_dump_unittest.cc',
'trace_event/memory_dump_manager_unittest.cc',
'trace_event/memory_profiler_allocation_context_unittest.cc',
- 'trace_event/memory_profiler_allocation_register_unittest.cc',
'trace_event/process_memory_dump_unittest.cc',
'trace_event/process_memory_maps_dump_provider_unittest.cc',
'trace_event/process_memory_totals_dump_provider_unittest.cc',