summaryrefslogtreecommitdiffstats
path: root/base/trace_event
diff options
context:
space:
mode:
author	seantopping <seantopping@chromium.org>	2015-12-17 14:08:23 -0800
committer	Commit bot <commit-bot@chromium.org>	2015-12-17 22:09:18 +0000
commit	684e479f9a46580b5d5bdcf58944b832f6317dc1 (patch)
tree	cd0b123b66663c5dba612b6825167fbaa12edabe /base/trace_event
parent	53adf16ef292bc721a65b47628b1a3008b502363 (diff)
download	chromium_src-684e479f9a46580b5d5bdcf58944b832f6317dc1.zip
	chromium_src-684e479f9a46580b5d5bdcf58944b832f6317dc1.tar.gz
	chromium_src-684e479f9a46580b5d5bdcf58944b832f6317dc1.tar.bz2
[Tracing] Adjust allocation register size for low-end devices
This patch limits the size of the allocation register and prevents OOM errors
when running AllocationRegisterTest.OverflowDeathTest on low-end devices.

BUG=570242

Review URL: https://codereview.chromium.org/1530583003

Cr-Commit-Position: refs/heads/master@{#365905}
Diffstat (limited to 'base/trace_event')
-rw-r--r--base/trace_event/heap_profiler_allocation_register.cc13
-rw-r--r--base/trace_event/heap_profiler_allocation_register.h7
-rw-r--r--base/trace_event/heap_profiler_allocation_register_unittest.cc16
3 files changed, 25 insertions, 11 deletions
diff --git a/base/trace_event/heap_profiler_allocation_register.cc b/base/trace_event/heap_profiler_allocation_register.cc
index 37647ee..2920153 100644
--- a/base/trace_event/heap_profiler_allocation_register.cc
+++ b/base/trace_event/heap_profiler_allocation_register.cc
@@ -10,10 +10,14 @@ namespace base {
namespace trace_event {
AllocationRegister::AllocationRegister()
- // Reserve enough address space to store |kNumCells| entries if necessary,
+ : AllocationRegister(kNumBuckets * kNumCellsPerBucket) {}
+
+AllocationRegister::AllocationRegister(uint32_t num_cells)
+ // Reserve enough address space to store |num_cells_| entries if necessary,
// with a guard page after it to crash the program when attempting to store
// more entries.
- : cells_(static_cast<Cell*>(AllocateVirtualMemory(kNumCells *
+ : num_cells_(num_cells),
+ cells_(static_cast<Cell*>(AllocateVirtualMemory(num_cells_ *
sizeof(Cell)))),
buckets_(static_cast<CellIndex*>(
AllocateVirtualMemory(kNumBuckets * sizeof(CellIndex)))),
@@ -23,9 +27,10 @@ AllocationRegister::AllocationRegister()
free_list_(0),
next_unused_cell_(1) {}
+
AllocationRegister::~AllocationRegister() {
FreeVirtualMemory(buckets_, kNumBuckets * sizeof(CellIndex));
- FreeVirtualMemory(cells_, kNumCells * sizeof(Cell));
+ FreeVirtualMemory(cells_, num_cells_ * sizeof(Cell));
}
void AllocationRegister::Insert(void* address,
@@ -144,7 +149,7 @@ AllocationRegister::CellIndex AllocationRegister::GetFreeCell() {
// the simplest solution is to just allocate a humongous chunk of address
// space.
- DCHECK_LT(next_unused_cell_, kNumCells + 1);
+ DCHECK_LT(next_unused_cell_, num_cells_ + 1);
return idx;
}
diff --git a/base/trace_event/heap_profiler_allocation_register.h b/base/trace_event/heap_profiler_allocation_register.h
index 7c4ba99..940fb54 100644
--- a/base/trace_event/heap_profiler_allocation_register.h
+++ b/base/trace_event/heap_profiler_allocation_register.h
@@ -60,6 +60,8 @@ class BASE_EXPORT AllocationRegister {
};
AllocationRegister();
+ explicit AllocationRegister(uint32_t num_cells);
+
~AllocationRegister();
// Inserts allocation details into the table. If the address was present
@@ -112,7 +114,7 @@ class BASE_EXPORT AllocationRegister {
// not an option). A value of ~3M entries is large enough to handle spikes in
// the number of allocations, and modest enough to require no more than a few
// dozens of MiB of address space.
- static const uint32_t kNumCells = kNumBuckets * 10;
+ static const uint32_t kNumCellsPerBucket = 10;
// Returns a value in the range [0, kNumBuckets - 1] (inclusive).
static uint32_t Hash(void* address);
@@ -136,6 +138,9 @@ class BASE_EXPORT AllocationRegister {
// from the free list or by taking a fresh cell) and returns its index.
CellIndex GetFreeCell();
+ // The maximum number of cells which can be allocated.
+ uint32_t const num_cells_;
+
// The array of cells. This array is backed by mmapped memory. Lower indices
// are accessed first, higher indices are only accessed when required. In
// this way, even if a huge amount of address space has been mmapped, only
diff --git a/base/trace_event/heap_profiler_allocation_register_unittest.cc b/base/trace_event/heap_profiler_allocation_register_unittest.cc
index 6058f47..51d36e9 100644
--- a/base/trace_event/heap_profiler_allocation_register_unittest.cc
+++ b/base/trace_event/heap_profiler_allocation_register_unittest.cc
@@ -14,7 +14,6 @@ namespace trace_event {
class AllocationRegisterTest : public testing::Test {
public:
static const uint32_t kNumBuckets = AllocationRegister::kNumBuckets;
- static const uint32_t kNumCells = AllocationRegister::kNumCells;
// Returns the number of cells that the |AllocationRegister| can store per
// system page.
@@ -25,6 +24,10 @@ class AllocationRegisterTest : public testing::Test {
uint32_t GetHighWaterMark(const AllocationRegister& reg) {
return reg.next_unused_cell_;
}
+
+ uint32_t GetNumCells(const AllocationRegister& reg) {
+ return reg.num_cells_;
+ }
};
// Iterates over all entries in the allocation register and returns the bitwise
@@ -203,14 +206,15 @@ TEST_F(AllocationRegisterTest, InsertRemoveRandomOrder) {
// too many elements.
#if GTEST_HAS_DEATH_TEST
TEST_F(AllocationRegisterTest, OverflowDeathTest) {
- AllocationRegister reg;
+ // Use a smaller register to prevent OOM errors on low-end devices.
+ AllocationRegister reg(GetNumCellsPerPage());
AllocationContext ctx = AllocationContext::Empty();
uintptr_t i;
- // Fill up all of the memory allocated for the register. |kNumCells| minus 1
- // elements are inserted, because cell 0 is unused, so this should fill up
- // the available cells exactly.
- for (i = 1; i < kNumCells; i++) {
+ // Fill up all of the memory allocated for the register. |GetNumCells(reg)|
+ // minus 1 elements are inserted, because cell 0 is unused, so this should
+ // fill up the available cells exactly.
+ for (i = 1; i < GetNumCells(reg); i++) {
reg.Insert(reinterpret_cast<void*>(i), 0, ctx);
}