summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorsque <sque@chromium.org>2015-11-21 00:03:56 -0800
committerCommit bot <commit-bot@chromium.org>2015-11-21 08:04:34 +0000
commitd02b109cb1f41b7ea0584d0910c8bb4cf21cb521 (patch)
tree3294c7cd6680b8d3d514aa45dc49d677cd6189be
parentb88ffb2d8d3188ad74f8e6585efc77d41827a1a5 (diff)
downloadchromium_src-d02b109cb1f41b7ea0584d0910c8bb4cf21cb521.zip
chromium_src-d02b109cb1f41b7ea0584d0910c8bb4cf21cb521.tar.gz
chromium_src-d02b109cb1f41b7ea0584d0910c8bb4cf21cb521.tar.bz2
components/metrics: Add runtime memory leak detector
This patch adds a heuristic-based memory leak detector. Unlike traditional leak detectors such as valgrind, it does not wait until a process terminates to check for leftover allocations. Instead, it analyzes allocation patterns over time. This code is not thread-safe. It is up to the caller of this code to ensure thread safety. BUG=382705 Signed-off-by: Simon Que <sque@chromium.org> Review URL: https://codereview.chromium.org/986503002 Cr-Commit-Position: refs/heads/master@{#361012}
-rw-r--r--components/components_tests.gyp9
-rw-r--r--components/metrics.gypi23
-rw-r--r--components/metrics/BUILD.gn44
-rw-r--r--components/metrics/leak_detector/OWNERS2
-rw-r--r--components/metrics/leak_detector/call_stack_manager.cc67
-rw-r--r--components/metrics/leak_detector/call_stack_manager.h100
-rw-r--r--components/metrics/leak_detector/call_stack_manager_unittest.cc260
-rw-r--r--components/metrics/leak_detector/call_stack_table.cc78
-rw-r--r--components/metrics/leak_detector/call_stack_table.h81
-rw-r--r--components/metrics/leak_detector/call_stack_table_unittest.cc306
-rw-r--r--components/metrics/leak_detector/custom_allocator.cc61
-rw-r--r--components/metrics/leak_detector/custom_allocator.h50
-rw-r--r--components/metrics/leak_detector/leak_analyzer.cc139
-rw-r--r--components/metrics/leak_detector/leak_analyzer.h81
-rw-r--r--components/metrics/leak_detector/leak_analyzer_unittest.cc362
-rw-r--r--components/metrics/leak_detector/leak_detector_impl.cc215
-rw-r--r--components/metrics/leak_detector/leak_detector_impl.h161
-rw-r--r--components/metrics/leak_detector/leak_detector_impl_unittest.cc464
-rw-r--r--components/metrics/leak_detector/leak_detector_value_type.cc47
-rw-r--r--components/metrics/leak_detector/leak_detector_value_type.h52
-rw-r--r--components/metrics/leak_detector/ranked_list.cc45
-rw-r--r--components/metrics/leak_detector/ranked_list.h81
-rw-r--r--components/metrics/leak_detector/ranked_list_unittest.cc266
-rw-r--r--components/metrics/leak_detector/stl_allocator.h61
24 files changed, 3055 insertions, 0 deletions
diff --git a/components/components_tests.gyp b/components/components_tests.gyp
index 26558d4..067b266 100644
--- a/components/components_tests.gyp
+++ b/components/components_tests.gyp
@@ -359,6 +359,13 @@
'metrics/stability_metrics_helper_unittest.cc',
'metrics/ui/screen_info_metrics_provider_unittest.cc',
],
+ 'metrics_leak_detector_unittest_sources': [
+ 'metrics/leak_detector/call_stack_manager_unittest.cc',
+ 'metrics/leak_detector/call_stack_table_unittest.cc',
+ 'metrics/leak_detector/leak_analyzer_unittest.cc',
+ 'metrics/leak_detector/leak_detector_impl_unittest.cc',
+ 'metrics/leak_detector/ranked_list_unittest.cc',
+ ],
'mime_util_unittest_sources': [
'mime_util/mime_util_unittest.cc',
],
@@ -1371,6 +1378,7 @@
'wifi_sync/wifi_credential_unittest.cc',
'wifi_sync/wifi_security_class_chromeos_unittest.cc',
'wifi_sync/wifi_security_class_unittest.cc',
+ '<@(metrics_leak_detector_unittest_sources)',
'<@(ownership_unittest_sources)',
],
'sources!': [
@@ -1379,6 +1387,7 @@
],
'dependencies': [
'../chromeos/chromeos.gyp:chromeos_test_support',
+ 'components.gyp:metrics_leak_detector',
'components.gyp:ownership',
'components.gyp:pairing',
'components.gyp:user_manager_test_support',
diff --git a/components/metrics.gypi b/components/metrics.gypi
index 36e8e57..99db049 100644
--- a/components/metrics.gypi
+++ b/components/metrics.gypi
@@ -104,6 +104,29 @@
],
},
{
+ 'target_name': 'metrics_leak_detector',
+ 'type': 'static_library',
+ 'dependencies': [
+ '../base/base.gyp:base',
+ ],
+ 'sources': [
+ 'metrics/leak_detector/call_stack_manager.cc',
+ 'metrics/leak_detector/call_stack_manager.h',
+ 'metrics/leak_detector/call_stack_table.cc',
+ 'metrics/leak_detector/call_stack_table.h',
+ 'metrics/leak_detector/custom_allocator.cc',
+ 'metrics/leak_detector/custom_allocator.h',
+ 'metrics/leak_detector/leak_analyzer.cc',
+ 'metrics/leak_detector/leak_analyzer.h',
+ 'metrics/leak_detector/leak_detector_impl.cc',
+ 'metrics/leak_detector/leak_detector_impl.h',
+ 'metrics/leak_detector/leak_detector_value_type.cc',
+ 'metrics/leak_detector/leak_detector_value_type.h',
+ 'metrics/leak_detector/ranked_list.cc',
+ 'metrics/leak_detector/ranked_list.h',
+ ],
+ },
+ {
# GN version: //components/metrics:net
'target_name': 'metrics_net',
'type': 'static_library',
diff --git a/components/metrics/BUILD.gn b/components/metrics/BUILD.gn
index 836c243..f711a59 100644
--- a/components/metrics/BUILD.gn
+++ b/components/metrics/BUILD.gn
@@ -111,6 +111,30 @@ if (!is_ios) {
}
}
+# GYP version: components/metrics.gypi:metrics_leak_detector
+static_library("leak_detector") {
+ sources = [
+ "leak_detector/call_stack_manager.cc",
+ "leak_detector/call_stack_manager.h",
+ "leak_detector/call_stack_table.cc",
+ "leak_detector/call_stack_table.h",
+ "leak_detector/custom_allocator.cc",
+ "leak_detector/custom_allocator.h",
+ "leak_detector/leak_analyzer.cc",
+ "leak_detector/leak_analyzer.h",
+ "leak_detector/leak_detector_impl.cc",
+ "leak_detector/leak_detector_impl.h",
+ "leak_detector/leak_detector_value_type.cc",
+ "leak_detector/leak_detector_value_type.h",
+ "leak_detector/ranked_list.cc",
+ "leak_detector/ranked_list.h",
+ ]
+
+ deps = [
+ "//base",
+ ]
+}
+
# GYP version: components/metrics.gypi:metrics_net
static_library("net") {
sources = [
@@ -241,6 +265,22 @@ if (is_linux) {
}
}
+source_set("leak_detector_unit_tests") {
+ testonly = true
+ sources = [
+ "leak_detector/call_stack_manager_unittest.cc",
+ "leak_detector/call_stack_table_unittest.cc",
+ "leak_detector/leak_analyzer_unittest.cc",
+ "leak_detector/leak_detector_impl_unittest.cc",
+ "leak_detector/ranked_list_unittest.cc",
+ ]
+
+ deps = [
+ ":leak_detector",
+ "//testing/gtest",
+ ]
+}
+
source_set("unit_tests") {
testonly = true
sources = [
@@ -280,5 +320,9 @@ source_set("unit_tests") {
sources += [ "serialization/serialization_utils_unittest.cc" ]
deps += [ ":serialization" ]
}
+
+ if (is_chromeos) {
+ deps += [ ":leak_detector_unit_tests" ]
+ }
}
# TODO(GYP): metrics_chromeos
diff --git a/components/metrics/leak_detector/OWNERS b/components/metrics/leak_detector/OWNERS
new file mode 100644
index 0000000..d8bfc45
--- /dev/null
+++ b/components/metrics/leak_detector/OWNERS
@@ -0,0 +1,2 @@
+sque@chromium.org
+wfh@chromium.org
diff --git a/components/metrics/leak_detector/call_stack_manager.cc b/components/metrics/leak_detector/call_stack_manager.cc
new file mode 100644
index 0000000..de9fcd8
--- /dev/null
+++ b/components/metrics/leak_detector/call_stack_manager.cc
@@ -0,0 +1,67 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "components/metrics/leak_detector/call_stack_manager.h"
+
+#include <algorithm> // For std::copy.
+#include <new>
+
+#include "base/hash.h"
+#include "components/metrics/leak_detector/custom_allocator.h"
+
+namespace metrics {
+namespace leak_detector {
+
+CallStackManager::CallStackManager() {}
+
+CallStackManager::~CallStackManager() {
+ for (CallStack* call_stack : call_stacks_) {
+ CustomAllocator::Free(call_stack->stack,
+ call_stack->depth * sizeof(*call_stack->stack));
+ call_stack->stack = nullptr;
+ call_stack->depth = 0;
+
+ CustomAllocator::Free(call_stack, sizeof(CallStack));
+ }
+ call_stacks_.clear();
+}
+
+const CallStack* CallStackManager::GetCallStack(size_t depth,
+ const void* const stack[]) {
+ // Temporarily create a call stack object for lookup in |call_stacks_|.
+ CallStack temp;
+ temp.depth = depth;
+ temp.stack = const_cast<const void**>(stack);
+ // This is the only place where the call stack's hash is computed. This value
+ // can be reused in the created object to avoid further hash computation.
+ temp.hash =
+ base::Hash(reinterpret_cast<const char*>(stack), sizeof(*stack) * depth);
+
+ auto iter = call_stacks_.find(&temp);
+ if (iter != call_stacks_.end())
+ return *iter;
+
+  // Since |call_stacks_| stores CallStack pointers rather than actual objects,
+  // create new CallStack objects manually here.
+ CallStack* call_stack =
+ new (CustomAllocator::Allocate(sizeof(CallStack))) CallStack;
+ call_stack->depth = depth;
+ call_stack->hash = temp.hash; // Don't run the hash function again.
+ call_stack->stack = reinterpret_cast<const void**>(
+ CustomAllocator::Allocate(sizeof(*stack) * depth));
+ std::copy(stack, stack + depth, call_stack->stack);
+
+ call_stacks_.insert(call_stack);
+ return call_stack;
+}
+
+bool CallStackManager::CallStackPointerEqual::operator()(
+ const CallStack* c1,
+ const CallStack* c2) const {
+ return c1->depth == c2->depth && c1->hash == c2->hash &&
+ std::equal(c1->stack, c1->stack + c1->depth, c2->stack);
+}
+
+} // namespace leak_detector
+} // namespace metrics
diff --git a/components/metrics/leak_detector/call_stack_manager.h b/components/metrics/leak_detector/call_stack_manager.h
new file mode 100644
index 0000000..1ec6b9d
--- /dev/null
+++ b/components/metrics/leak_detector/call_stack_manager.h
@@ -0,0 +1,100 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef COMPONENTS_METRICS_LEAK_DETECTOR_CALL_STACK_MANAGER_H_
+#define COMPONENTS_METRICS_LEAK_DETECTOR_CALL_STACK_MANAGER_H_
+
+#include <stdint.h>
+
+#include "base/containers/hash_tables.h"
+#include "base/macros.h"
+#include "components/metrics/leak_detector/custom_allocator.h"
+#include "components/metrics/leak_detector/stl_allocator.h"
+
+// Summary of structures:
+//
+// struct CallStack:
+// Represents a unique call stack, defined by its raw call stack (array of
+// pointers), and hash value. All CallStack objects are owned by class
+// CallStackManager. Other classes may hold pointers to them but should not
+// attempt to create or modify any CallStack objects.
+//
+// class CallStackManager:
+// Creates CallStack objects to represent unique call stacks. Owns all
+// CallStack objects that it creates, storing them internally.
+//
+// Returns the call stacks as const pointers because no caller should take
+// ownership of them and modify or delete them. The lifetime of all CallStack
+// objects is limited to that of the CallStackManager object that created
+// them. When the CallStackManager is destroyed, the CallStack objects will be
+// invalidated. Hence the caller should make sure that it does not use
+// CallStack objects beyond the lifetime of the CallStackManager that created
+// them.
+
+namespace metrics {
+namespace leak_detector {
+
+// Struct to represent a call stack.
+struct CallStack {
+ // Call stack as an array of pointers, |stack|. The array length is stored in
+ // |depth|. There is no null terminator.
+ const void** stack;
+ size_t depth;
+
+  // Hash of call stack. It is cached here so that it does not have to be
+ // recomputed each time.
+ size_t hash;
+};
+
+// Maintains and owns all unique call stack objects.
+// Not thread-safe.
+class CallStackManager {
+ public:
+ CallStackManager();
+ ~CallStackManager();
+
+ // Returns a CallStack object for a given raw call stack. The first time a
+ // particular raw call stack is passed into this function, it will create a
+ // new CallStack object to hold the raw call stack data, and then return it.
+ // The CallStack object is stored internally.
+ //
+ // On subsequent calls with the same raw call stack, this function will return
+ // the previously created CallStack object.
+ const CallStack* GetCallStack(size_t depth, const void* const stack[]);
+
+ size_t size() const { return call_stacks_.size(); }
+
+ private:
+ // Allocator class for unique call stacks.
+ using CallStackPointerAllocator = STLAllocator<CallStack*, CustomAllocator>;
+
+ // Hash operator for call stack object given as a pointer.
+ // Does not actually compute the hash. Instead, returns the already computed
+ // hash value stored in a CallStack object.
+ struct CallStackPointerStoredHash {
+ size_t operator()(const CallStack* call_stack) const {
+ return call_stack->hash;
+ }
+ };
+
+ // Equality comparator for call stack objects given as pointers. Compares
+ // their stack trace contents.
+ struct CallStackPointerEqual {
+ bool operator()(const CallStack* c1, const CallStack* c2) const;
+ };
+
+ // Holds all call stack objects. Each object is allocated elsewhere and stored
+ // as a pointer because the container may rearrange itself internally.
+ base::hash_set<CallStack*,
+ CallStackPointerStoredHash,
+ CallStackPointerEqual,
+ CallStackPointerAllocator> call_stacks_;
+
+ DISALLOW_COPY_AND_ASSIGN(CallStackManager);
+};
+
+} // namespace leak_detector
+} // namespace metrics
+
+#endif // COMPONENTS_METRICS_LEAK_DETECTOR_CALL_STACK_MANAGER_H_
diff --git a/components/metrics/leak_detector/call_stack_manager_unittest.cc b/components/metrics/leak_detector/call_stack_manager_unittest.cc
new file mode 100644
index 0000000..8b49006
--- /dev/null
+++ b/components/metrics/leak_detector/call_stack_manager_unittest.cc
@@ -0,0 +1,260 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "components/metrics/leak_detector/call_stack_manager.h"
+
+#include <stdint.h>
+
+#include <algorithm>
+
+#include "base/macros.h"
+#include "base/memory/scoped_ptr.h"
+#include "components/metrics/leak_detector/custom_allocator.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace metrics {
+namespace leak_detector {
+
+namespace {
+
+// Some test call stacks. The addresses are 64-bit but they should automatically
+// be truncated to 32 bits on a 32-bit machine.
+const void* kRawStack0[] = {
+ reinterpret_cast<const void*>(0x8899aabbccddeeff),
+ reinterpret_cast<const void*>(0x0000112233445566),
+ reinterpret_cast<const void*>(0x5566778899aabbcc),
+ reinterpret_cast<const void*>(0x9988776655443322),
+};
+// This is similar to kRawStack0, differing only in one address by 1. It should
+// still produce a distinct CallStack object and hash.
+const void* kRawStack1[] = {
+ kRawStack0[0], kRawStack0[1],
+ reinterpret_cast<const void*>(reinterpret_cast<uintptr_t>(kRawStack0[2]) +
+ 1),
+ kRawStack0[3],
+};
+const void* kRawStack2[] = {
+ reinterpret_cast<const void*>(0x900df00dcab58888),
+ reinterpret_cast<const void*>(0x00001337cafedeed),
+ reinterpret_cast<const void*>(0x0000deafbabe1234),
+};
+const void* kRawStack3[] = {
+ reinterpret_cast<const void*>(0x0000000012345678),
+ reinterpret_cast<const void*>(0x00000000abcdef01),
+ reinterpret_cast<const void*>(0x00000000fdecab98),
+ reinterpret_cast<const void*>(0x0000deadbeef0001),
+ reinterpret_cast<const void*>(0x0000900ddeed0002),
+ reinterpret_cast<const void*>(0x0000f00dcafe0003),
+ reinterpret_cast<const void*>(0x0000f00d900d0004),
+ reinterpret_cast<const void*>(0xdeedcafebabe0005),
+};
+
+// Creates a copy of a call stack as a scoped_ptr to a raw stack. The depth is
+// the same as the original stack, but it is not stored in the result.
+scoped_ptr<const void* []> CopyStack(const CallStack* stack) {
+ scoped_ptr<const void* []> stack_copy(new const void*[stack->depth]);
+ std::copy(stack->stack, stack->stack + stack->depth, stack_copy.get());
+ return stack_copy.Pass();
+}
+
+} // namespace
+
+class CallStackManagerTest : public ::testing::Test {
+ public:
+ CallStackManagerTest() {}
+
+ void SetUp() override { CustomAllocator::Initialize(); }
+ void TearDown() override { EXPECT_TRUE(CustomAllocator::Shutdown()); }
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(CallStackManagerTest);
+};
+
+TEST_F(CallStackManagerTest, NewStacks) {
+ CallStackManager manager;
+ EXPECT_EQ(0U, manager.size());
+
+ // Request some new stacks and make sure their creation is reflected in the
+ // size of |manager|.
+ const CallStack* stack0 =
+ manager.GetCallStack(arraysize(kRawStack0), kRawStack0);
+ EXPECT_EQ(arraysize(kRawStack0), stack0->depth);
+ EXPECT_EQ(1U, manager.size());
+
+ const CallStack* stack1 =
+ manager.GetCallStack(arraysize(kRawStack1), kRawStack1);
+ EXPECT_EQ(arraysize(kRawStack1), stack1->depth);
+ EXPECT_EQ(2U, manager.size());
+
+ const CallStack* stack2 =
+ manager.GetCallStack(arraysize(kRawStack2), kRawStack2);
+ EXPECT_EQ(arraysize(kRawStack2), stack2->depth);
+ EXPECT_EQ(3U, manager.size());
+
+ const CallStack* stack3 =
+ manager.GetCallStack(arraysize(kRawStack3), kRawStack3);
+ EXPECT_EQ(arraysize(kRawStack3), stack3->depth);
+ EXPECT_EQ(4U, manager.size());
+
+ // Call stack objects should be unique.
+ EXPECT_NE(stack0, stack1);
+ EXPECT_NE(stack0, stack2);
+ EXPECT_NE(stack0, stack3);
+ EXPECT_NE(stack1, stack2);
+ EXPECT_NE(stack1, stack3);
+ EXPECT_NE(stack2, stack3);
+}
+
+TEST_F(CallStackManagerTest, Hashes) {
+ CallStackManager manager;
+
+ const CallStack* stack0 =
+ manager.GetCallStack(arraysize(kRawStack0), kRawStack0);
+ const CallStack* stack1 =
+ manager.GetCallStack(arraysize(kRawStack1), kRawStack1);
+ const CallStack* stack2 =
+ manager.GetCallStack(arraysize(kRawStack2), kRawStack2);
+ const CallStack* stack3 =
+ manager.GetCallStack(arraysize(kRawStack3), kRawStack3);
+
+ // Hash values should be unique. This test is not designed to make sure the
+ // hash function is generating unique hashes, but that CallStackManager is
+ // properly storing the hashes in CallStack structs.
+ EXPECT_NE(stack0->hash, stack1->hash);
+ EXPECT_NE(stack0->hash, stack2->hash);
+ EXPECT_NE(stack0->hash, stack3->hash);
+ EXPECT_NE(stack1->hash, stack2->hash);
+ EXPECT_NE(stack1->hash, stack3->hash);
+ EXPECT_NE(stack2->hash, stack3->hash);
+}
+
+TEST_F(CallStackManagerTest, MultipleManagersHashes) {
+ CallStackManager manager1;
+ const CallStack* stack10 =
+ manager1.GetCallStack(arraysize(kRawStack0), kRawStack0);
+ const CallStack* stack11 =
+ manager1.GetCallStack(arraysize(kRawStack1), kRawStack1);
+ const CallStack* stack12 =
+ manager1.GetCallStack(arraysize(kRawStack2), kRawStack2);
+ const CallStack* stack13 =
+ manager1.GetCallStack(arraysize(kRawStack3), kRawStack3);
+
+ CallStackManager manager2;
+ const CallStack* stack20 =
+ manager2.GetCallStack(arraysize(kRawStack0), kRawStack0);
+ const CallStack* stack21 =
+ manager2.GetCallStack(arraysize(kRawStack1), kRawStack1);
+ const CallStack* stack22 =
+ manager2.GetCallStack(arraysize(kRawStack2), kRawStack2);
+ const CallStack* stack23 =
+ manager2.GetCallStack(arraysize(kRawStack3), kRawStack3);
+
+ // Different CallStackManagers should still generate the same hashes.
+ EXPECT_EQ(stack10->hash, stack20->hash);
+ EXPECT_EQ(stack11->hash, stack21->hash);
+ EXPECT_EQ(stack12->hash, stack22->hash);
+ EXPECT_EQ(stack13->hash, stack23->hash);
+}
+
+TEST_F(CallStackManagerTest, HashWithReducedDepth) {
+ CallStackManager manager;
+ const CallStack* stack =
+ manager.GetCallStack(arraysize(kRawStack3), kRawStack3);
+
+ // Hash function should only operate on the first |CallStack::depth| elements
+ // of CallStack::stack. To test this, reduce the depth value of one of the
+ // stacks and make sure the hash changes.
+ EXPECT_NE(stack->hash,
+ manager.GetCallStack(stack->depth - 1, stack->stack)->hash);
+ EXPECT_NE(stack->hash,
+ manager.GetCallStack(stack->depth - 2, stack->stack)->hash);
+ EXPECT_NE(stack->hash,
+ manager.GetCallStack(stack->depth - 3, stack->stack)->hash);
+ EXPECT_NE(stack->hash,
+ manager.GetCallStack(stack->depth - 4, stack->stack)->hash);
+
+ // Also try subsets of the stack that don't start from the beginning.
+ EXPECT_NE(stack->hash,
+ manager.GetCallStack(stack->depth - 1, stack->stack + 1)->hash);
+ EXPECT_NE(stack->hash,
+ manager.GetCallStack(stack->depth - 2, stack->stack + 2)->hash);
+ EXPECT_NE(stack->hash,
+ manager.GetCallStack(stack->depth - 3, stack->stack + 3)->hash);
+ EXPECT_NE(stack->hash,
+ manager.GetCallStack(stack->depth - 4, stack->stack + 4)->hash);
+}
+
+TEST_F(CallStackManagerTest, DuplicateStacks) {
+ CallStackManager manager;
+ EXPECT_EQ(0U, manager.size());
+
+ // Calling manager.GetCallStack() multiple times with the same raw stack
+ // arguments will not result in creation of new call stack objects after the
+ // first call. Instead, the previously created object will be returned, and
+ // the size of |manager| will remain unchanged.
+ //
+ // Thus a call to GetCallStack() will always return the same result, given the
+ // same inputs.
+
+ // Add stack0.
+ const CallStack* stack0 =
+ manager.GetCallStack(arraysize(kRawStack0), kRawStack0);
+
+ scoped_ptr<const void* []> rawstack0_duplicate0 = CopyStack(stack0);
+ const CallStack* stack0_duplicate0 =
+ manager.GetCallStack(arraysize(kRawStack0), rawstack0_duplicate0.get());
+ EXPECT_EQ(1U, manager.size());
+ EXPECT_EQ(stack0, stack0_duplicate0);
+
+ // Add stack1.
+ const CallStack* stack1 =
+ manager.GetCallStack(arraysize(kRawStack1), kRawStack1);
+ EXPECT_EQ(2U, manager.size());
+
+ scoped_ptr<const void* []> rawstack0_duplicate1 = CopyStack(stack0);
+ const CallStack* stack0_duplicate1 =
+ manager.GetCallStack(arraysize(kRawStack0), rawstack0_duplicate1.get());
+ EXPECT_EQ(2U, manager.size());
+ EXPECT_EQ(stack0, stack0_duplicate1);
+
+ scoped_ptr<const void* []> rawstack1_duplicate0 = CopyStack(stack1);
+ const CallStack* stack1_duplicate0 =
+ manager.GetCallStack(arraysize(kRawStack1), rawstack1_duplicate0.get());
+ EXPECT_EQ(2U, manager.size());
+ EXPECT_EQ(stack1, stack1_duplicate0);
+
+ // Add stack2 and stack3.
+ const CallStack* stack2 =
+ manager.GetCallStack(arraysize(kRawStack2), kRawStack2);
+ const CallStack* stack3 =
+ manager.GetCallStack(arraysize(kRawStack3), kRawStack3);
+ EXPECT_EQ(4U, manager.size());
+
+ scoped_ptr<const void* []> rawstack1_duplicate1 = CopyStack(stack1);
+ const CallStack* stack1_duplicate1 =
+ manager.GetCallStack(arraysize(kRawStack1), rawstack1_duplicate1.get());
+ EXPECT_EQ(4U, manager.size());
+ EXPECT_EQ(stack1, stack1_duplicate1);
+
+ scoped_ptr<const void* []> rawstack0_duplicate2 = CopyStack(stack0);
+ const CallStack* stack0_duplicate2 =
+ manager.GetCallStack(arraysize(kRawStack0), rawstack0_duplicate2.get());
+ EXPECT_EQ(4U, manager.size());
+ EXPECT_EQ(stack0, stack0_duplicate2);
+
+ scoped_ptr<const void* []> rawstack3_duplicate0 = CopyStack(stack3);
+ const CallStack* stack3_duplicate0 =
+ manager.GetCallStack(arraysize(kRawStack3), rawstack3_duplicate0.get());
+ EXPECT_EQ(4U, manager.size());
+ EXPECT_EQ(stack3, stack3_duplicate0);
+
+ scoped_ptr<const void* []> rawstack2_duplicate0 = CopyStack(stack2);
+ const CallStack* stack2_duplicate0 =
+ manager.GetCallStack(arraysize(kRawStack2), rawstack2_duplicate0.get());
+ EXPECT_EQ(4U, manager.size());
+ EXPECT_EQ(stack2, stack2_duplicate0);
+}
+
+} // namespace leak_detector
+} // namespace metrics
diff --git a/components/metrics/leak_detector/call_stack_table.cc b/components/metrics/leak_detector/call_stack_table.cc
new file mode 100644
index 0000000..f861ebb
--- /dev/null
+++ b/components/metrics/leak_detector/call_stack_table.cc
@@ -0,0 +1,78 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "components/metrics/leak_detector/call_stack_table.h"
+
+#include "components/metrics/leak_detector/call_stack_manager.h"
+
+namespace metrics {
+namespace leak_detector {
+
+namespace {
+
+using ValueType = LeakDetectorValueType;
+
+// During leak analysis, we only want to examine the top
+// |kMaxCountOfSuspciousStacks| entries.
+const int kMaxCountOfSuspciousStacks = 16;
+
+const int kInitialHashTableSize = 1999;
+
+} // namespace
+
+size_t CallStackTable::StoredHash::operator()(
+ const CallStack* call_stack) const {
+ // The call stack object should already have a hash computed when it was
+ // created.
+ //
+ // This is NOT the actual hash computation function for a new call stack.
+ return call_stack->hash;
+}
+
+CallStackTable::CallStackTable(int call_stack_suspicion_threshold)
+ : num_allocs_(0),
+ num_frees_(0),
+ entry_map_(kInitialHashTableSize),
+ leak_analyzer_(kMaxCountOfSuspciousStacks,
+ call_stack_suspicion_threshold) {}
+
+CallStackTable::~CallStackTable() {}
+
+void CallStackTable::Add(const CallStack* call_stack) {
+ Entry* entry = &entry_map_[call_stack];
+
+ ++entry->net_num_allocs;
+ ++num_allocs_;
+}
+
+void CallStackTable::Remove(const CallStack* call_stack) {
+ auto iter = entry_map_.find(call_stack);
+ if (iter == entry_map_.end())
+ return;
+ Entry* entry = &iter->second;
+ --entry->net_num_allocs;
+ ++num_frees_;
+
+ // Delete zero-alloc entries to free up space.
+ if (entry->net_num_allocs == 0)
+ entry_map_.erase(iter);
+}
+
+void CallStackTable::TestForLeaks() {
+ // Add all entries to the ranked list.
+ RankedList ranked_list(kMaxCountOfSuspciousStacks);
+
+ for (const auto& entry_pair : entry_map_) {
+ const Entry& entry = entry_pair.second;
+ // Assumes that |entry.net_num_allocs| is always > 0. If that changes
+ // elsewhere in this class, this code should be updated to only pass values
+ // > 0 to |ranked_list|.
+ LeakDetectorValueType call_stack_value(entry_pair.first);
+ ranked_list.Add(call_stack_value, entry.net_num_allocs);
+ }
+ leak_analyzer_.AddSample(ranked_list.Pass());
+}
+
+} // namespace leak_detector
+} // namespace metrics
diff --git a/components/metrics/leak_detector/call_stack_table.h b/components/metrics/leak_detector/call_stack_table.h
new file mode 100644
index 0000000..c412895
--- /dev/null
+++ b/components/metrics/leak_detector/call_stack_table.h
@@ -0,0 +1,81 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef COMPONENTS_METRICS_LEAK_DETECTOR_CALL_STACK_TABLE_H_
+#define COMPONENTS_METRICS_LEAK_DETECTOR_CALL_STACK_TABLE_H_
+
+#include <stdint.h>
+
+#include <functional> // For std::equal_to.
+
+#include "base/containers/hash_tables.h"
+#include "base/macros.h"
+#include "components/metrics/leak_detector/custom_allocator.h"
+#include "components/metrics/leak_detector/leak_analyzer.h"
+#include "components/metrics/leak_detector/stl_allocator.h"
+
+namespace metrics {
+namespace leak_detector {
+
+struct CallStack;
+
+// Contains a hash table where the key is the call stack and the value is the
+// number of allocations from that call stack.
+// Not thread-safe.
+class CallStackTable {
+ public:
+ struct StoredHash {
+ size_t operator()(const CallStack* call_stack) const;
+ };
+
+ explicit CallStackTable(int call_stack_suspicion_threshold);
+ ~CallStackTable();
+
+ // Add/Remove an allocation for the given call stack.
+ // Note that this class does NOT own the CallStack objects. Instead, it
+ // identifies different CallStacks by their hashes.
+ void Add(const CallStack* call_stack);
+ void Remove(const CallStack* call_stack);
+
+ // Check for leak patterns in the allocation data.
+ void TestForLeaks();
+
+ const LeakAnalyzer& leak_analyzer() const { return leak_analyzer_; }
+
+ size_t size() const { return entry_map_.size(); }
+ bool empty() const { return entry_map_.empty(); }
+
+ uint32_t num_allocs() const { return num_allocs_; }
+ uint32_t num_frees() const { return num_frees_; }
+
+ private:
+ // Hash table entry used to track allocation stats for a given call stack.
+ struct Entry {
+ // Net number of allocs (allocs minus frees).
+ uint32_t net_num_allocs;
+ };
+
+ // Total number of allocs and frees in this table.
+ uint32_t num_allocs_;
+ uint32_t num_frees_;
+
+ // Hash table containing entries.
+ using TableEntryAllocator =
+ STLAllocator<std::pair<const CallStack*, Entry>, CustomAllocator>;
+ base::hash_map<const CallStack*,
+ Entry,
+ StoredHash,
+ std::equal_to<const CallStack*>,
+ TableEntryAllocator> entry_map_;
+
+ // For detecting leak patterns in incoming allocations.
+ LeakAnalyzer leak_analyzer_;
+
+ DISALLOW_COPY_AND_ASSIGN(CallStackTable);
+};
+
+} // namespace leak_detector
+} // namespace metrics
+
+#endif // COMPONENTS_METRICS_LEAK_DETECTOR_CALL_STACK_TABLE_H_
diff --git a/components/metrics/leak_detector/call_stack_table_unittest.cc b/components/metrics/leak_detector/call_stack_table_unittest.cc
new file mode 100644
index 0000000..c455f37
--- /dev/null
+++ b/components/metrics/leak_detector/call_stack_table_unittest.cc
@@ -0,0 +1,306 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "components/metrics/leak_detector/call_stack_table.h"
+
+#include <set>
+
+#include "base/macros.h"
+#include "base/memory/scoped_ptr.h"
+#include "components/metrics/leak_detector/call_stack_manager.h"
+#include "components/metrics/leak_detector/custom_allocator.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace metrics {
+namespace leak_detector {
+
+namespace {
+
+// Default threshold used for leak analysis.
+const int kDefaultLeakThreshold = 5;
+
+// Some test call stacks.
+const void* kRawStack0[] = {
+ reinterpret_cast<const void*>(0xaabbccdd),
+ reinterpret_cast<const void*>(0x11223344),
+ reinterpret_cast<const void*>(0x55667788),
+ reinterpret_cast<const void*>(0x99887766),
+};
+const void* kRawStack1[] = {
+ reinterpret_cast<const void*>(0xdeadbeef),
+ reinterpret_cast<const void*>(0x900df00d),
+ reinterpret_cast<const void*>(0xcafedeed),
+ reinterpret_cast<const void*>(0xdeafbabe),
+};
+const void* kRawStack2[] = {
+ reinterpret_cast<const void*>(0x12345678),
+ reinterpret_cast<const void*>(0xabcdef01),
+ reinterpret_cast<const void*>(0xfdecab98),
+};
+const void* kRawStack3[] = {
+ reinterpret_cast<const void*>(0xdead0001),
+ reinterpret_cast<const void*>(0xbeef0002),
+ reinterpret_cast<const void*>(0x900d0003),
+ reinterpret_cast<const void*>(0xf00d0004),
+ reinterpret_cast<const void*>(0xcafe0005),
+ reinterpret_cast<const void*>(0xdeed0006),
+ reinterpret_cast<const void*>(0xdeaf0007),
+ reinterpret_cast<const void*>(0xbabe0008),
+};
+
+} // namespace
+
+class CallStackTableTest : public ::testing::Test {
+ public:
+ CallStackTableTest()
+ : stack0_(nullptr),
+ stack1_(nullptr),
+ stack2_(nullptr),
+ stack3_(nullptr) {}
+
+ void SetUp() override {
+ // CallStackManager allocates through CustomAllocator, so the allocator
+ // must be initialized first.
+ CustomAllocator::Initialize();
+
+ manager_.reset(new CallStackManager);
+
+ // The unit tests expect a certain order to the call stack pointers. It is
+ // an important detail when checking the output of LeakAnalyzer's suspected
+ // leaks, which are ordered by the leak value (call stack pointer). Use a
+ // set to sort the pointers as they are created.
+ std::set<const CallStack*> stacks;
+ stacks.insert(manager_->GetCallStack(arraysize(kRawStack0), kRawStack0));
+ stacks.insert(manager_->GetCallStack(arraysize(kRawStack1), kRawStack1));
+ stacks.insert(manager_->GetCallStack(arraysize(kRawStack2), kRawStack2));
+ stacks.insert(manager_->GetCallStack(arraysize(kRawStack3), kRawStack3));
+ // All four raw stacks are distinct, so four distinct pointers must come
+ // back; a smaller set means GetCallStack() collapsed two of them.
+ ASSERT_EQ(4U, stacks.size());
+
+ // Relabel the stacks in pointer order: stack0_ < stack1_ < ... etc.
+ std::set<const CallStack*>::const_iterator iter = stacks.begin();
+ stack0_ = *iter++;
+ stack1_ = *iter++;
+ stack2_ = *iter++;
+ stack3_ = *iter++;
+ }
+
+ void TearDown() override {
+ // All call stacks generated by |manager_| will be invalidated when it is
+ // destroyed.
+ stack0_ = nullptr;
+ stack1_ = nullptr;
+ stack2_ = nullptr;
+ stack3_ = nullptr;
+
+ // Destroy the call stack manager before shutting down the allocator.
+ manager_.reset();
+
+ EXPECT_TRUE(CustomAllocator::Shutdown());
+ }
+
+ protected:
+ // Unit tests should directly reference these pointers to CallStack objects.
+ const CallStack* stack0_;
+ const CallStack* stack1_;
+ const CallStack* stack2_;
+ const CallStack* stack3_;
+
+ private:
+ scoped_ptr<CallStackManager> manager_;
+
+ DISALLOW_COPY_AND_ASSIGN(CallStackTableTest);
+};
+
+// Sanity-check the fixture invariant that later tests depend on: the stack
+// pointers were relabeled in strictly increasing address order in SetUp().
+TEST_F(CallStackTableTest, PointerOrder) {
+ EXPECT_LT(stack0_, stack1_);
+ EXPECT_LT(stack1_, stack2_);
+ EXPECT_LT(stack2_, stack3_);
+}
+
+// A freshly constructed table is empty, and removing from an empty table is
+// a safe no-op that does not perturb the alloc/free counters.
+TEST_F(CallStackTableTest, EmptyTable) {
+ CallStackTable table(kDefaultLeakThreshold);
+ EXPECT_TRUE(table.empty());
+
+ EXPECT_EQ(0U, table.num_allocs());
+ EXPECT_EQ(0U, table.num_frees());
+
+ // The table should be able to gracefully handle an attempt to remove a call
+ // stack entry when none exists.
+ table.Remove(stack0_);
+ table.Remove(stack1_);
+ table.Remove(stack2_);
+ table.Remove(stack3_);
+
+ EXPECT_EQ(0U, table.num_allocs());
+ EXPECT_EQ(0U, table.num_frees());
+}
+
+// Exercises the basic Add/Remove bookkeeping: size() counts distinct call
+// stacks with a nonzero net count, while num_allocs()/num_frees() count every
+// operation ever performed.
+TEST_F(CallStackTableTest, InsertionAndRemoval) {
+ CallStackTable table(kDefaultLeakThreshold);
+
+ table.Add(stack0_);
+ EXPECT_EQ(1U, table.size());
+ EXPECT_EQ(1U, table.num_allocs());
+ table.Add(stack1_);
+ EXPECT_EQ(2U, table.size());
+ EXPECT_EQ(2U, table.num_allocs());
+ table.Add(stack2_);
+ EXPECT_EQ(3U, table.size());
+ EXPECT_EQ(3U, table.num_allocs());
+ table.Add(stack3_);
+ EXPECT_EQ(4U, table.size());
+ EXPECT_EQ(4U, table.num_allocs());
+
+ // Add some call stacks that have already been added. There should be no
+ // change in the number of entries, as they are aggregated by call stack.
+ table.Add(stack2_);
+ EXPECT_EQ(4U, table.size());
+ EXPECT_EQ(5U, table.num_allocs());
+ table.Add(stack3_);
+ EXPECT_EQ(4U, table.size());
+ EXPECT_EQ(6U, table.num_allocs());
+
+ // Start removing entries.
+ EXPECT_EQ(0U, table.num_frees());
+
+ // stack0_ and stack1_ each have a net count of 1, so removing them drops
+ // the table size immediately.
+ table.Remove(stack0_);
+ EXPECT_EQ(3U, table.size());
+ EXPECT_EQ(1U, table.num_frees());
+ table.Remove(stack1_);
+ EXPECT_EQ(2U, table.size());
+ EXPECT_EQ(2U, table.num_frees());
+
+ // Removing call stacks with multiple counts will not reduce the overall
+ // number of table entries, until the count reaches 0.
+ table.Remove(stack2_);
+ EXPECT_EQ(2U, table.size());
+ EXPECT_EQ(3U, table.num_frees());
+ table.Remove(stack3_);
+ EXPECT_EQ(2U, table.size());
+ EXPECT_EQ(4U, table.num_frees());
+
+ table.Remove(stack2_);
+ EXPECT_EQ(1U, table.size());
+ EXPECT_EQ(5U, table.num_frees());
+ table.Remove(stack3_);
+ EXPECT_EQ(0U, table.size());
+ EXPECT_EQ(6U, table.num_frees());
+
+ // Now the table should be empty, but attempt to remove some more and make
+ // sure nothing breaks.
+ table.Remove(stack0_);
+ table.Remove(stack1_);
+ table.Remove(stack2_);
+ table.Remove(stack3_);
+
+ // Extra removals on missing entries must not bump num_frees().
+ EXPECT_TRUE(table.empty());
+ EXPECT_EQ(6U, table.num_allocs());
+ EXPECT_EQ(6U, table.num_frees());
+}
+
+// Same bookkeeping as InsertionAndRemoval but at higher volume, and with
+// removals interleaved across all four stacks so entry counts reach zero only
+// on the final iteration.
+TEST_F(CallStackTableTest, MassiveInsertionAndRemoval) {
+ CallStackTable table(kDefaultLeakThreshold);
+
+ for (int i = 0; i < 100; ++i)
+ table.Add(stack3_);
+ EXPECT_EQ(1U, table.size());
+ EXPECT_EQ(100U, table.num_allocs());
+
+ for (int i = 0; i < 100; ++i)
+ table.Add(stack2_);
+ EXPECT_EQ(2U, table.size());
+ EXPECT_EQ(200U, table.num_allocs());
+
+ for (int i = 0; i < 100; ++i)
+ table.Add(stack1_);
+ EXPECT_EQ(3U, table.size());
+ EXPECT_EQ(300U, table.num_allocs());
+
+ for (int i = 0; i < 100; ++i)
+ table.Add(stack0_);
+ EXPECT_EQ(4U, table.size());
+ EXPECT_EQ(400U, table.num_allocs());
+
+ // Remove them in a different order, by removing one of each stack during one
+ // iteration. The size should not decrease until the last iteration.
+ EXPECT_EQ(0U, table.num_frees());
+
+ for (int i = 0; i < 100; ++i) {
+ table.Remove(stack0_);
+ EXPECT_EQ(4U * i + 1, table.num_frees());
+
+ table.Remove(stack1_);
+ EXPECT_EQ(4U * i + 2, table.num_frees());
+
+ table.Remove(stack2_);
+ EXPECT_EQ(4U * i + 3, table.num_frees());
+
+ table.Remove(stack3_);
+ EXPECT_EQ(4U * i + 4, table.num_frees());
+ }
+ EXPECT_EQ(400U, table.num_frees());
+ EXPECT_TRUE(table.empty());
+
+ // Try to remove some more from an empty table and make sure nothing breaks.
+ table.Remove(stack0_);
+ table.Remove(stack1_);
+ table.Remove(stack2_);
+ table.Remove(stack3_);
+
+ EXPECT_TRUE(table.empty());
+ EXPECT_EQ(400U, table.num_allocs());
+ EXPECT_EQ(400U, table.num_frees());
+}
+
+// End-to-end leak detection through the table: stacks whose net counts grow
+// fastest across TestForLeaks() calls accumulate suspicion and are reported
+// once the threshold number of consecutive suspicions is reached.
+TEST_F(CallStackTableTest, DetectLeak) {
+ CallStackTable table(kDefaultLeakThreshold);
+
+ // Add some base number of entries.
+ for (int i = 0; i < 60; ++i)
+ table.Add(stack0_);
+ for (int i = 0; i < 50; ++i)
+ table.Add(stack1_);
+ for (int i = 0; i < 64; ++i)
+ table.Add(stack2_);
+ for (int i = 0; i < 72; ++i)
+ table.Add(stack3_);
+
+ // A single snapshot has no baseline to diff against, so no leak yet.
+ table.TestForLeaks();
+ EXPECT_TRUE(table.leak_analyzer().suspected_leaks().empty());
+
+ // Use the following scheme:
+ // - stack0_: increase by 4 each time -- leak suspect
+ // - stack1_: increase by 3 each time -- leak suspect
+ // - stack2_: increase by 1 each time -- not a suspect
+ // - stack3_: alternate between increasing and decreasing - not a suspect
+ bool increase_kstack3 = true;
+ for (int i = 0; i < kDefaultLeakThreshold; ++i) {
+ EXPECT_TRUE(table.leak_analyzer().suspected_leaks().empty());
+
+ for (int j = 0; j < 4; ++j)
+ table.Add(stack0_);
+
+ for (int j = 0; j < 3; ++j)
+ table.Add(stack1_);
+
+ table.Add(stack2_);
+
+ // Alternate between adding and removing.
+ if (increase_kstack3)
+ table.Add(stack3_);
+ else
+ table.Remove(stack3_);
+ increase_kstack3 = !increase_kstack3;
+
+ table.TestForLeaks();
+ }
+
+ // Check that the correct leak values have been detected.
+ const auto& leaks = table.leak_analyzer().suspected_leaks();
+ ASSERT_EQ(2U, leaks.size());
+ // Suspected leaks are reported in increasing leak value -- in this case, the
+ // CallStack object's address.
+ EXPECT_EQ(stack0_, leaks[0].call_stack());
+ EXPECT_EQ(stack1_, leaks[1].call_stack());
+}
+
+} // namespace leak_detector
+} // namespace metrics
diff --git a/components/metrics/leak_detector/custom_allocator.cc b/components/metrics/leak_detector/custom_allocator.cc
new file mode 100644
index 0000000..1f80a97
--- /dev/null
+++ b/components/metrics/leak_detector/custom_allocator.cc
@@ -0,0 +1,61 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "components/metrics/leak_detector/custom_allocator.h"
+
+#include <stddef.h>
+
+namespace metrics {
+namespace leak_detector {
+
+namespace {
+
+// Wrappers around new and delete.
+// Allocates |size| bytes of raw storage with the global operator new[].
+void* DefaultAlloc(size_t size) {
+ return new char[size];
+}
+// Releases storage obtained from DefaultAlloc. The |size| argument is part of
+// the FreeFunc signature but is not needed by operator delete[].
+void DefaultFree(void* ptr, size_t /* size */) {
+ // static_cast is the idiomatic named cast for void* -> T*;
+ // reinterpret_cast was unnecessarily strong here.
+ delete[] static_cast<char*>(ptr);
+}
+
+CustomAllocator::AllocFunc g_alloc_func = nullptr;
+CustomAllocator::FreeFunc g_free_func = nullptr;
+
+} // namespace
+
+// static
+void CustomAllocator::Initialize() {
+ // No custom hooks supplied: install the new/delete-backed defaults.
+ // (Function-to-pointer decay makes an explicit '&' unnecessary.)
+ Initialize(DefaultAlloc, DefaultFree);
+}
+
+// static
+void CustomAllocator::Initialize(AllocFunc alloc_func, FreeFunc free_func) {
+ // Install the caller-provided hooks. Only these two function pointers are
+ // stored; no other state is kept and no ownership is taken.
+ g_alloc_func = alloc_func;
+ g_free_func = free_func;
+}
+
+// static
+bool CustomAllocator::Shutdown() {
+ // Clear the hooks so IsInitialized() returns false afterwards.
+ g_alloc_func = nullptr;
+ g_free_func = nullptr;
+ // Nothing can currently fail here, so always report success.
+ return true;
+}
+
+// static
+bool CustomAllocator::IsInitialized() {
+ // The allocator is usable only once both hooks have been installed.
+ return g_alloc_func != nullptr && g_free_func != nullptr;
+}
+
+// static
+void* CustomAllocator::Allocate(size_t size) {
+ // Requires a prior Initialize(); |g_alloc_func| must be non-null here.
+ return g_alloc_func(size);
+}
+
+// static
+void CustomAllocator::Free(void* ptr, size_t size) {
+ // Requires a prior Initialize(); |g_free_func| must be non-null here.
+ g_free_func(ptr, size);
+}
+
+} // namespace leak_detector
+} // namespace metrics
diff --git a/components/metrics/leak_detector/custom_allocator.h b/components/metrics/leak_detector/custom_allocator.h
new file mode 100644
index 0000000..fdbfc77
--- /dev/null
+++ b/components/metrics/leak_detector/custom_allocator.h
@@ -0,0 +1,50 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef COMPONENTS_METRICS_LEAK_DETECTOR_CUSTOM_ALLOCATOR_H_
+#define COMPONENTS_METRICS_LEAK_DETECTOR_CUSTOM_ALLOCATOR_H_
+
+#include <stddef.h>
+
+#include <type_traits>
+
+namespace metrics {
+namespace leak_detector {
+
+// Custom allocator class to be passed to STLAllocator as a template argument.
+//
+// By default, CustomAllocator uses the default allocator (new/delete), but the
+// caller of Initialize() can provide a pair of alternative alloc/free
+// functions to use as an external allocator.
+//
+// This is a stateless class, but there is static data within the module that
+// needs to be created and deleted.
+//
+// Not thread-safe.
+class CustomAllocator {
+ public:
+ // Signatures of the pluggable hooks: AllocFunc takes a byte count, FreeFunc
+ // takes the pointer plus the original allocation size.
+ using AllocFunc = std::add_pointer<void*(size_t)>::type;
+ using FreeFunc = std::add_pointer<void(void*, size_t)>::type;
+
+ // Initialize CustomAllocator to use the default allocator.
+ static void Initialize();
+
+ // Initialize CustomAllocator to use the given alloc/free functions.
+ static void Initialize(AllocFunc alloc_func, FreeFunc free_func);
+
+ // Performs any cleanup required, e.g. unset custom functions. Returns true
+ // on success or false if something failed.
+ static bool Shutdown();
+
+ // True once a pair of alloc/free functions has been installed.
+ static bool IsInitialized();
+
+ // These functions must match the specifications in STLAllocator.
+ // Both require Initialize() to have been called first.
+ static void* Allocate(size_t size);
+ static void Free(void* ptr, size_t size);
+};
+
+} // namespace leak_detector
+} // namespace metrics
+
+#endif // COMPONENTS_METRICS_LEAK_DETECTOR_CUSTOM_ALLOCATOR_H_
diff --git a/components/metrics/leak_detector/leak_analyzer.cc b/components/metrics/leak_detector/leak_analyzer.cc
new file mode 100644
index 0000000..56905ba
--- /dev/null
+++ b/components/metrics/leak_detector/leak_analyzer.cc
@@ -0,0 +1,139 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "components/metrics/leak_detector/leak_analyzer.h"
+
+#include <set>
+
+namespace metrics {
+namespace leak_detector {
+
+namespace {
+
+using RankedEntry = RankedList::Entry;
+
+// Increase suspicion scores by this much each time an entry is suspected as
+// being a leak.
+const int kSuspicionScoreIncrease = 1;
+
+} // namespace
+
+LeakAnalyzer::LeakAnalyzer(uint32_t ranking_size,
+ uint32_t num_suspicions_threshold)
+ : ranking_size_(ranking_size),
+ score_threshold_(num_suspicions_threshold),
+ ranked_entries_(ranking_size),
+ prev_ranked_entries_(ranking_size) {
+ // Reserve up front so reporting suspected leaks later does not reallocate.
+ suspected_leaks_.reserve(ranking_size);
+}
+
+LeakAnalyzer::~LeakAnalyzer() {}
+
+void LeakAnalyzer::AddSample(RankedList ranked_list) {
+ // Save the ranked entries from the previous call.
+ prev_ranked_entries_ = ranked_entries_.Pass();
+
+ // Save the current entries.
+ ranked_entries_ = ranked_list.Pass();
+
+ // Build a ranked list of count deltas against the previous sample. Values
+ // that were not present last time are skipped — there is no baseline to
+ // diff against.
+ RankedList ranked_deltas(ranking_size_);
+ for (const RankedEntry& entry : ranked_entries_) {
+ // Determine what count was recorded for this value last time.
+ uint32_t prev_count = 0;
+ if (GetPreviousCountForValue(entry.value, &prev_count))
+ ranked_deltas.Add(entry.value, entry.count - prev_count);
+ }
+
+ AnalyzeDeltas(ranked_deltas);
+}
+
+// Scans |ranked_deltas| (sorted by descending delta) for a sharp drop-off.
+// Everything before the drop is suspected this round; suspicion scores are
+// accumulated across rounds and entries reaching |score_threshold_| are
+// published in |suspected_leaks_|.
+void LeakAnalyzer::AnalyzeDeltas(const RankedList& ranked_deltas) {
+ bool found_drop = false;
+ RankedList::const_iterator drop_position = ranked_deltas.end();
+
+ if (ranked_deltas.size() > 1) {
+ RankedList::const_iterator entry_iter = ranked_deltas.begin();
+ RankedList::const_iterator next_entry_iter = ranked_deltas.begin();
+ ++next_entry_iter;
+
+ // If the first entry is 0, that means all deltas are 0 or negative. Do
+ // not treat this as a suspicion of leaks; just quit.
+ if (entry_iter->count > 0) {
+ while (next_entry_iter != ranked_deltas.end()) {
+ const RankedEntry& entry = *entry_iter;
+ const RankedEntry& next_entry = *next_entry_iter;
+
+ // Find the first major drop in values (i.e. by more than 50%).
+ if (entry.count > next_entry.count * 2) {
+ found_drop = true;
+ drop_position = next_entry_iter;
+ break;
+ }
+ ++entry_iter;
+ ++next_entry_iter;
+ }
+ }
+ }
+
+ // All leak values before the drop are suspected during this analysis.
+ std::set<ValueType, std::less<ValueType>, Allocator<ValueType>>
+ current_suspects;
+ if (found_drop) {
+ for (RankedList::const_iterator ranked_list_iter = ranked_deltas.begin();
+ ranked_list_iter != drop_position; ++ranked_list_iter) {
+ current_suspects.insert(ranked_list_iter->value);
+ }
+ }
+
+ // Drop any previously suspected value that was not suspected again this
+ // round. Note the entry is erased outright (score reset to nothing), not
+ // merely decremented.
+ auto iter = suspected_histogram_.begin();
+ while (iter != suspected_histogram_.end()) {
+ const ValueType& value = iter->first;
+ // Advance before a possible erase so the iterator stays valid.
+ auto erase_iter = iter++;
+ if (current_suspects.find(value) == current_suspects.end())
+ suspected_histogram_.erase(erase_iter);
+ }
+
+ // For currently suspected values, increase the leak score.
+ for (const ValueType& value : current_suspects) {
+ auto histogram_iter = suspected_histogram_.find(value);
+ if (histogram_iter != suspected_histogram_.end()) {
+ histogram_iter->second += kSuspicionScoreIncrease;
+ } else if (suspected_histogram_.size() < ranking_size_) {
+ // Create a new entry if it didn't already exist.
+ suspected_histogram_[value] = kSuspicionScoreIncrease;
+ }
+ }
+
+ // Now check the leak suspicion scores. Make sure to erase the suspected
+ // leaks from the previous call.
+ suspected_leaks_.clear();
+ for (const auto& entry : suspected_histogram_) {
+ // Defensive cap; the histogram itself is already bounded by
+ // |ranking_size_| above.
+ if (suspected_leaks_.size() > ranking_size_)
+ break;
+
+ // Only report suspected values that have accumulated a suspicion score.
+ // This is achieved by maintaining suspicion for several cycles, with few
+ // skips.
+ if (entry.second >= score_threshold_)
+ suspected_leaks_.emplace_back(entry.first);
+ }
+}
+
+// Looks up |value| in the previous sample's ranked entries. On a hit, writes
+// the recorded count to |*count| and returns true; otherwise returns false
+// and leaves |*count| untouched. A linear scan is fine: the list length is
+// bounded by |ranking_size_|.
+bool LeakAnalyzer::GetPreviousCountForValue(const ValueType& value,
+ uint32_t* count) const {
+ for (RankedList::const_iterator it = prev_ranked_entries_.begin();
+ it != prev_ranked_entries_.end(); ++it) {
+ if (!(it->value == value))
+ continue;
+ *count = it->count;
+ return true;
+ }
+ return false;
+}
+
+} // namespace leak_detector
+} // namespace metrics
diff --git a/components/metrics/leak_detector/leak_analyzer.h b/components/metrics/leak_detector/leak_analyzer.h
new file mode 100644
index 0000000..9ca9c3e
--- /dev/null
+++ b/components/metrics/leak_detector/leak_analyzer.h
@@ -0,0 +1,81 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef COMPONENTS_METRICS_LEAK_DETECTOR_LEAK_ANALYZER_H_
+#define COMPONENTS_METRICS_LEAK_DETECTOR_LEAK_ANALYZER_H_
+
+#include <map>
+#include <vector>
+
+#include "base/macros.h"
+#include "components/metrics/leak_detector/custom_allocator.h"
+#include "components/metrics/leak_detector/leak_detector_value_type.h"
+#include "components/metrics/leak_detector/ranked_list.h"
+#include "components/metrics/leak_detector/stl_allocator.h"
+
+namespace metrics {
+namespace leak_detector {
+
+// This class looks for possible leak patterns in allocation data over time.
+// Not thread-safe.
+class LeakAnalyzer {
+ public:
+ using ValueType = LeakDetectorValueType;
+
+ // All internal containers allocate through CustomAllocator so the analyzer
+ // does not allocate through the allocator being watched.
+ template <typename Type>
+ using Allocator = STLAllocator<Type, CustomAllocator>;
+
+ // |ranking_size|: number of top entries examined per sample.
+ // |num_suspicions_threshold|: consecutive suspicions needed to report.
+ LeakAnalyzer(uint32_t ranking_size, uint32_t num_suspicions_threshold);
+ ~LeakAnalyzer();
+
+ // Take in a RankedList of allocations, sorted by count. Removes the contents
+ // of |ranked_list|, which must be passed in using move semantics.
+ void AddSample(RankedList ranked_list);
+
+ // Used to report suspected leaks. Reported leaks are sorted by ValueType.
+ const std::vector<ValueType, Allocator<ValueType>>& suspected_leaks() const {
+ return suspected_leaks_;
+ }
+
+ private:
+ // Analyze a list of allocation count deltas from the previous iteration. If
+ // anything looks like a possible leak, update the suspicion scores.
+ void AnalyzeDeltas(const RankedList& ranked_deltas);
+
+ // Returns the count for the given value from the previous analysis in
+ // |count|. Returns true if the given value was present in the previous
+ // analysis, or false if not.
+ bool GetPreviousCountForValue(const ValueType& value, uint32_t* count) const;
+
+ // Look for the top |ranking_size_| entries when analyzing leaks.
+ const uint32_t ranking_size_;
+
+ // Report suspected leaks when the suspicion score reaches this value.
+ const uint32_t score_threshold_;
+
+ // A mapping of allocation values to suspicion score. All allocations in this
+ // container are suspected leaks. The score can increase or decrease over
+ // time. Once the score reaches |score_threshold_|, the entry is reported as
+ // a suspected leak in |suspected_leaks_|.
+ std::map<ValueType,
+ uint32_t,
+ std::less<ValueType>,
+ Allocator<std::pair<ValueType, uint32_t>>> suspected_histogram_;
+
+ // Array of allocated values that passed the suspicion threshold and are being
+ // reported.
+ std::vector<ValueType, Allocator<ValueType>> suspected_leaks_;
+
+ // The most recent allocation entries, since the last call to AddSample().
+ RankedList ranked_entries_;
+ // The previous allocation entries, from before the last call to AddSample().
+ RankedList prev_ranked_entries_;
+
+ DISALLOW_COPY_AND_ASSIGN(LeakAnalyzer);
+};
+
+} // namespace leak_detector
+} // namespace metrics
+
+#endif // COMPONENTS_METRICS_LEAK_DETECTOR_LEAK_ANALYZER_H_
diff --git a/components/metrics/leak_detector/leak_analyzer_unittest.cc b/components/metrics/leak_detector/leak_analyzer_unittest.cc
new file mode 100644
index 0000000..0008708
--- /dev/null
+++ b/components/metrics/leak_detector/leak_analyzer_unittest.cc
@@ -0,0 +1,362 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "components/metrics/leak_detector/leak_analyzer.h"
+
+#include "base/macros.h"
+#include "components/metrics/leak_detector/custom_allocator.h"
+#include "components/metrics/leak_detector/ranked_list.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace metrics {
+namespace leak_detector {
+
+namespace {
+
+// Default ranking size and threshold used for leak analysis.
+const int kDefaultRankedListSize = 10;
+const int kDefaultLeakThreshold = 5;
+
+// Makes it easier to instantiate LeakDetectorValueTypes. Instantiates with an
+// integer value that indicates an allocation size. Storing the size allows us
+// to track the storage of the LeakDetectorValueType object within LeakAnalyzer.
+//
+// There is no need to test this with call stacks in addition to sizes because
+// call stacks will be contained in a LeakDetectorValueType object as well.
+// Shorthand constructor for a size-flavored LeakDetectorValueType.
+LeakDetectorValueType Size(uint32_t value) {
+ return LeakDetectorValueType(value);
+}
+
+} // namespace
+
+// Fixture that brackets each test with CustomAllocator setup/teardown, since
+// LeakAnalyzer's containers allocate through CustomAllocator.
+class LeakAnalyzerTest : public ::testing::Test {
+ public:
+ LeakAnalyzerTest() {}
+
+ void SetUp() override { CustomAllocator::Initialize(); }
+ void TearDown() override { EXPECT_TRUE(CustomAllocator::Shutdown()); }
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(LeakAnalyzerTest);
+};
+
+// A freshly constructed analyzer reports no leaks.
+TEST_F(LeakAnalyzerTest, Empty) {
+ LeakAnalyzer analyzer(kDefaultRankedListSize, kDefaultLeakThreshold);
+ EXPECT_TRUE(analyzer.suspected_leaks().empty());
+}
+
+// A single size with a constant count (zero delta) must never be flagged.
+TEST_F(LeakAnalyzerTest, SingleSize) {
+ LeakAnalyzer analyzer(kDefaultRankedListSize, kDefaultLeakThreshold);
+
+ for (int i = 0; i < kDefaultLeakThreshold + 20; ++i) {
+ RankedList list(kDefaultRankedListSize);
+ list.Add(Size(24), 10);
+ analyzer.AddSample(list.Pass());
+
+ // No leaks should have been detected.
+ EXPECT_TRUE(analyzer.suspected_leaks().empty());
+ }
+}
+
+// Multiple sizes with constant counts: all deltas are zero, so no size is
+// ever suspected.
+TEST_F(LeakAnalyzerTest, VariousSizesWithoutIncrease) {
+ LeakAnalyzer analyzer(kDefaultRankedListSize, kDefaultLeakThreshold);
+
+ for (int i = 0; i < kDefaultLeakThreshold + 20; ++i) {
+ RankedList list(kDefaultRankedListSize);
+ list.Add(Size(24), 30);
+ list.Add(Size(32), 10);
+ list.Add(Size(56), 90);
+ list.Add(Size(64), 40);
+ analyzer.AddSample(list.Pass());
+
+ // No leaks should have been detected.
+ EXPECT_TRUE(analyzer.suspected_leaks().empty());
+ }
+}
+
+// All sizes growing at the same rate: no drop-off exists between deltas, so
+// nothing stands out as a leak.
+TEST_F(LeakAnalyzerTest, VariousSizesWithEqualIncrease) {
+ LeakAnalyzer analyzer(kDefaultRankedListSize, kDefaultLeakThreshold);
+
+ for (int i = 0; i < kDefaultLeakThreshold + 20; ++i) {
+ RankedList list(kDefaultRankedListSize);
+ list.Add(Size(24), 30 + i * 10);
+ list.Add(Size(32), 10 + i * 10);
+ list.Add(Size(56), 90 + i * 10);
+ list.Add(Size(64), 40 + i * 10);
+ analyzer.AddSample(list.Pass());
+
+ // No leaks should have been detected.
+ EXPECT_TRUE(analyzer.suspected_leaks().empty());
+ }
+}
+
+// One iteration short of the trigger point: suspicion accrues but the
+// threshold is never reached, so nothing is reported.
+TEST_F(LeakAnalyzerTest, NotEnoughRunsToTriggerLeakReport) {
+ LeakAnalyzer analyzer(kDefaultRankedListSize, kDefaultLeakThreshold);
+
+ // Run this one iteration short of the number of cycles needed to trigger a
+ // leak report. Because LeakAnalyzer requires |kDefaultLeakThreshold|
+ // suspicions based on deltas between AddSample() calls, the below loop needs
+ // to run |kDefaultLeakThreshold + 1| times to trigger a leak report.
+ for (int i = 0; i <= kDefaultLeakThreshold - 1; ++i) {
+ RankedList list(kDefaultRankedListSize);
+ list.Add(Size(24), 30 + i * 10); // This one has a potential leak.
+ list.Add(Size(32), 10 + i * 2);
+ list.Add(Size(56), 90 + i);
+ list.Add(Size(64), 40 + i / 2);
+ analyzer.AddSample(list.Pass());
+
+ // No leaks should have been detected.
+ EXPECT_TRUE(analyzer.suspected_leaks().empty());
+ }
+}
+
+// One size grows while the rest stay flat: it should be reported once the
+// suspicion threshold is crossed, and stay reported afterwards.
+TEST_F(LeakAnalyzerTest, LeakSingleSize) {
+ LeakAnalyzer analyzer(kDefaultRankedListSize, kDefaultLeakThreshold);
+
+ // Run this past the number of iterations required to trigger a leak report.
+ for (int i = 0; i < kDefaultLeakThreshold + 10; ++i) {
+ RankedList list(kDefaultRankedListSize);
+ list.Add(Size(32), 10);
+ list.Add(Size(56), 90);
+ list.Add(Size(24), 30 + i * 10); // This one has a potential leak.
+ list.Add(Size(64), 40);
+ analyzer.AddSample(list.Pass());
+
+ // No leaks should have been detected initially...
+ if (i < kDefaultLeakThreshold) {
+ EXPECT_TRUE(analyzer.suspected_leaks().empty());
+ } else {
+ // ... but there should be reported leaks once the threshold is reached.
+ const auto& leaks = analyzer.suspected_leaks();
+ ASSERT_EQ(1U, leaks.size());
+ EXPECT_EQ(24U, leaks[0].size());
+ }
+ }
+}
+
+// One size's growth dominates even though the others also grow slowly; only
+// the dominant grower is reported.
+TEST_F(LeakAnalyzerTest, LeakSingleSizeOthersAlsoIncreasing) {
+ LeakAnalyzer analyzer(kDefaultRankedListSize, kDefaultLeakThreshold);
+
+ for (int i = 0; i < kDefaultLeakThreshold + 10; ++i) {
+ RankedList list(kDefaultRankedListSize);
+ list.Add(Size(24), 30 + i * 10); // This one has a potential leak.
+ list.Add(Size(32), 10 + i * 2);
+ list.Add(Size(56), 90 + i);
+ list.Add(Size(64), 40 + i / 2);
+ analyzer.AddSample(list.Pass());
+
+ // No leaks should have been detected initially...
+ if (i < kDefaultLeakThreshold) {
+ EXPECT_TRUE(analyzer.suspected_leaks().empty());
+ } else {
+ // ... but there should be reported leaks once the threshold is reached.
+ const auto& leaks = analyzer.suspected_leaks();
+ ASSERT_EQ(1U, leaks.size());
+ EXPECT_EQ(24U, leaks[0].size());
+ }
+ }
+}
+
+// Three sizes grow much faster than the other two; all three are reported as
+// a group, sorted by allocation size.
+TEST_F(LeakAnalyzerTest, LeakMultipleSizes) {
+ LeakAnalyzer analyzer(kDefaultRankedListSize, kDefaultLeakThreshold);
+
+ for (int i = 0; i < kDefaultLeakThreshold + 10; ++i) {
+ RankedList list(kDefaultRankedListSize);
+ list.Add(Size(24), 30 + i * 5);
+ list.Add(Size(32), 10 + i * 40);
+ list.Add(Size(56), 90 + i * 30);
+ list.Add(Size(64), 40 + i * 20);
+ list.Add(Size(80), 20 + i * 3);
+ analyzer.AddSample(list.Pass());
+
+ // No leaks should have been detected initially...
+ if (i < kDefaultLeakThreshold) {
+ EXPECT_TRUE(analyzer.suspected_leaks().empty());
+ } else {
+ // ... but there should be reported leaks once the threshold is reached.
+ const auto& leaks = analyzer.suspected_leaks();
+ ASSERT_EQ(3U, leaks.size());
+ // These should be in order of increasing allocation size.
+ EXPECT_EQ(32U, leaks[0].size());
+ EXPECT_EQ(56U, leaks[1].size());
+ EXPECT_EQ(64U, leaks[2].size());
+ }
+ }
+}
+
+// Verifies reporting order: suspects are sorted by allocation size, not by
+// how fast each one grew.
+TEST_F(LeakAnalyzerTest, LeakMultipleSizesValueOrder) {
+ LeakAnalyzer analyzer(kDefaultRankedListSize, kDefaultLeakThreshold);
+
+ for (int i = 0; i <= kDefaultLeakThreshold; ++i) {
+ RankedList list(kDefaultRankedListSize);
+ // These are similar to LeakMultipleSizes, but the relative order of
+ // allocation increases is different from the relative order of sizes.
+ list.Add(Size(24), 30 + i * 5);
+ list.Add(Size(32), 10 + i * 20);
+ list.Add(Size(56), 90 + i * 40);
+ list.Add(Size(64), 40 + i * 30);
+ list.Add(Size(80), 20 + i * 3);
+ analyzer.AddSample(list.Pass());
+ }
+
+ const auto& leaks = analyzer.suspected_leaks();
+ ASSERT_EQ(3U, leaks.size());
+ // These should be in order of increasing allocation size, NOT in order of
+ // allocation count or deltas.
+ EXPECT_EQ(32U, leaks[0].size());
+ EXPECT_EQ(56U, leaks[1].size());
+ EXPECT_EQ(64U, leaks[2].size());
+}
+
+// Five sizes all growing at the identical rate: no delta drop-off, so no
+// suspects are ever produced.
+TEST_F(LeakAnalyzerTest, EqualIncreasesNoLeak) {
+ LeakAnalyzer analyzer(kDefaultRankedListSize, kDefaultLeakThreshold);
+
+ for (int i = 0; i < kDefaultLeakThreshold + 20; ++i) {
+ RankedList list(kDefaultRankedListSize);
+ list.Add(Size(24), 30 + i * 10);
+ list.Add(Size(32), 10 + i * 10);
+ list.Add(Size(56), 90 + i * 10);
+ list.Add(Size(64), 40 + i * 10);
+ list.Add(Size(80), 20 + i * 10);
+ analyzer.AddSample(list.Pass());
+
+ EXPECT_TRUE(analyzer.suspected_leaks().empty());
+ }
+}
+
+// Deltas decline gradually with no adjacent pair differing by more than 50%,
+// so the drop-off heuristic never fires.
+TEST_F(LeakAnalyzerTest, NotBigEnoughDeltaGap) {
+ LeakAnalyzer analyzer(kDefaultRankedListSize, kDefaultLeakThreshold);
+
+ for (int i = 0; i < kDefaultLeakThreshold + 20; ++i) {
+ RankedList list(kDefaultRankedListSize);
+ // These all have different increments but there is no clear group of
+ // increases that are larger than the rest.
+ list.Add(Size(24), 30 + i * 80);
+ list.Add(Size(32), 10 + i * 45);
+ list.Add(Size(56), 90 + i * 25);
+ list.Add(Size(64), 40 + i * 15);
+ list.Add(Size(80), 20 + i * 10);
+ analyzer.AddSample(list.Pass());
+
+ EXPECT_TRUE(analyzer.suspected_leaks().empty());
+ }
+}
+
+// Growth that restarts from the baseline: each incomplete run of rises resets
+// accumulated suspicion, and only a run long enough to cross the threshold
+// finally produces a report.
+TEST_F(LeakAnalyzerTest, RepeatedRisesUntilLeakFound) {
+ LeakAnalyzer analyzer(kDefaultRankedListSize, kDefaultLeakThreshold);
+
+ // Remember, there is an extra iteration beyond |kDefaultLeakThreshold| needed
+ // to actually trigger the leak detection.
+ for (int i = 0; i <= kDefaultLeakThreshold - 2; ++i) {
+ RankedList list(kDefaultRankedListSize);
+ list.Add(Size(24), 30 + i * 10);
+ list.Add(Size(32), 10);
+ list.Add(Size(56), 90);
+ list.Add(Size(64), 40);
+ list.Add(Size(80), 20);
+ analyzer.AddSample(list.Pass());
+
+ EXPECT_TRUE(analyzer.suspected_leaks().empty());
+ }
+
+ // Drop back down to 30.
+ for (int i = 0; i <= kDefaultLeakThreshold - 1; ++i) {
+ RankedList list(kDefaultRankedListSize);
+ list.Add(Size(24), 30 + i * 10);
+ list.Add(Size(32), 10);
+ list.Add(Size(56), 90);
+ list.Add(Size(64), 40);
+ list.Add(Size(80), 20);
+ analyzer.AddSample(list.Pass());
+
+ EXPECT_TRUE(analyzer.suspected_leaks().empty());
+ }
+
+ // Drop back down to 30.
+ for (int i = 0; i <= kDefaultLeakThreshold; ++i) {
+ // Initially there should not be any leak detected.
+ EXPECT_TRUE(analyzer.suspected_leaks().empty());
+
+ RankedList list(kDefaultRankedListSize);
+ list.Add(Size(24), 30 + i * 10);
+ list.Add(Size(32), 10);
+ list.Add(Size(56), 90);
+ list.Add(Size(64), 40);
+ list.Add(Size(80), 20);
+ analyzer.AddSample(list.Pass());
+ }
+ // This final run was long enough: size 24 is reported.
+ const auto& leaks = analyzer.suspected_leaks();
+ ASSERT_EQ(1U, leaks.size());
+ EXPECT_EQ(24U, leaks[0].size());
+}
+
+// With small, medium, and large delta groups present, only the group above
+// the first major drop-off (the largest deltas) is reported.
+TEST_F(LeakAnalyzerTest, LeakWithMultipleGroupsOfDeltas) {
+ const int kRankedListSize = 20;
+ LeakAnalyzer analyzer(kRankedListSize, kDefaultLeakThreshold);
+
+ for (int i = 0; i <= kDefaultLeakThreshold; ++i) {
+ RankedList list(kRankedListSize);
+ list.Add(Size(24), 30 + i * 10); // A group of smaller deltas.
+ list.Add(Size(32), 10 + i * 3);
+ list.Add(Size(80), 20 + i * 5);
+ list.Add(Size(40), 30 + i * 7);
+ list.Add(Size(56), 90);
+ list.Add(Size(64), 40);
+ list.Add(Size(128), 100);
+ list.Add(Size(44), 100 + i * 10); // A group of medium deltas.
+ list.Add(Size(16), 60 + i * 50);
+ list.Add(Size(4), 20 + i * 40);
+ list.Add(Size(8), 100 + i * 60);
+ list.Add(Size(48), 100);
+ list.Add(Size(72), 60 + i * 240); // A group of largest deltas.
+ list.Add(Size(28), 100);
+ list.Add(Size(100), 100 + i * 200);
+ list.Add(Size(104), 60 + i * 128);
+ analyzer.AddSample(list.Pass());
+ }
+ // Only the group of largest deltas should be caught.
+ const auto& leaks = analyzer.suspected_leaks();
+ ASSERT_EQ(3U, leaks.size());
+ // These should be in order of increasing allocation size.
+ EXPECT_EQ(72U, leaks[0].size());
+ EXPECT_EQ(100U, leaks[1].size());
+ EXPECT_EQ(104U, leaks[2].size());
+}
+
+// Same multi-suspect scenario but with a much larger suspicion threshold, to
+// confirm the threshold parameter scales the trigger point.
+TEST_F(LeakAnalyzerTest, LeakMultipleSizesWithLargeThreshold) {
+ const int kLeakThreshold = 50;
+ LeakAnalyzer analyzer(kDefaultRankedListSize, kLeakThreshold);
+
+ for (int i = 0; i <= kLeakThreshold + 10; ++i) {
+ RankedList list(kDefaultRankedListSize);
+ // * - Cluster of larger deltas
+ list.Add(Size(24), 30 + i * 5);
+ list.Add(Size(32), 10 + i * 40); // *
+ list.Add(Size(56), 90 + i * 30); // *
+ list.Add(Size(40), 30 + i * 7);
+ list.Add(Size(64), 40 + i * 25); // *
+ list.Add(Size(80), 20 + i * 3);
+ list.Add(Size(128), 100);
+ list.Add(Size(44), 100 + i * 10);
+ list.Add(Size(16), 60 + i * 50); // *
+ analyzer.AddSample(list.Pass());
+
+ // No leaks should have been detected initially...
+ if (i < kLeakThreshold) {
+ EXPECT_TRUE(analyzer.suspected_leaks().empty());
+ } else {
+ // ... but there should be reported leaks once the threshold is reached.
+ const auto& leaks = analyzer.suspected_leaks();
+ ASSERT_EQ(4U, leaks.size());
+ // These should be in order of increasing allocation size.
+ EXPECT_EQ(16U, leaks[0].size());
+ EXPECT_EQ(32U, leaks[1].size());
+ EXPECT_EQ(56U, leaks[2].size());
+ EXPECT_EQ(64U, leaks[3].size());
+ }
+ }
+}
+
+} // namespace leak_detector
+} // namespace metrics
diff --git a/components/metrics/leak_detector/leak_detector_impl.cc b/components/metrics/leak_detector/leak_detector_impl.cc
new file mode 100644
index 0000000..091d1d1
--- /dev/null
+++ b/components/metrics/leak_detector/leak_detector_impl.cc
@@ -0,0 +1,215 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "leak_detector_impl.h"
+
+#include <inttypes.h>
+#include <stddef.h>
+
+#include <algorithm>
+#include <new>
+
+#include "base/hash.h"
+#include "base/process/process_handle.h"
+#include "components/metrics/leak_detector/call_stack_table.h"
+#include "components/metrics/leak_detector/custom_allocator.h"
+#include "components/metrics/leak_detector/ranked_list.h"
+
+namespace metrics {
+namespace leak_detector {
+
+namespace {
+
+// Look for leaks in the the top N entries in each tier, where N is this value.
+const int kRankedListSize = 16;
+
+// Initial hash table size for |LeakDetectorImpl::address_map_|.
+const int kAddressMapNumBuckets = 100003;
+
+// Number of entries in the alloc size table. As sizes are aligned to 32-bits
+// the max supported allocation size is (kNumSizeEntries * 4 - 1). Any larger
+// sizes are ignored. This value is chosen high enough that such large sizes
+// are rare if not nonexistent.
+const int kNumSizeEntries = 2048;
+
+using ValueType = LeakDetectorValueType;
+
+// Functions to convert an allocation size to/from the array index used for
+// |LeakDetectorImpl::size_entries_|.
+size_t SizeToIndex(const size_t size) {
+  // Scale down by the 4-byte alignment so each array slot covers one
+  // 32-bit-aligned size class.
+  int result = static_cast<int>(size / sizeof(uint32_t));
+  if (result < kNumSizeEntries)
+    return result;
+  // NOTE(review): sizes >= (kNumSizeEntries * 4) fall through to index 0, so
+  // they are counted in the same bucket as size-0 allocations rather than
+  // being skipped entirely -- confirm this aliasing is intended.
+  return 0;
+}
+
+// Inverse of SizeToIndex(): returns the smallest allocation size that maps to
+// |index|.
+size_t IndexToSize(size_t index) {
+  return sizeof(uint32_t) * index;
+}
+
+} // namespace
+
+LeakDetectorImpl::LeakReport::LeakReport() : alloc_size_bytes_(0) {}
+
+LeakDetectorImpl::LeakReport::~LeakReport() {}
+
+// Orders reports primarily by allocation size; ties are broken by comparing
+// the call stacks lexicographically (element by element, then by depth).
+bool LeakDetectorImpl::LeakReport::operator<(const LeakReport& other) const {
+  if (alloc_size_bytes_ != other.alloc_size_bytes_)
+    return alloc_size_bytes_ < other.alloc_size_bytes_;
+  return std::lexicographical_compare(call_stack_.begin(), call_stack_.end(),
+                                      other.call_stack_.begin(),
+                                      other.call_stack_.end());
+}
+
+// Initializes all counters to zero and stores the detection parameters.
+// |mapping_addr|/|mapping_size| describe where the current binary is mapped
+// in memory, so call stack addresses can be reported as mapping offsets.
+LeakDetectorImpl::LeakDetectorImpl(uintptr_t mapping_addr,
+                                   size_t mapping_size,
+                                   int size_suspicion_threshold,
+                                   int call_stack_suspicion_threshold)
+    : num_allocs_(0),
+      num_frees_(0),
+      alloc_size_(0),
+      free_size_(0),
+      num_allocs_with_call_stack_(0),
+      num_stack_tables_(0),
+      address_map_(kAddressMapNumBuckets),
+      size_leak_analyzer_(kRankedListSize, size_suspicion_threshold),
+      size_entries_(kNumSizeEntries),
+      mapping_addr_(mapping_addr),
+      mapping_size_(mapping_size),
+      // This member was previously left uninitialized even though it is
+      // declared in the header; initialize it so any read is well-defined.
+      // Initializer order matches the declaration order in the header.
+      size_suspicion_threshold_(size_suspicion_threshold),
+      call_stack_suspicion_threshold_(call_stack_suspicion_threshold) {}
+
+LeakDetectorImpl::~LeakDetectorImpl() {
+  // Free any call stack tables.
+  // The tables were created in TestForLeaks() with placement new on memory
+  // from CustomAllocator, so they must be destroyed explicitly and their raw
+  // storage returned to CustomAllocator; |delete| must not be used here.
+  for (AllocSizeEntry& entry : size_entries_) {
+    CallStackTable* table = entry.stack_table;
+    if (!table)
+      continue;
+    table->~CallStackTable();
+    CustomAllocator::Free(table, sizeof(CallStackTable));
+  }
+  size_entries_.clear();
+}
+
+bool LeakDetectorImpl::ShouldGetStackTraceForSize(size_t size) const {
+  // A stack table exists only for sizes previously flagged as suspicious by
+  // TestForLeaks(), so only those sizes require a (costly) stack unwind.
+  return size_entries_[SizeToIndex(size)].stack_table != nullptr;
+}
+
+// Records one allocation: updates the global and per-size counters, attributes
+// the allocation to a unique call stack when this size class is being
+// profiled, and remembers the pointer so RecordFree() can find it later.
+void LeakDetectorImpl::RecordAlloc(const void* ptr,
+                                   size_t size,
+                                   int stack_depth,
+                                   const void* const stack[]) {
+  AllocInfo alloc_info;
+  alloc_info.size = size;
+
+  alloc_size_ += alloc_info.size;
+  ++num_allocs_;
+
+  AllocSizeEntry* entry = &size_entries_[SizeToIndex(size)];
+  ++entry->num_allocs;
+
+  // Only store a call stack if this size class has a stack table and the
+  // caller actually unwound a stack; otherwise |alloc_info.call_stack| stays
+  // nullptr (set by AllocInfo's constructor).
+  if (entry->stack_table && stack_depth > 0) {
+    alloc_info.call_stack =
+        call_stack_manager_.GetCallStack(stack_depth, stack);
+    entry->stack_table->Add(alloc_info.call_stack);
+
+    ++num_allocs_with_call_stack_;
+  }
+
+  uintptr_t addr = reinterpret_cast<uintptr_t>(ptr);
+  address_map_.insert(std::pair<uintptr_t, AllocInfo>(addr, alloc_info));
+}
+
+// Records one deallocation, reversing the bookkeeping done by RecordAlloc().
+// Pointers that were never recorded (or already freed) are silently ignored.
+void LeakDetectorImpl::RecordFree(const void* ptr) {
+  // Look up address.
+  uintptr_t addr = reinterpret_cast<uintptr_t>(ptr);
+  auto iter = address_map_.find(addr);
+  // TODO(sque): Catch and report double frees.
+  if (iter == address_map_.end())
+    return;
+
+  const AllocInfo& alloc_info = iter->second;
+
+  // Update the per-size counters using the size recorded at allocation time.
+  AllocSizeEntry* entry = &size_entries_[SizeToIndex(alloc_info.size)];
+  ++entry->num_frees;
+
+  // If the allocation was attributed to a call stack, remove it from the
+  // stack table so the table reflects only live allocations.
+  const CallStack* call_stack = alloc_info.call_stack;
+  if (call_stack) {
+    if (entry->stack_table)
+      entry->stack_table->Remove(call_stack);
+  }
+  ++num_frees_;
+  free_size_ += alloc_info.size;
+
+  address_map_.erase(iter);
+}
+
+// Runs one round of leak analysis and writes any suspected leaks to
+// |*reports| (previous contents are discarded).
+void LeakDetectorImpl::TestForLeaks(InternalVector<LeakReport>* reports) {
+  // Tier 1: add net alloc counts for each size to a ranked list and feed it
+  // to the size-based leak analyzer.
+  RankedList size_ranked_list(kRankedListSize);
+  for (size_t i = 0; i < size_entries_.size(); ++i) {
+    const AllocSizeEntry& entry = size_entries_[i];
+    ValueType size_value(IndexToSize(i));
+    size_ranked_list.Add(size_value, entry.num_allocs - entry.num_frees);
+  }
+  size_leak_analyzer_.AddSample(size_ranked_list.Pass());
+
+  // Get suspected leaks by size. Each newly suspected size gets a call stack
+  // table, placement-new'd on CustomAllocator memory (torn down the same way
+  // in the destructor).
+  for (const ValueType& size_value : size_leak_analyzer_.suspected_leaks()) {
+    uint32_t size = size_value.size();
+    AllocSizeEntry* entry = &size_entries_[SizeToIndex(size)];
+    if (entry->stack_table)
+      continue;
+    entry->stack_table = new (CustomAllocator::Allocate(sizeof(CallStackTable)))
+        CallStackTable(call_stack_suspicion_threshold_);
+    ++num_stack_tables_;
+  }
+
+  // Check for leaks in each CallStackTable. It makes sense to do this before
+  // checking the size allocations, because that could potentially create new
+  // CallStackTables. However, the overhead to check a new CallStackTable is
+  // small since this function is run very rarely. So handle the leak checks of
+  // Tier 2 here.
+  reports->clear();
+  for (size_t i = 0; i < size_entries_.size(); ++i) {
+    const AllocSizeEntry& entry = size_entries_[i];
+    CallStackTable* stack_table = entry.stack_table;
+    if (!stack_table || stack_table->empty())
+      continue;
+
+    size_t size = IndexToSize(i);
+
+    // Get suspected leaks by call stack.
+    stack_table->TestForLeaks();
+    const LeakAnalyzer& leak_analyzer = stack_table->leak_analyzer();
+    for (const ValueType& call_stack_value : leak_analyzer.suspected_leaks()) {
+      const CallStack* call_stack = call_stack_value.call_stack();
+
+      // Return reports by storing in |*reports|. Stack addresses are
+      // converted to binary offsets where possible (see GetOffset()).
+      reports->resize(reports->size() + 1);
+      LeakReport* report = &reports->back();
+      report->alloc_size_bytes_ = size;
+      report->call_stack_.resize(call_stack->depth);
+      for (size_t j = 0; j < call_stack->depth; ++j) {
+        report->call_stack_[j] = GetOffset(call_stack->stack[j]);
+      }
+    }
+  }
+}
+
+size_t LeakDetectorImpl::AddressHash::operator()(uintptr_t addr) const {
+  // Hash the raw bytes of the address value for use by |address_map_|.
+  return base::Hash(reinterpret_cast<const char*>(&addr), sizeof(addr));
+}
+
+uintptr_t LeakDetectorImpl::GetOffset(const void* ptr) const {
+  // Converts an absolute code address to an offset within the binary's
+  // mapping, using the half-open range [mapping_addr_, mapping_addr_ +
+  // mapping_size_). Addresses outside the mapping are passed through
+  // unchanged.
+  uintptr_t ptr_value = reinterpret_cast<uintptr_t>(ptr);
+  if (ptr_value >= mapping_addr_ && ptr_value < mapping_addr_ + mapping_size_)
+    return ptr_value - mapping_addr_;
+  return ptr_value;
+}
+
+} // namespace leak_detector
+} // namespace metrics
diff --git a/components/metrics/leak_detector/leak_detector_impl.h b/components/metrics/leak_detector/leak_detector_impl.h
new file mode 100644
index 0000000..055743b
--- /dev/null
+++ b/components/metrics/leak_detector/leak_detector_impl.h
@@ -0,0 +1,161 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef COMPONENTS_METRICS_LEAK_DETECTOR_LEAK_DETECTOR_IMPL_H_
+#define COMPONENTS_METRICS_LEAK_DETECTOR_LEAK_DETECTOR_IMPL_H_
+
+#include <stdint.h>
+
+#include <vector>
+
+#include "base/containers/hash_tables.h"
+#include "base/macros.h"
+#include "components/metrics/leak_detector/call_stack_manager.h"
+#include "components/metrics/leak_detector/custom_allocator.h"
+#include "components/metrics/leak_detector/leak_analyzer.h"
+
+namespace metrics {
+namespace leak_detector {
+
+class CallStackTable;
+
+// Class that contains the actual leak detection mechanism.
+// Not thread-safe.
+class LeakDetectorImpl {
+ public:
+ // Vector type that's safe to use within the memory leak detector. Uses
+ // CustomAllocator to avoid recursive malloc hook invocation.
+ template <typename T>
+ using InternalVector = std::vector<T, STLAllocator<T, CustomAllocator>>;
+
+ // Leak report generated by LeakDetectorImpl.
+ class LeakReport {
+ public:
+ LeakReport();
+ ~LeakReport();
+
+ size_t alloc_size_bytes() const { return alloc_size_bytes_; }
+
+ const InternalVector<uintptr_t>& call_stack() const { return call_stack_; }
+
+ // Used to compare the contents of two leak reports.
+ bool operator<(const LeakReport& other) const;
+
+ private:
+ // LeakDetectorImpl needs access to class members when creating a new leak
+ // report.
+ friend class LeakDetectorImpl;
+
+ // Number of bytes allocated by the leak site during each allocation.
+ size_t alloc_size_bytes_;
+
+ // Unlike the CallStack struct, which consists of addresses, this call stack
+ // will contain offsets in the executable binary.
+ InternalVector<uintptr_t> call_stack_;
+
+ // TODO(sque): Add leak detector parameters.
+ };
+
+ LeakDetectorImpl(uintptr_t mapping_addr,
+ size_t mapping_size,
+ int size_suspicion_threshold,
+ int call_stack_suspicion_threshold);
+ ~LeakDetectorImpl();
+
+ // Indicates whether the given allocation size has an associated call stack
+ // table, and thus requires a stack unwind.
+ bool ShouldGetStackTraceForSize(size_t size) const;
+
+ // Record allocs and frees.
+ void RecordAlloc(const void* ptr,
+ size_t size,
+ int stack_depth,
+ const void* const call_stack[]);
+ void RecordFree(const void* ptr);
+
+ // Run check for possible leaks based on the current profiling data.
+ void TestForLeaks(InternalVector<LeakReport>* reports);
+
+ private:
+ // A record of allocations for a particular size.
+ struct AllocSizeEntry {
+ // Number of allocations and frees for this size.
+ uint32_t num_allocs;
+ uint32_t num_frees;
+
+ // A stack table, if this size is being profiled for stack as well.
+ CallStackTable* stack_table;
+ };
+
+ // Info for a single allocation.
+ struct AllocInfo {
+ AllocInfo() : call_stack(nullptr) {}
+
+ // Number of bytes in this allocation.
+ size_t size;
+
+ // Points to a unique call stack.
+ const CallStack* call_stack;
+ };
+
+ // Allocator class for allocation entry map. Maps allocated addresses to
+ // AllocInfo objects.
+ using AllocationEntryAllocator =
+ STLAllocator<std::pair<const void*, AllocInfo>, CustomAllocator>;
+
+ // Hash class for addresses.
+ struct AddressHash {
+ size_t operator()(uintptr_t addr) const;
+ };
+
+ // Returns the offset of |ptr| within the current binary. If it is not in the
+ // current binary, just return |ptr| as an integer.
+ uintptr_t GetOffset(const void* ptr) const;
+
+ // Owns all unique call stack objects, which are allocated on the heap. Any
+ // other class or function that references a call stack must get it from here,
+ // but may not take ownership of the call stack object.
+ CallStackManager call_stack_manager_;
+
+ // Allocation stats.
+ uint64_t num_allocs_;
+ uint64_t num_frees_;
+ uint64_t alloc_size_;
+ uint64_t free_size_;
+
+ uint32_t num_allocs_with_call_stack_;
+ uint32_t num_stack_tables_;
+
+ // Stores all individual recorded allocations.
+ base::hash_map<uintptr_t,
+ AllocInfo,
+ AddressHash,
+ std::equal_to<uintptr_t>,
+ AllocationEntryAllocator> address_map_;
+
+ // Used to analyze potential leak patterns in the allocation sizes.
+ LeakAnalyzer size_leak_analyzer_;
+
+ // Allocation stats for each size.
+ InternalVector<AllocSizeEntry> size_entries_;
+
+ // Address mapping info of the current binary.
+ uintptr_t mapping_addr_;
+ size_t mapping_size_;
+
+ // Number of consecutive times an allocation size must trigger suspicion to be
+ // considered a leak suspect.
+ int size_suspicion_threshold_;
+
+ // Number of consecutive times a call stack must trigger suspicion to be
+ // considered a leak suspect.
+ int call_stack_suspicion_threshold_;
+
+ DISALLOW_COPY_AND_ASSIGN(LeakDetectorImpl);
+};
+
+} // namespace leak_detector
+} // namespace metrics
+
+#endif // COMPONENTS_METRICS_LEAK_DETECTOR_LEAK_DETECTOR_IMPL_H_
diff --git a/components/metrics/leak_detector/leak_detector_impl_unittest.cc b/components/metrics/leak_detector/leak_detector_impl_unittest.cc
new file mode 100644
index 0000000..0e9293e
--- /dev/null
+++ b/components/metrics/leak_detector/leak_detector_impl_unittest.cc
@@ -0,0 +1,464 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "components/metrics/leak_detector/leak_detector_impl.h"
+
+#include <math.h>
+#include <stdint.h>
+
+#include <complex>
+#include <new>
+#include <set>
+#include <vector>
+
+#include "base/macros.h"
+#include "base/memory/scoped_ptr.h"
+#include "components/metrics/leak_detector/custom_allocator.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace metrics {
+namespace leak_detector {
+
+using InternalLeakReport = LeakDetectorImpl::LeakReport;
+
+namespace {
+
+// Makes working with complex numbers easier.
+using Complex = std::complex<double>;
+
+// The mapping location in memory for a fictional executable.
+const uintptr_t kMappingAddr = 0x800000;
+const size_t kMappingSize = 0x200000;
+
+// Some call stacks within the fictional executable.
+// * - outside the mapping range, e.g. JIT code.
+const uintptr_t kRawStack0[] = {
+ 0x800100, 0x900000, 0x880080, 0x810000,
+};
+const uintptr_t kRawStack1[] = {
+ 0x940000, 0x980000,
+ 0xdeadbeef, // *
+ 0x9a0000,
+};
+const uintptr_t kRawStack2[] = {
+ 0x8f0d00, 0x803abc, 0x9100a0,
+};
+const uintptr_t kRawStack3[] = {
+ 0x90fcde,
+ 0x900df00d, // *
+ 0x801000, 0x880088,
+ 0xdeadcafe, // *
+ 0x9f0000, 0x8700a0, 0x96037c,
+};
+const uintptr_t kRawStack4[] = {
+ 0x8c0000, 0x85d00d, 0x921337,
+ 0x780000, // *
+};
+const uintptr_t kRawStack5[] = {
+ 0x990000, 0x888888, 0x830ac0, 0x8e0000,
+ 0xc00000, // *
+};
+
+// This struct makes it easier to pass call stack info to
+// LeakDetectorImplTest::Alloc().
+struct TestCallStack {
+ const uintptr_t* stack; // A reference to the original stack data.
+ size_t depth;
+};
+
+const TestCallStack kStack0 = {kRawStack0, arraysize(kRawStack0)};
+const TestCallStack kStack1 = {kRawStack1, arraysize(kRawStack1)};
+const TestCallStack kStack2 = {kRawStack2, arraysize(kRawStack2)};
+const TestCallStack kStack3 = {kRawStack3, arraysize(kRawStack3)};
+const TestCallStack kStack4 = {kRawStack4, arraysize(kRawStack4)};
+const TestCallStack kStack5 = {kRawStack5, arraysize(kRawStack5)};
+
+// The interval between consecutive analyses (LeakDetectorImpl::TestForLeaks),
+// in number of bytes allocated. e.g. if |kAllocedSizeAnalysisInterval| = 1024
+// then call TestForLeaks() every 1024 bytes of allocation that occur.
+static const size_t kAllocedSizeAnalysisInterval = 8192;
+
+} // namespace
+
+// This test suite will test the ability of LeakDetectorImpl to catch leaks in
+// a program. Individual tests can run leaky code locally.
+//
+// The leaky code must call Alloc() and Free() for heap memory management. It
+// should not call the heap functions (new/delete) directly. See comments on
+// those functions for more details.
+class LeakDetectorImplTest : public ::testing::Test {
+ public:
+ LeakDetectorImplTest()
+ : total_num_allocs_(0),
+ total_num_frees_(0),
+ total_alloced_size_(0),
+ next_analysis_total_alloced_size_(kAllocedSizeAnalysisInterval) {}
+
+ void SetUp() override {
+ CustomAllocator::Initialize();
+
+ const int kSizeSuspicionThreshold = 4;
+ const int kCallStackSuspicionThreshold = 4;
+ detector_.reset(new LeakDetectorImpl(kMappingAddr, kMappingSize,
+ kSizeSuspicionThreshold,
+ kCallStackSuspicionThreshold));
+ }
+
+ void TearDown() override {
+ // Free any memory that was leaked by test cases. Do not use Free() because
+ // that will try to modify |alloced_ptrs_|.
+ for (void* ptr : alloced_ptrs_)
+ delete[] reinterpret_cast<char*>(ptr);
+ alloced_ptrs_.clear();
+
+ // Must destroy all objects that use CustomAllocator before shutting down.
+ detector_.reset();
+ stored_reports_.clear();
+
+ EXPECT_TRUE(CustomAllocator::Shutdown());
+ }
+
+ protected:
+ // Alloc and free functions that allocate and free heap memory and
+ // automatically pass alloc/free info to |detector_|. They emulate the
+ // alloc/free hook functions that would call into LeakDetectorImpl in
+ // real-life usage. They also keep track of individual allocations locally, so
+ // any leaked memory could be cleaned up.
+ //
+ // |stack| is just a nominal call stack object to identify the call site. It
+ // doesn't have to contain the stack trace of the actual call stack.
+ void* Alloc(size_t size, const TestCallStack& stack) {
+ void* ptr = new char[size];
+ detector_->RecordAlloc(ptr, size, stack.depth,
+ reinterpret_cast<const void* const*>(stack.stack));
+
+ EXPECT_TRUE(alloced_ptrs_.find(ptr) == alloced_ptrs_.end());
+ alloced_ptrs_.insert(ptr);
+
+ ++total_num_allocs_;
+ total_alloced_size_ += size;
+ if (total_alloced_size_ >= next_analysis_total_alloced_size_) {
+ LeakDetectorImpl::InternalVector<InternalLeakReport> reports;
+ detector_->TestForLeaks(&reports);
+ for (const InternalLeakReport& report : reports)
+ stored_reports_.insert(report);
+
+ // Determine when the next leak analysis should occur.
+ while (total_alloced_size_ >= next_analysis_total_alloced_size_)
+ next_analysis_total_alloced_size_ += kAllocedSizeAnalysisInterval;
+ }
+ return ptr;
+ }
+
+ // See comment for Alloc().
+ void Free(void* ptr) {
+ auto find_ptr_iter = alloced_ptrs_.find(ptr);
+ EXPECT_FALSE(find_ptr_iter == alloced_ptrs_.end());
+ if (find_ptr_iter == alloced_ptrs_.end())
+ return;
+ alloced_ptrs_.erase(find_ptr_iter);
+ ++total_num_frees_;
+
+ detector_->RecordFree(ptr);
+
+ delete[] reinterpret_cast<char*>(ptr);
+ }
+
+ // TEST CASE: Julia set fractal computation. Pass in enable_leaks=true to
+ // trigger some memory leaks.
+ void JuliaSet(bool enable_leaks);
+
+ // Instance of the class being tested.
+ scoped_ptr<LeakDetectorImpl> detector_;
+
+ // Number of pointers allocated and freed so far.
+ size_t total_num_allocs_;
+ size_t total_num_frees_;
+
+ // Keeps count of total size allocated by Alloc().
+ size_t total_alloced_size_;
+
+ // The cumulative allocation size at which to trigger the TestForLeaks() call.
+ size_t next_analysis_total_alloced_size_;
+
+ // Stores all pointers to memory allocated by by Alloc() so we can manually
+ // free the leaked pointers at the end. This also serves as redundant
+ // bookkeepping: it stores all pointers that have been allocated but not yet
+ // freed.
+ std::set<void*> alloced_ptrs_;
+
+ // Store leak reports here. Use a set so duplicate reports are not stored.
+ std::set<InternalLeakReport> stored_reports_;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(LeakDetectorImplTest);
+};
+
+void LeakDetectorImplTest::JuliaSet(bool enable_leaks) {
+ // The center region of the complex plane that is the basis for our Julia set
+ // computations is a circle of radius kRadius.
+ constexpr double kRadius = 2;
+
+ // To track points in the complex plane, we will use a rectangular grid in the
+ // range defined by [-kRadius, kRadius] along both axes.
+ constexpr double kRangeMin = -kRadius;
+ constexpr double kRangeMax = kRadius;
+
+ // Divide each axis into intervals, each of which is associated with a point
+ // on that axis at its center.
+ constexpr double kIntervalInverse = 64;
+ constexpr double kInterval = 1.0 / kIntervalInverse;
+ constexpr int kNumPoints = (kRangeMax - kRangeMin) / kInterval + 1;
+
+ // Contains some useful functions for converting between points on the complex
+ // plane and in a gridlike data structure.
+ struct ComplexPlane {
+ static int GetXGridIndex(const Complex& value) {
+ return (value.real() + kInterval / 2 - kRangeMin) / kInterval;
+ }
+ static int GetYGridIndex(const Complex& value) {
+ return (value.imag() + kInterval / 2 - kRangeMin) / kInterval;
+ }
+ static int GetArrayIndex(const Complex& value) {
+ return GetXGridIndex(value) + GetYGridIndex(value) * kNumPoints;
+ }
+ static Complex GetComplexForGridPoint(size_t x, size_t y) {
+ return Complex(kRangeMin + x * kInterval, kRangeMin + y * kInterval);
+ }
+ };
+
+ // Make sure the choice of interval doesn't result in any loss of precision.
+ ASSERT_EQ(1.0, kInterval * kIntervalInverse);
+
+ // Create a grid for part of the complex plane, with each axis within the
+ // range [kRangeMin, kRangeMax].
+ constexpr size_t width = kNumPoints;
+ constexpr size_t height = kNumPoints;
+ std::vector<Complex*> grid(width * height);
+
+ // Initialize an object for each point within the inner circle |z| < kRadius.
+ for (size_t i = 0; i < width; ++i) {
+ for (size_t j = 0; j < height; ++j) {
+ Complex point = ComplexPlane::GetComplexForGridPoint(i, j);
+ // Do not store any values outside the inner circle.
+ if (abs(point) <= kRadius) {
+ grid[i + j * width] =
+ new (Alloc(sizeof(Complex), kStack0)) Complex(point);
+ }
+ }
+ }
+ EXPECT_LE(alloced_ptrs_.size(), width * height);
+
+ // Create a new grid for the result of the transformation.
+ std::vector<Complex*> next_grid(width * height, nullptr);
+
+ const int kNumIterations = 20;
+ for (int n = 0; n < kNumIterations; ++n) {
+ for (int i = 0; i < kNumPoints; ++i) {
+ for (int j = 0; j < kNumPoints; ++j) {
+ if (!grid[i + j * width])
+ continue;
+
+ // NOTE: The below code is NOT an efficient way to compute a Julia set.
+ // This is only to test the leak detector with some nontrivial code.
+
+ // A simple polynomial function for generating Julia sets is:
+ // f(z) = z^n + c
+
+ // But in this algorithm, we need the inverse:
+ // fInv(z) = (z - c)^(1/n)
+
+ // Here, let's use n=5 and c=0.544.
+ const Complex c(0.544, 0);
+ const Complex& z = *grid[i + j * width];
+
+ // This is the principal root.
+ Complex root = pow(z - c, 0.2);
+
+ // Discard the result if it is too far out from the center of the plane.
+ if (abs(root) > kRadius)
+ continue;
+
+ // The below code only allocates Complex objects of the same size. The
+ // leak detector expects various sizes, so increase the allocation size
+ // by a different amount at each call site.
+
+ // Nth root produces N results.
+ // Place all root results on |next_grid|.
+
+ // First, place the principal root.
+ if (!next_grid[ComplexPlane::GetArrayIndex(root)]) {
+ next_grid[ComplexPlane::GetArrayIndex(root)] =
+ new (Alloc(sizeof(Complex) + 24, kStack1)) Complex(root);
+ }
+
+ double magnitude = abs(root);
+ double angle = arg(root);
+ // To generate other roots, rotate the principal root by increments of
+ // 1/N of a full circle.
+ const double kAngleIncrement = M_PI * 2 / 5;
+
+ // Second root.
+ root = std::polar(magnitude, angle + kAngleIncrement);
+ if (!next_grid[ComplexPlane::GetArrayIndex(root)]) {
+ next_grid[ComplexPlane::GetArrayIndex(root)] =
+ new (Alloc(sizeof(Complex) + 40, kStack2)) Complex(root);
+ }
+
+ // In some of the sections below, setting |enable_leaks| to true will
+ // trigger a memory leak by overwriting the old Complex pointer value
+ // without freeing it. Due to the nature of complex roots being confined
+ // to equal sections of the complex plane, each new pointer will
+ // displace an old pointer that was allocated from the same line of
+ // code.
+
+ // Third root.
+ root = std::polar(magnitude, angle + kAngleIncrement * 2);
+ // *** LEAK ***
+ if (enable_leaks || !next_grid[ComplexPlane::GetArrayIndex(root)]) {
+ next_grid[ComplexPlane::GetArrayIndex(root)] =
+ new (Alloc(sizeof(Complex) + 40, kStack3)) Complex(root);
+ }
+
+ // Fourth root.
+ root = std::polar(magnitude, angle + kAngleIncrement * 3);
+ // *** LEAK ***
+ if (enable_leaks || !next_grid[ComplexPlane::GetArrayIndex(root)]) {
+ next_grid[ComplexPlane::GetArrayIndex(root)] =
+ new (Alloc(sizeof(Complex) + 52, kStack4)) Complex(root);
+ }
+
+ // Fifth root.
+ root = std::polar(magnitude, angle + kAngleIncrement * 4);
+ if (!next_grid[ComplexPlane::GetArrayIndex(root)]) {
+ next_grid[ComplexPlane::GetArrayIndex(root)] =
+ new (Alloc(sizeof(Complex) + 52, kStack5)) Complex(root);
+ }
+ }
+ }
+
+ // Clear the previously allocated points.
+ for (Complex*& point : grid) {
+ if (point) {
+ Free(point);
+ point = nullptr;
+ }
+ }
+
+ // Now swap the two grids for the next iteration.
+ grid.swap(next_grid);
+ }
+
+ // Clear the previously allocated points.
+ for (Complex*& point : grid) {
+ if (point) {
+ Free(point);
+ point = nullptr;
+ }
+ }
+}
+
+TEST_F(LeakDetectorImplTest, CheckTestFramework) {
+ EXPECT_EQ(0U, total_num_allocs_);
+ EXPECT_EQ(0U, total_num_frees_);
+ EXPECT_EQ(0U, alloced_ptrs_.size());
+
+ // Allocate some memory.
+ void* ptr0 = Alloc(12, kStack0);
+ void* ptr1 = Alloc(16, kStack0);
+ void* ptr2 = Alloc(24, kStack0);
+ EXPECT_EQ(3U, total_num_allocs_);
+ EXPECT_EQ(0U, total_num_frees_);
+ EXPECT_EQ(3U, alloced_ptrs_.size());
+
+ // Free one of the pointers.
+ Free(ptr1);
+ EXPECT_EQ(3U, total_num_allocs_);
+ EXPECT_EQ(1U, total_num_frees_);
+ EXPECT_EQ(2U, alloced_ptrs_.size());
+
+ // Allocate some more memory.
+ void* ptr3 = Alloc(72, kStack1);
+ void* ptr4 = Alloc(104, kStack1);
+ void* ptr5 = Alloc(96, kStack1);
+ void* ptr6 = Alloc(24, kStack1);
+ EXPECT_EQ(7U, total_num_allocs_);
+ EXPECT_EQ(1U, total_num_frees_);
+ EXPECT_EQ(6U, alloced_ptrs_.size());
+
+ // Free more pointers.
+ Free(ptr2);
+ Free(ptr4);
+ Free(ptr6);
+ EXPECT_EQ(7U, total_num_allocs_);
+ EXPECT_EQ(4U, total_num_frees_);
+ EXPECT_EQ(3U, alloced_ptrs_.size());
+
+ // Free remaining memory.
+ Free(ptr0);
+ Free(ptr3);
+ Free(ptr5);
+ EXPECT_EQ(7U, total_num_allocs_);
+ EXPECT_EQ(7U, total_num_frees_);
+ EXPECT_EQ(0U, alloced_ptrs_.size());
+}
+
+TEST_F(LeakDetectorImplTest, JuliaSetNoLeak) {
+ JuliaSet(false /* enable_leaks */);
+
+ // JuliaSet() should have run cleanly without leaking.
+ EXPECT_EQ(total_num_allocs_, total_num_frees_);
+ EXPECT_EQ(0U, alloced_ptrs_.size());
+ ASSERT_EQ(0U, stored_reports_.size());
+}
+
+TEST_F(LeakDetectorImplTest, JuliaSetWithLeak) {
+  JuliaSet(true /* enable_leaks */);
+
+  // JuliaSet() should have leaked some memory from two call sites.
+  EXPECT_GT(total_num_allocs_, total_num_frees_);
+  EXPECT_GT(alloced_ptrs_.size(), 0U);
+
+  // There should be one unique leak report generated for each leaky call site.
+  ASSERT_EQ(2U, stored_reports_.size());
+
+  // The reports should be stored in order of size.
+
+  // |report1| comes from the call site in JuliaSet() corresponding to
+  // |kStack3|.
+  const InternalLeakReport& report1 = *stored_reports_.begin();
+  EXPECT_EQ(sizeof(Complex) + 40, report1.alloc_size_bytes());
+  EXPECT_EQ(kStack3.depth, report1.call_stack().size());
+  for (size_t i = 0; i < kStack3.depth && i < report1.call_stack().size();
+       ++i) {
+    // The call stack's addresses may not fall within the mapping address.
+    // Those that don't will not be converted to mapping offsets. The mapping
+    // range is half-open, [kMappingAddr, kMappingAddr + kMappingSize), so use
+    // |<| to match the check in LeakDetectorImpl::GetOffset().
+    if (kStack3.stack[i] >= kMappingAddr &&
+        kStack3.stack[i] < kMappingAddr + kMappingSize) {
+      EXPECT_EQ(kStack3.stack[i] - kMappingAddr, report1.call_stack()[i]);
+    } else {
+      EXPECT_EQ(kStack3.stack[i], report1.call_stack()[i]);
+    }
+  }
+
+  // |report2| comes from the call site in JuliaSet() corresponding to
+  // |kStack4|.
+  const InternalLeakReport& report2 = *(++stored_reports_.begin());
+  EXPECT_EQ(sizeof(Complex) + 52, report2.alloc_size_bytes());
+  EXPECT_EQ(kStack4.depth, report2.call_stack().size());
+  for (size_t i = 0; i < kStack4.depth && i < report2.call_stack().size();
+       ++i) {
+    // Same half-open mapping range check as for |report1| above.
+    if (kStack4.stack[i] >= kMappingAddr &&
+        kStack4.stack[i] < kMappingAddr + kMappingSize) {
+      EXPECT_EQ(kStack4.stack[i] - kMappingAddr, report2.call_stack()[i]);
+    } else {
+      EXPECT_EQ(kStack4.stack[i], report2.call_stack()[i]);
+    }
+  }
+}
+
+} // namespace leak_detector
+} // namespace metrics
diff --git a/components/metrics/leak_detector/leak_detector_value_type.cc b/components/metrics/leak_detector/leak_detector_value_type.cc
new file mode 100644
index 0000000..4642f51
--- /dev/null
+++ b/components/metrics/leak_detector/leak_detector_value_type.cc
@@ -0,0 +1,47 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "components/metrics/leak_detector/leak_detector_value_type.h"
+
+#include <stdio.h>
+
+namespace metrics {
+namespace leak_detector {
+
+bool LeakDetectorValueType::operator==(
+ const LeakDetectorValueType& other) const {
+ if (type_ != other.type_)
+ return false;
+
+ switch (type_) {
+ case TYPE_SIZE:
+ return size_ == other.size_;
+ case TYPE_CALL_STACK:
+ return call_stack_ == other.call_stack_;
+ case TYPE_NONE:
+ // "NONE" types are considered to be all identical.
+ return true;
+ }
+ return false;
+}
+
+bool LeakDetectorValueType::operator<(
+ const LeakDetectorValueType& other) const {
+ if (type_ != other.type_)
+ return type_ < other.type_;
+
+ switch (type_) {
+ case TYPE_SIZE:
+ return size_ < other.size_;
+ case TYPE_CALL_STACK:
+ return call_stack_ < other.call_stack_;
+ case TYPE_NONE:
+ // "NONE" types are considered to be all identical.
+ return false;
+ }
+ return false;
+}
+
+} // namespace leak_detector
+} // namespace metrics
diff --git a/components/metrics/leak_detector/leak_detector_value_type.h b/components/metrics/leak_detector/leak_detector_value_type.h
new file mode 100644
index 0000000..40f6108
--- /dev/null
+++ b/components/metrics/leak_detector/leak_detector_value_type.h
@@ -0,0 +1,52 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef COMPONENTS_METRICS_LEAK_DETECTOR_LEAK_DETECTOR_VALUE_TYPE_
+#define COMPONENTS_METRICS_LEAK_DETECTOR_LEAK_DETECTOR_VALUE_TYPE_
+
+#include <stddef.h>
+#include <stdint.h>
+
+namespace metrics {
+namespace leak_detector {
+
+// Used for tracking unique call stacks.
+// Not thread-safe.
+struct CallStack;
+
+class LeakDetectorValueType {
+ public:
+ // Supported types.
+ enum Type {
+ TYPE_NONE,
+ TYPE_SIZE,
+ TYPE_CALL_STACK,
+ };
+
+ LeakDetectorValueType() : type_(TYPE_NONE), size_(0), call_stack_(nullptr) {}
+ explicit LeakDetectorValueType(size_t size)
+ : type_(TYPE_SIZE), size_(size), call_stack_(nullptr) {}
+ explicit LeakDetectorValueType(const CallStack* call_stack)
+ : type_(TYPE_CALL_STACK), size_(0), call_stack_(call_stack) {}
+
+ // Accessors.
+ Type type() const { return type_; }
+ size_t size() const { return size_; }
+ const CallStack* call_stack() const { return call_stack_; }
+
+ // Comparators.
+ bool operator==(const LeakDetectorValueType& other) const;
+ bool operator<(const LeakDetectorValueType& other) const;
+
+ private:
+ Type type_;
+
+ size_t size_;
+ const CallStack* call_stack_;
+};
+
+} // namespace leak_detector
+} // namespace metrics
+
+#endif // COMPONENTS_METRICS_LEAK_DETECTOR_LEAK_DETECTOR_VALUE_TYPE_
diff --git a/components/metrics/leak_detector/ranked_list.cc b/components/metrics/leak_detector/ranked_list.cc
new file mode 100644
index 0000000..4900025
--- /dev/null
+++ b/components/metrics/leak_detector/ranked_list.cc
@@ -0,0 +1,45 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "components/metrics/leak_detector/ranked_list.h"
+
+#include <algorithm>
+
+namespace metrics {
+namespace leak_detector {
+
+RankedList::RankedList(size_t max_size) : max_size_(max_size) {}
+
+RankedList::~RankedList() {}
+
+RankedList::RankedList(RankedList::RValue other)
+ : max_size_(other.object->max_size_) {
+ entries_.swap(other.object->entries_);
+}
+
+RankedList& RankedList::operator=(RankedList::RValue other) {
+ max_size_ = other.object->max_size_;
+ entries_.swap(other.object->entries_);
+ return *this;
+}
+
+void RankedList::Add(const ValueType& value, int count) {
+ // Determine where to insert the value given its count.
+ EntryList::iterator iter = std::upper_bound(entries_.begin(), entries_.end(),
+ Entry{ValueType(), count});
+
+  // If the list is full, do not add an entry whose |count| does not exceed
+  // the lowest count among the entries already in the list.
+ if (size() == max_size_ && iter == end())
+ return;
+
+ entries_.insert(iter, Entry({value, count}));
+
+ // Limit the list size if it exceeds the maximum allowed size.
+ if (entries_.size() > max_size_)
+ entries_.resize(max_size_);
+}
+
+} // namespace leak_detector
+} // namespace metrics
diff --git a/components/metrics/leak_detector/ranked_list.h b/components/metrics/leak_detector/ranked_list.h
new file mode 100644
index 0000000..5cb87ba
--- /dev/null
+++ b/components/metrics/leak_detector/ranked_list.h
@@ -0,0 +1,81 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef COMPONENTS_METRICS_LEAK_DETECTOR_RANKED_LIST_H_
+#define COMPONENTS_METRICS_LEAK_DETECTOR_RANKED_LIST_H_
+
+#include <stddef.h>
+
+#include <list>
+
+#include "base/macros.h"
+#include "base/move.h"
+#include "components/metrics/leak_detector/custom_allocator.h"
+#include "components/metrics/leak_detector/leak_detector_value_type.h"
+#include "components/metrics/leak_detector/stl_allocator.h"
+
+namespace metrics {
+namespace leak_detector {
+
+// RankedList lets you add entries consisting of a value-count pair, and
+// automatically sorts them internally by count in descending order. This allows
+// for the user of this list to put value-count pairs into this list without
+// having to explicitly sort them by count.
+class RankedList {
+ MOVE_ONLY_TYPE_FOR_CPP_03(RankedList, RValue);
+
+ public:
+ using ValueType = LeakDetectorValueType;
+
+ // A single entry in the RankedList. The RankedList sorts entries by |count|
+ // in descending order.
+ struct Entry {
+ ValueType value;
+ int count;
+
+ // Create a < comparator for reverse sorting.
+ bool operator<(const Entry& entry) const { return count > entry.count; }
+ };
+
+ using EntryList = std::list<Entry, STLAllocator<Entry, CustomAllocator>>;
+ using const_iterator = EntryList::const_iterator;
+
+ explicit RankedList(size_t max_size);
+ ~RankedList();
+
+ // For move semantics.
+ RankedList(RValue other);
+ RankedList& operator=(RValue other);
+
+ // Accessors for begin() and end() const iterators.
+ const_iterator begin() const { return entries_.begin(); }
+ const_iterator end() const { return entries_.end(); }
+
+ size_t size() const { return entries_.size(); }
+ size_t max_size() const { return max_size_; }
+
+ // Add a new value-count pair to the list. Does not check for existing entries
+ // with the same value. Is an O(n) operation due to ordering.
+ void Add(const ValueType& value, int count);
+
+ private:
+ // Max and min counts. Returns 0 if the list is empty.
+ int max_count() const {
+ return entries_.empty() ? 0 : entries_.begin()->count;
+ }
+ int min_count() const {
+ return entries_.empty() ? 0 : entries_.rbegin()->count;
+ }
+
+ // Max number of items that can be stored in the list.
+ size_t max_size_;
+
+  // Entries sorted by |count| in descending order.
+ std::list<Entry, STLAllocator<Entry, CustomAllocator>> entries_;
+};
+
+} // namespace leak_detector
+} // namespace metrics
+
+#endif // COMPONENTS_METRICS_LEAK_DETECTOR_RANKED_LIST_H_
diff --git a/components/metrics/leak_detector/ranked_list_unittest.cc b/components/metrics/leak_detector/ranked_list_unittest.cc
new file mode 100644
index 0000000..9ebbefb
--- /dev/null
+++ b/components/metrics/leak_detector/ranked_list_unittest.cc
@@ -0,0 +1,266 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "components/metrics/leak_detector/ranked_list.h"
+
+#include "base/macros.h"
+#include "components/metrics/leak_detector/custom_allocator.h"
+#include "components/metrics/leak_detector/leak_detector_value_type.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace metrics {
+namespace leak_detector {
+
+namespace {
+
+// Makes it easier to instantiate LeakDetectorValueTypes.
+LeakDetectorValueType Value(uint32_t value) {
+ return LeakDetectorValueType(value);
+}
+
+} // namespace
+
+class RankedListTest : public ::testing::Test {
+ public:
+ RankedListTest() {}
+
+ void SetUp() override { CustomAllocator::Initialize(); }
+ void TearDown() override { EXPECT_TRUE(CustomAllocator::Shutdown()); }
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(RankedListTest);
+};
+
+TEST_F(RankedListTest, Iterators) {
+ RankedList list(10);
+ EXPECT_TRUE(list.begin() == list.end());
+
+ list.Add(Value(0x1234), 100);
+ EXPECT_FALSE(list.begin() == list.end());
+}
+
+TEST_F(RankedListTest, SingleInsertion) {
+ RankedList list(10);
+ EXPECT_EQ(0U, list.size());
+
+ list.Add(Value(0x1234), 100);
+ EXPECT_EQ(1U, list.size());
+
+ auto iter = list.begin();
+ EXPECT_EQ(0x1234U, iter->value.size());
+ EXPECT_EQ(100, iter->count);
+}
+
+TEST_F(RankedListTest, InOrderInsertion) {
+ RankedList list(10);
+ EXPECT_EQ(0U, list.size());
+
+ list.Add(Value(0x1234), 100);
+ EXPECT_EQ(1U, list.size());
+ list.Add(Value(0x2345), 95);
+ EXPECT_EQ(2U, list.size());
+ list.Add(Value(0x3456), 90);
+ EXPECT_EQ(3U, list.size());
+ list.Add(Value(0x4567), 85);
+ EXPECT_EQ(4U, list.size());
+ list.Add(Value(0x5678), 80);
+ EXPECT_EQ(5U, list.size());
+
+ // Iterate through the contents to make sure they match what went in.
+ const RankedList::Entry kExpectedValues[] = {
+ {Value(0x1234), 100}, {Value(0x2345), 95}, {Value(0x3456), 90},
+ {Value(0x4567), 85}, {Value(0x5678), 80},
+ };
+
+ size_t index = 0;
+ for (const auto& entry : list) {
+ EXPECT_LT(index, arraysize(kExpectedValues));
+ EXPECT_EQ(kExpectedValues[index].value.size(), entry.value.size());
+ EXPECT_EQ(kExpectedValues[index].count, entry.count);
+ ++index;
+ }
+}
+
+TEST_F(RankedListTest, ReverseOrderInsertion) {
+ RankedList list(10);
+ EXPECT_EQ(0U, list.size());
+
+ list.Add(Value(0x1234), 0);
+ EXPECT_EQ(1U, list.size());
+ list.Add(Value(0x2345), 5);
+ EXPECT_EQ(2U, list.size());
+ list.Add(Value(0x3456), 10);
+ EXPECT_EQ(3U, list.size());
+ list.Add(Value(0x4567), 15);
+ EXPECT_EQ(4U, list.size());
+ list.Add(Value(0x5678), 20);
+ EXPECT_EQ(5U, list.size());
+
+ // Iterate through the contents to make sure they match what went in.
+ const RankedList::Entry kExpectedValues[] = {
+ {Value(0x5678), 20}, {Value(0x4567), 15}, {Value(0x3456), 10},
+ {Value(0x2345), 5}, {Value(0x1234), 0},
+ };
+
+ size_t index = 0;
+ for (const auto& entry : list) {
+ EXPECT_LT(index, arraysize(kExpectedValues));
+ EXPECT_EQ(kExpectedValues[index].value.size(), entry.value.size());
+ EXPECT_EQ(kExpectedValues[index].count, entry.count);
+ ++index;
+ }
+}
+
+TEST_F(RankedListTest, UnorderedInsertion) {
+ RankedList list(10);
+ EXPECT_EQ(0U, list.size());
+
+ list.Add(Value(0x1234), 15);
+ list.Add(Value(0x2345), 20);
+ list.Add(Value(0x3456), 10);
+ list.Add(Value(0x4567), 30);
+ list.Add(Value(0x5678), 25);
+ EXPECT_EQ(5U, list.size());
+
+ // Iterate through the contents to make sure they match what went in.
+ const RankedList::Entry kExpectedValues1[] = {
+ {Value(0x4567), 30}, {Value(0x5678), 25}, {Value(0x2345), 20},
+ {Value(0x1234), 15}, {Value(0x3456), 10},
+ };
+
+ size_t index = 0;
+ for (const auto& entry : list) {
+ EXPECT_LT(index, arraysize(kExpectedValues1));
+ EXPECT_EQ(kExpectedValues1[index].value.size(), entry.value.size());
+ EXPECT_EQ(kExpectedValues1[index].count, entry.count);
+ ++index;
+ }
+
+ // Add more items.
+ list.Add(Value(0x6789), 35);
+ list.Add(Value(0x789a), 40);
+ list.Add(Value(0x89ab), 50);
+ list.Add(Value(0x9abc), 5);
+ list.Add(Value(0xabcd), 0);
+ EXPECT_EQ(10U, list.size());
+
+ // Iterate through the contents to make sure they match what went in.
+ const RankedList::Entry kExpectedValues2[] = {
+ {Value(0x89ab), 50}, {Value(0x789a), 40}, {Value(0x6789), 35},
+ {Value(0x4567), 30}, {Value(0x5678), 25}, {Value(0x2345), 20},
+ {Value(0x1234), 15}, {Value(0x3456), 10}, {Value(0x9abc), 5},
+ {Value(0xabcd), 0},
+ };
+
+ index = 0;
+ for (const auto& entry : list) {
+ EXPECT_LT(index, arraysize(kExpectedValues2));
+ EXPECT_EQ(kExpectedValues2[index].value.size(), entry.value.size());
+ EXPECT_EQ(kExpectedValues2[index].count, entry.count);
+ ++index;
+ }
+}
+
+TEST_F(RankedListTest, InsertionWithOverflow) {
+ RankedList list(5);
+ EXPECT_EQ(0U, list.size());
+
+ list.Add(Value(0x1234), 15);
+ list.Add(Value(0x2345), 20);
+ list.Add(Value(0x3456), 10);
+ list.Add(Value(0x4567), 30);
+ list.Add(Value(0x5678), 25);
+ EXPECT_EQ(5U, list.size());
+
+ // These values will not make it into the list, which is now full.
+ list.Add(Value(0x6789), 0);
+ EXPECT_EQ(5U, list.size());
+ list.Add(Value(0x789a), 5);
+ EXPECT_EQ(5U, list.size());
+
+ // Iterate through the contents to make sure they match what went in.
+ const RankedList::Entry kExpectedValues1[] = {
+ {Value(0x4567), 30}, {Value(0x5678), 25}, {Value(0x2345), 20},
+ {Value(0x1234), 15}, {Value(0x3456), 10},
+ };
+
+ size_t index = 0;
+ for (const auto& entry : list) {
+ EXPECT_LT(index, arraysize(kExpectedValues1));
+ EXPECT_EQ(kExpectedValues1[index].value.size(), entry.value.size());
+ EXPECT_EQ(kExpectedValues1[index].count, entry.count);
+ ++index;
+ }
+
+ // Insert some more values that go in the middle of the list.
+ list.Add(Value(0x89ab), 27);
+ EXPECT_EQ(5U, list.size());
+ list.Add(Value(0x9abc), 22);
+ EXPECT_EQ(5U, list.size());
+
+ // Iterate through the contents to make sure they match what went in.
+ const RankedList::Entry kExpectedValues2[] = {
+ {Value(0x4567), 30}, {Value(0x89ab), 27}, {Value(0x5678), 25},
+ {Value(0x9abc), 22}, {Value(0x2345), 20},
+ };
+
+ index = 0;
+ for (const auto& entry : list) {
+ EXPECT_LT(index, arraysize(kExpectedValues2));
+ EXPECT_EQ(kExpectedValues2[index].value.size(), entry.value.size());
+ EXPECT_EQ(kExpectedValues2[index].count, entry.count);
+ ++index;
+ }
+
+ // Insert some more values at the front of the list.
+ list.Add(Value(0xabcd), 40);
+ EXPECT_EQ(5U, list.size());
+ list.Add(Value(0xbcde), 35);
+ EXPECT_EQ(5U, list.size());
+
+ // Iterate through the contents to make sure they match what went in.
+ const RankedList::Entry kExpectedValues3[] = {
+ {Value(0xabcd), 40}, {Value(0xbcde), 35}, {Value(0x4567), 30},
+ {Value(0x89ab), 27}, {Value(0x5678), 25},
+ };
+
+ index = 0;
+ for (const auto& entry : list) {
+ EXPECT_LT(index, arraysize(kExpectedValues3));
+ EXPECT_EQ(kExpectedValues3[index].value.size(), entry.value.size());
+ EXPECT_EQ(kExpectedValues3[index].count, entry.count);
+ ++index;
+ }
+}
+
+TEST_F(RankedListTest, MoveOperation) {
+ const RankedList::Entry kExpectedValues[] = {
+ {Value(0x89ab), 50}, {Value(0x789a), 40}, {Value(0x6789), 35},
+ {Value(0x4567), 30}, {Value(0x5678), 25}, {Value(0x2345), 20},
+ {Value(0x1234), 15}, {Value(0x3456), 10}, {Value(0x9abc), 5},
+ {Value(0xabcd), 0},
+ };
+
+ RankedList source_list(10);
+ for (const RankedList::Entry& entry : kExpectedValues) {
+ source_list.Add(entry.value, entry.count);
+ }
+ EXPECT_EQ(10U, source_list.size());
+
+ RankedList dest_list(25); // This should be changed by the move.
+ dest_list = source_list.Pass();
+ EXPECT_EQ(10U, dest_list.size());
+ EXPECT_EQ(10U, dest_list.max_size());
+
+ size_t index = 0;
+ for (const auto& entry : dest_list) {
+ EXPECT_LT(index, arraysize(kExpectedValues));
+ EXPECT_EQ(kExpectedValues[index].value.size(), entry.value.size());
+ EXPECT_EQ(kExpectedValues[index].count, entry.count);
+ ++index;
+ }
+}
+
+} // namespace leak_detector
+} // namespace metrics
diff --git a/components/metrics/leak_detector/stl_allocator.h b/components/metrics/leak_detector/stl_allocator.h
new file mode 100644
index 0000000..bfef0a3
--- /dev/null
+++ b/components/metrics/leak_detector/stl_allocator.h
@@ -0,0 +1,61 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef COMPONENTS_METRICS_LEAK_DETECTOR_STL_ALLOCATOR_H_
+#define COMPONENTS_METRICS_LEAK_DETECTOR_STL_ALLOCATOR_H_
+
+#include <stddef.h>
+
+#include <limits>
+#include <memory>
+
+#include "base/logging.h"
+
+// Generic allocator class for STL objects
+// that uses a given type-less allocator Alloc, which must provide the
+// following static functions for allocation and deallocation:
+// static void* Alloc::Allocate(size_t size);
+// static void Alloc::Free(void* ptr, size_t size);
+//
+// Inherits from the default allocator, std::allocator. Overrides allocate() and
+// deallocate() and some other functions.
+//
+// STLAllocator<T, MyAlloc> provides the same thread-safety guarantees as
+// MyAlloc.
+//
+// Usage example:
+// set<T, less<T>, STLAllocator<T, MyAlloc> > my_set;
+
+template <typename T, class Alloc>
+class STLAllocator : public std::allocator<T> {
+ public:
+ typedef size_t size_type;
+ typedef T* pointer;
+
+ template <class T1>
+ struct rebind {
+ typedef STLAllocator<T1, Alloc> other;
+ };
+
+ STLAllocator() {}
+ explicit STLAllocator(const STLAllocator&) {}
+ template <class T1>
+ STLAllocator(const STLAllocator<T1, Alloc>&) {}
+ ~STLAllocator() {}
+
+ pointer allocate(size_type n, const void* = 0) {
+ // Make sure the computation of the total allocation size does not cause an
+ // integer overflow.
+ RAW_CHECK(n < max_size());
+ return static_cast<T*>(Alloc::Allocate(n * sizeof(T)));
+ }
+
+ void deallocate(pointer p, size_type n) { Alloc::Free(p, n * sizeof(T)); }
+
+ size_type max_size() const {
+ return std::numeric_limits<size_t>::max() / sizeof(T);
+ }
+};
+
+#endif // COMPONENTS_METRICS_LEAK_DETECTOR_STL_ALLOCATOR_H_