author     bcwhite <bcwhite@chromium.org>          2016-03-17 06:21:56 -0700
committer  Commit bot <commit-bot@chromium.org>    2016-03-17 13:23:06 +0000
commit     3dd85c4f5f230f7c1fa1055cb035c72196a46237 (patch)
tree       14a45d86d1ed84dc906f47939467d81401365876 /base
parent     f2845f64f32b990039c7cd6011df7be7bce10f6f (diff)
Add support for persistent sparse histograms.
Sparse histograms don't have a single "values vector" that can be created
during construction. Rather, they create a list of value "records" that can
be easily added to as needed.

BUG=546019
TBR=mark
mark: gn and gyp (new files)

Review URL: https://codereview.chromium.org/1734033003

Cr-Commit-Position: refs/heads/master@{#381699}
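For orientation, below is a minimal standalone sketch (ordinary C++, not part of this patch) of that record-per-value idea: each sample value gets its own small fixed-size record in shared storage, and a per-histogram map caches value-to-record lookups. The SampleRecord layout mirrors the struct added in persistent_sample_map.cc; the std::deque "arena" is only a stand-in for the real PersistentMemoryAllocator.

// Minimal standalone sketch of the record-per-value storage scheme.
#include <cstdint>
#include <deque>
#include <iostream>
#include <map>

struct SampleRecord {
  uint64_t id;     // which histogram owns this record
  int32_t value;   // the sample value this record counts
  int32_t count;   // accumulated count for that value
};

class ToySparseSamples {
 public:
  ToySparseSamples(uint64_t id, std::deque<SampleRecord>* arena)
      : id_(id), arena_(arena) {}

  void Accumulate(int32_t value, int32_t count) {
    GetOrCreateRecord(value)->count += count;
  }

  int32_t GetCount(int32_t value) const {
    auto it = by_value_.find(value);
    return it == by_value_.end() ? 0 : it->second->count;
  }

 private:
  SampleRecord* GetOrCreateRecord(int32_t value) {
    auto it = by_value_.find(value);
    if (it != by_value_.end())
      return it->second;
    // "Allocate" a new record in the shared arena; std::deque growth does not
    // invalidate pointers to existing elements.
    arena_->push_back(SampleRecord{id_, value, 0});
    SampleRecord* record = &arena_->back();
    by_value_[value] = record;
    return record;
  }

  const uint64_t id_;
  std::deque<SampleRecord>* arena_;            // shared storage for all maps
  std::map<int32_t, SampleRecord*> by_value_;  // local value -> record cache
};

int main() {
  std::deque<SampleRecord> arena;
  ToySparseSamples samples(/*id=*/1, &arena);
  samples.Accumulate(42, 3);
  samples.Accumulate(42, 2);
  samples.Accumulate(7, 1);
  std::cout << samples.GetCount(42) << " " << samples.GetCount(7) << "\n";  // 5 1
  return 0;
}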
Diffstat (limited to 'base')
-rw-r--r--  base/BUILD.gn                                    |   3
-rw-r--r--  base/base.gyp                                    |   1
-rw-r--r--  base/base.gypi                                   |   2
-rw-r--r--  base/metrics/histogram.cc                        |  11
-rw-r--r--  base/metrics/histogram_unittest.cc               |  15
-rw-r--r--  base/metrics/persistent_histogram_allocator.cc   | 100
-rw-r--r--  base/metrics/persistent_sample_map.cc            | 267
-rw-r--r--  base/metrics/persistent_sample_map.h             |  78
-rw-r--r--  base/metrics/persistent_sample_map_unittest.cc   | 243
-rw-r--r--  base/metrics/sample_map.cc                       | 103
-rw-r--r--  base/metrics/sample_map.h                        |  31
-rw-r--r--  base/metrics/sparse_histogram.cc                 | 102
-rw-r--r--  base/metrics/sparse_histogram.h                  |  17
-rw-r--r--  base/metrics/sparse_histogram_unittest.cc        | 133
14 files changed, 964 insertions(+), 142 deletions(-)
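Call sites do not change with this patch: whether a sparse histogram's samples land in shared memory depends only on whether a global PersistentHistogramAllocator has been configured, as the SparseHistogram::FactoryGet() changes below show. A hedged usage sketch (the function and histogram name here are made up for illustration):

#include "base/metrics/sparse_histogram.h"

void RecordDecodeError(int error_code) {
  // With a global persistent histogram allocator in place, FactoryGet() backs
  // this histogram with PersistentSampleMap records in shared memory;
  // otherwise it falls back to a heap-allocated SampleMap. Either way the
  // call site looks the same.
  UMA_HISTOGRAM_SPARSE_SLOWLY("Hypothetical.DecodeError", error_code);
}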
diff --git a/base/BUILD.gn b/base/BUILD.gn
index e1b1d9d..1cbc063 100644
--- a/base/BUILD.gn
+++ b/base/BUILD.gn
@@ -536,6 +536,8 @@ component("base") {
"metrics/persistent_histogram_allocator.h",
"metrics/persistent_memory_allocator.cc",
"metrics/persistent_memory_allocator.h",
+ "metrics/persistent_sample_map.cc",
+ "metrics/persistent_sample_map.h",
"metrics/sample_map.cc",
"metrics/sample_map.h",
"metrics/sample_vector.cc",
@@ -1756,6 +1758,7 @@ test("base_unittests") {
"metrics/metrics_hashes_unittest.cc",
"metrics/persistent_histogram_allocator_unittest.cc",
"metrics/persistent_memory_allocator_unittest.cc",
+ "metrics/persistent_sample_map_unittest.cc",
"metrics/sample_map_unittest.cc",
"metrics/sample_vector_unittest.cc",
"metrics/sparse_histogram_unittest.cc",
diff --git a/base/base.gyp b/base/base.gyp
index 88e27f1..d7f3519 100644
--- a/base/base.gyp
+++ b/base/base.gyp
@@ -492,6 +492,7 @@
'metrics/metrics_hashes_unittest.cc',
'metrics/persistent_histogram_allocator_unittest.cc',
'metrics/persistent_memory_allocator_unittest.cc',
+ 'metrics/persistent_sample_map_unittest.cc',
'metrics/sample_map_unittest.cc',
'metrics/sample_vector_unittest.cc',
'metrics/sparse_histogram_unittest.cc',
diff --git a/base/base.gypi b/base/base.gypi
index 7ef9266..5d7693f 100644
--- a/base/base.gypi
+++ b/base/base.gypi
@@ -416,6 +416,8 @@
'metrics/persistent_histogram_allocator.h',
'metrics/persistent_memory_allocator.cc',
'metrics/persistent_memory_allocator.h',
+ 'metrics/persistent_sample_map.cc',
+ 'metrics/persistent_sample_map.h',
'metrics/sample_map.cc',
'metrics/sample_map.h',
'metrics/sample_vector.cc',
diff --git a/base/metrics/histogram.cc b/base/metrics/histogram.cc
index cd6d6be..35197e2 100644
--- a/base/metrics/histogram.cc
+++ b/base/metrics/histogram.cc
@@ -127,10 +127,9 @@ class Histogram::Factory {
}
// Perform any required datafill on the just-created histogram. If
- // overridden, be sure to call the "super" version.
- virtual void FillHistogram(HistogramBase* histogram) {
- histogram->SetFlags(flags_);
- }
+ // overridden, be sure to call the "super" version -- this method may not
+ // always remain empty.
+ virtual void FillHistogram(HistogramBase* histogram) {}
// These values are protected (instead of private) because they need to
// be accessible to methods of sub-classes in order to avoid passing
@@ -172,7 +171,7 @@ HistogramBase* Histogram::Factory::Build() {
}
// Try to create the histogram using a "persistent" allocator. As of
- // 2015-01-14, the availability of such is controlled by a base::Feature
+ // 2016-02-25, the availability of such is controlled by a base::Feature
// that is off by default. If the allocator doesn't exist or if
// allocating from it fails, code below will allocate the histogram from
// the process heap.
@@ -181,7 +180,6 @@ HistogramBase* Histogram::Factory::Build() {
PersistentHistogramAllocator* allocator =
PersistentHistogramAllocator::GetGlobalAllocator();
if (allocator) {
- flags_ |= HistogramBase::kIsPersistent;
tentative_histogram = allocator->AllocateHistogram(
histogram_type_,
name_,
@@ -199,6 +197,7 @@ HistogramBase* Histogram::Factory::Build() {
DCHECK(!allocator); // Shouldn't have failed.
flags_ &= ~HistogramBase::kIsPersistent;
tentative_histogram = HeapAlloc(registered_ranges);
+ tentative_histogram->SetFlags(flags_);
}
FillHistogram(tentative_histogram.get());
diff --git a/base/metrics/histogram_unittest.cc b/base/metrics/histogram_unittest.cc
index f621ec5..03dc7bd 100644
--- a/base/metrics/histogram_unittest.cc
+++ b/base/metrics/histogram_unittest.cc
@@ -37,11 +37,12 @@ class HistogramTest : public testing::TestWithParam<bool> {
HistogramTest() : use_persistent_histogram_allocator_(GetParam()) {}
void SetUp() override {
+ if (use_persistent_histogram_allocator_)
+ CreatePersistentHistogramAllocator();
+
// Each test will have a clean state (no Histogram / BucketRanges
// registered).
InitializeStatisticsRecorder();
- if (use_persistent_histogram_allocator_)
- CreatePersistentHistogramAllocator();
}
void TearDown() override {
@@ -69,14 +70,8 @@ class HistogramTest : public testing::TestWithParam<bool> {
// any persistent memory segment (which simplifies some tests).
PersistentHistogramAllocator::GetCreateHistogramResultHistogram();
- if (!allocator_memory_)
- allocator_memory_.reset(new char[kAllocatorMemorySize]);
-
- PersistentHistogramAllocator::ReleaseGlobalAllocatorForTesting();
- memset(allocator_memory_.get(), 0, kAllocatorMemorySize);
- PersistentHistogramAllocator::CreateGlobalAllocatorOnPersistentMemory(
- allocator_memory_.get(), kAllocatorMemorySize, 0, 0,
- "HistogramAllocatorTest");
+ PersistentHistogramAllocator::CreateGlobalAllocatorOnLocalMemory(
+ kAllocatorMemorySize, 0, "HistogramAllocatorTest");
allocator_ =
PersistentHistogramAllocator::GetGlobalAllocator()->memory_allocator();
}
diff --git a/base/metrics/persistent_histogram_allocator.cc b/base/metrics/persistent_histogram_allocator.cc
index 2e4029e..6006d31 100644
--- a/base/metrics/persistent_histogram_allocator.cc
+++ b/base/metrics/persistent_histogram_allocator.cc
@@ -10,6 +10,7 @@
#include "base/metrics/histogram.h"
#include "base/metrics/histogram_base.h"
#include "base/metrics/histogram_samples.h"
+#include "base/metrics/sparse_histogram.h"
#include "base/metrics/statistics_recorder.h"
#include "base/synchronization/lock.h"
@@ -71,12 +72,12 @@ scoped_ptr<BucketRanges> CreateRangesFromData(
size_t CalculateRequiredCountsBytes(size_t bucket_count) {
// 2 because each "sample count" also requires a backup "logged count"
// used for calculating the delta during snapshot operations.
- const unsigned kBytesPerBucket = 2 * sizeof(HistogramBase::AtomicCount);
+ const size_t kBytesPerBucket = 2 * sizeof(HistogramBase::AtomicCount);
// If the |bucket_count| is such that it would overflow the return type,
// perhaps as the result of a malicious actor, then return zero to
// indicate the problem to the caller.
- if (bucket_count > std::numeric_limits<uint32_t>::max() / kBytesPerBucket)
+ if (bucket_count > std::numeric_limits<size_t>::max() / kBytesPerBucket)
return 0;
return bucket_count * kBytesPerBucket;
@@ -276,6 +277,18 @@ scoped_ptr<HistogramBase> PersistentHistogramAllocator::CreateHistogram(
return nullptr;
}
+ // Sparse histograms are quite different so handle them as a special case.
+ if (histogram_data_ptr->histogram_type == SPARSE_HISTOGRAM) {
+ scoped_ptr<HistogramBase> histogram = SparseHistogram::PersistentCreate(
+ memory_allocator(), histogram_data_ptr->name,
+ &histogram_data_ptr->samples_metadata,
+ &histogram_data_ptr->logged_metadata);
+ DCHECK(histogram);
+ histogram->SetFlags(histogram_data_ptr->flags);
+ RecordCreateHistogramResult(CREATE_HISTOGRAM_SUCCESS);
+ return histogram;
+ }
+
// Copy the histogram_data to local storage because anything in persistent
// memory cannot be trusted as it could be changed at any moment by a
// malicious actor that shares access. The contents of histogram_data are
@@ -318,7 +331,7 @@ scoped_ptr<HistogramBase> PersistentHistogramAllocator::CreateHistogram(
histogram_data.counts_ref, kTypeIdCountsArray);
size_t counts_bytes =
CalculateRequiredCountsBytes(histogram_data.bucket_count);
- if (!counts_data || !counts_bytes ||
+ if (!counts_data || counts_bytes == 0 ||
memory_allocator_->GetAllocSize(histogram_data.counts_ref) <
counts_bytes) {
RecordCreateHistogramResult(CREATE_HISTOGRAM_INVALID_COUNTS_ARRAY);
@@ -445,49 +458,64 @@ scoped_ptr<HistogramBase> PersistentHistogramAllocator::AllocateHistogram(
return nullptr;
}
- // If CalculateRequiredCountsBytes() returns zero then the bucket_count
- // was not valid.
- size_t bucket_count = bucket_ranges->bucket_count();
- size_t counts_bytes = CalculateRequiredCountsBytes(bucket_count);
- if (!counts_bytes) {
- NOTREACHED();
- return nullptr;
- }
-
- size_t ranges_bytes = (bucket_count + 1) * sizeof(HistogramBase::Sample);
- PersistentMemoryAllocator::Reference ranges_ref =
- memory_allocator_->Allocate(ranges_bytes, kTypeIdRangesArray);
- PersistentMemoryAllocator::Reference counts_ref =
- memory_allocator_->Allocate(counts_bytes, kTypeIdCountsArray);
+ // Create the metadata necessary for a persistent sparse histogram. This
+ // is done first because it is a small subset of what is required for
+ // other histograms.
PersistentMemoryAllocator::Reference histogram_ref =
memory_allocator_->Allocate(
offsetof(PersistentHistogramData, name) + name.length() + 1,
kTypeIdHistogram);
- HistogramBase::Sample* ranges_data =
- memory_allocator_->GetAsObject<HistogramBase::Sample>(ranges_ref,
- kTypeIdRangesArray);
PersistentHistogramData* histogram_data =
memory_allocator_->GetAsObject<PersistentHistogramData>(histogram_ref,
kTypeIdHistogram);
+ if (histogram_data) {
+ memcpy(histogram_data->name, name.c_str(), name.size() + 1);
+ histogram_data->histogram_type = histogram_type;
+ histogram_data->flags = flags | HistogramBase::kIsPersistent;
+ }
- // Only continue here if all allocations were successful. If they weren't,
- // there is no way to free the space but that's not really a problem since
- // the allocations only fail because the space is full or corrupt and so
- // any future attempts will also fail.
- if (counts_ref && ranges_data && histogram_data) {
- strcpy(histogram_data->name, name.c_str());
- for (size_t i = 0; i < bucket_ranges->size(); ++i)
- ranges_data[i] = bucket_ranges->range(i);
+ // Create the remaining metadata necessary for regular histograms.
+ if (histogram_type != SPARSE_HISTOGRAM) {
+ size_t bucket_count = bucket_ranges->bucket_count();
+ size_t counts_bytes = CalculateRequiredCountsBytes(bucket_count);
+ if (counts_bytes == 0) {
+ // |bucket_count| was out-of-range.
+ NOTREACHED();
+ return nullptr;
+ }
- histogram_data->histogram_type = histogram_type;
- histogram_data->flags = flags;
- histogram_data->minimum = minimum;
- histogram_data->maximum = maximum;
- histogram_data->bucket_count = static_cast<uint32_t>(bucket_count);
- histogram_data->ranges_ref = ranges_ref;
- histogram_data->ranges_checksum = bucket_ranges->checksum();
- histogram_data->counts_ref = counts_ref;
+ size_t ranges_bytes = (bucket_count + 1) * sizeof(HistogramBase::Sample);
+ PersistentMemoryAllocator::Reference counts_ref =
+ memory_allocator_->Allocate(counts_bytes, kTypeIdCountsArray);
+ PersistentMemoryAllocator::Reference ranges_ref =
+ memory_allocator_->Allocate(ranges_bytes, kTypeIdRangesArray);
+ HistogramBase::Sample* ranges_data =
+ memory_allocator_->GetAsObject<HistogramBase::Sample>(
+ ranges_ref, kTypeIdRangesArray);
+
+ // Only continue here if all allocations were successful. If they weren't,
+ // there is no way to free the space but that's not really a problem since
+ // the allocations only fail because the space is full or corrupt and so
+ // any future attempts will also fail.
+ if (counts_ref && ranges_data && histogram_data) {
+ for (size_t i = 0; i < bucket_ranges->size(); ++i)
+ ranges_data[i] = bucket_ranges->range(i);
+
+ histogram_data->minimum = minimum;
+ histogram_data->maximum = maximum;
+ // |bucket_count| must fit within 32-bits or the allocation of the counts
+ // array would have failed for being too large; the allocator supports
+ // less than 4GB total size.
+ histogram_data->bucket_count = static_cast<uint32_t>(bucket_count);
+ histogram_data->ranges_ref = ranges_ref;
+ histogram_data->ranges_checksum = bucket_ranges->checksum();
+ histogram_data->counts_ref = counts_ref;
+ } else {
+ histogram_data = nullptr; // Clear this for proper handling below.
+ }
+ }
+ if (histogram_data) {
// Create the histogram using resources in persistent memory. This ends up
// resolving the "ref" values stored in histogram_data instead of just
// using what is already known above but avoids duplicating the switch
diff --git a/base/metrics/persistent_sample_map.cc b/base/metrics/persistent_sample_map.cc
new file mode 100644
index 0000000..014a865
--- /dev/null
+++ b/base/metrics/persistent_sample_map.cc
@@ -0,0 +1,267 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/metrics/persistent_sample_map.h"
+
+#include "base/logging.h"
+#include "base/stl_util.h"
+
+namespace base {
+
+typedef HistogramBase::Count Count;
+typedef HistogramBase::Sample Sample;
+
+namespace {
+
+// An iterator for going through a PersistentSampleMap. The logic here is
+// identical to that of SampleMapIterator but with different data structures.
+// Changes here likely need to be duplicated there.
+class PersistentSampleMapIterator : public SampleCountIterator {
+ public:
+ typedef std::map<HistogramBase::Sample, HistogramBase::Count*>
+ SampleToCountMap;
+
+ explicit PersistentSampleMapIterator(const SampleToCountMap& sample_counts);
+ ~PersistentSampleMapIterator() override;
+
+ // SampleCountIterator:
+ bool Done() const override;
+ void Next() override;
+ void Get(HistogramBase::Sample* min,
+ HistogramBase::Sample* max,
+ HistogramBase::Count* count) const override;
+
+ private:
+ void SkipEmptyBuckets();
+
+ SampleToCountMap::const_iterator iter_;
+ const SampleToCountMap::const_iterator end_;
+};
+
+PersistentSampleMapIterator::PersistentSampleMapIterator(
+ const SampleToCountMap& sample_counts)
+ : iter_(sample_counts.begin()),
+ end_(sample_counts.end()) {
+ SkipEmptyBuckets();
+}
+
+PersistentSampleMapIterator::~PersistentSampleMapIterator() {}
+
+bool PersistentSampleMapIterator::Done() const {
+ return iter_ == end_;
+}
+
+void PersistentSampleMapIterator::Next() {
+ DCHECK(!Done());
+ ++iter_;
+ SkipEmptyBuckets();
+}
+
+void PersistentSampleMapIterator::Get(Sample* min,
+ Sample* max,
+ Count* count) const {
+ DCHECK(!Done());
+ if (min)
+ *min = iter_->first;
+ if (max)
+ *max = iter_->first + 1;
+ if (count)
+ *count = *iter_->second;
+}
+
+void PersistentSampleMapIterator::SkipEmptyBuckets() {
+ while (!Done() && *iter_->second == 0) {
+ ++iter_;
+ }
+}
+
+// This structure holds an entry for a PersistentSampleMap within a persistent
+// memory allocator. The "id" must be unique across all maps held by an
+// allocator or they will get attached to the wrong sample map.
+struct SampleRecord {
+ uint64_t id; // Unique identifier of owner.
+ Sample value; // The value for which this record holds a count.
+ Count count; // The count associated with the above value.
+};
+
+// The type-id used to identify sample records inside an allocator.
+const uint32_t kTypeIdSampleRecord = 0x8FE6A69F + 1; // SHA1(SampleRecord) v1
+
+} // namespace
+
+PersistentSampleMap::PersistentSampleMap(
+ uint64_t id,
+ PersistentMemoryAllocator* allocator,
+ Metadata* meta)
+ : HistogramSamples(id, meta),
+ allocator_(allocator) {
+ // This is created once but will continue to return new iterables even when
+ // it has previously reached the end.
+ allocator->CreateIterator(&sample_iter_);
+
+ // Load all existing samples during construction. It's no worse to do it
+ // here than at some point in the future and could be better if construction
+ // takes place on some background thread. New samples could be created at
+ // any time by parallel threads; if so, they'll get loaded when needed.
+ ImportSamples(kAllSamples);
+}
+
+PersistentSampleMap::~PersistentSampleMap() {}
+
+void PersistentSampleMap::Accumulate(Sample value, Count count) {
+ *GetOrCreateSampleCountStorage(value) += count;
+ IncreaseSum(static_cast<int64_t>(count) * value);
+ IncreaseRedundantCount(count);
+}
+
+Count PersistentSampleMap::GetCount(Sample value) const {
+ // Have to override "const" to make sure all samples have been loaded before
+ // being able to know what value to return.
+ Count* count_pointer =
+ const_cast<PersistentSampleMap*>(this)->GetSampleCountStorage(value);
+ return count_pointer ? *count_pointer : 0;
+}
+
+Count PersistentSampleMap::TotalCount() const {
+ // Have to override "const" in order to make sure all samples have been
+ // loaded before trying to iterate over the map.
+ const_cast<PersistentSampleMap*>(this)->ImportSamples(kAllSamples);
+
+ Count count = 0;
+ for (const auto& entry : sample_counts_) {
+ count += *entry.second;
+ }
+ return count;
+}
+
+scoped_ptr<SampleCountIterator> PersistentSampleMap::Iterator() const {
+ // Have to override "const" in order to make sure all samples have been
+ // loaded before trying to iterate over the map.
+ const_cast<PersistentSampleMap*>(this)->ImportSamples(kAllSamples);
+ return make_scoped_ptr(new PersistentSampleMapIterator(sample_counts_));
+}
+
+bool PersistentSampleMap::AddSubtractImpl(SampleCountIterator* iter,
+ Operator op) {
+ Sample min;
+ Sample max;
+ Count count;
+ for (; !iter->Done(); iter->Next()) {
+ iter->Get(&min, &max, &count);
+ if (min + 1 != max)
+ return false; // SparseHistogram only supports buckets of size 1.
+
+ *GetOrCreateSampleCountStorage(min) +=
+ (op == HistogramSamples::ADD) ? count : -count;
+ }
+ return true;
+}
+
+Count* PersistentSampleMap::GetSampleCountStorage(Sample value) {
+ DCHECK_LE(0, value);
+
+ // If |value| is already in the map, just return that.
+ auto it = sample_counts_.find(value);
+ if (it != sample_counts_.end())
+ return it->second;
+
+ // Import any new samples from persistent memory looking for the value.
+ return ImportSamples(value);
+}
+
+Count* PersistentSampleMap::GetOrCreateSampleCountStorage(Sample value) {
+ // Get any existing count storage.
+ Count* count_pointer = GetSampleCountStorage(value);
+ if (count_pointer)
+ return count_pointer;
+
+ // Create a new record in persistent memory for the value.
+ PersistentMemoryAllocator::Reference ref =
+ allocator_->Allocate(sizeof(SampleRecord), kTypeIdSampleRecord);
+ SampleRecord* record =
+ allocator_->GetAsObject<SampleRecord>(ref, kTypeIdSampleRecord);
+ if (!record) {
+ // If the allocator was unable to create a record then it is full or
+ // corrupt. Instead, allocate the counter from the heap. This sample will
+ // not be persistent, will not be shared, and will leak but it's better
+ // than crashing.
+ NOTREACHED() << "full=" << allocator_->IsFull()
+ << ", corrupt=" << allocator_->IsCorrupt();
+ count_pointer = new Count(0);
+ sample_counts_[value] = count_pointer;
+ return count_pointer;
+ }
+ record->id = id();
+ record->value = value;
+ record->count = 0; // Should already be zero but don't trust other processes.
+ allocator_->MakeIterable(ref);
+
+ // A race condition between two independent processes (i.e. two independent
+ // histogram objects sharing the same sample data) could cause two of the
+ // above records to be created. The allocator, however, forces a strict
+ // ordering on iterable objects so use the import method to actually add the
+ // just-created record. This ensures that all PersistentSampleMap objects
+ // will always use the same record, whichever was first made iterable.
+ // Thread-safety within a process where multiple threads use the same
+ // histogram object is delegated to the controlling histogram object which,
+ // for sparse histograms, is a lock object.
+ count_pointer = ImportSamples(value);
+ DCHECK(count_pointer);
+ return count_pointer;
+}
+
+Count* PersistentSampleMap::ImportSamples(Sample until_value) {
+ // TODO(bcwhite): This import operates in O(V+N) total time per sparse
+ // histogram where V is the number of values for this object and N is
+ // the number of other iterable objects in the allocator. This becomes
+ // O(S*(SV+N)) or O(S^2*V + SN) overall where S is the number of sparse
+ // histograms.
+ //
+ // This is actually okay when histograms are expected to exist for the
+ // lifetime of the program, spreading the cost out, and S and V are
+ // relatively small, as is the current case.
+ //
+ // However, it is not so good for objects that are created, destroyed, and
+ // recreated on a periodic basis, such as when making a snapshot of
+ // sparse histograms owned by another, ongoing process. In that case, the
+ // entire cost is compressed into a single sequential operation... on the
+ // UI thread no less.
+ //
+ // This will be addressed in a future CL.
+
+ uint32_t type_id;
+ PersistentMemoryAllocator::Reference ref;
+ while ((ref = allocator_->GetNextIterable(&sample_iter_, &type_id)) != 0) {
+ if (type_id == kTypeIdSampleRecord) {
+ SampleRecord* record =
+ allocator_->GetAsObject<SampleRecord>(ref, kTypeIdSampleRecord);
+ if (!record)
+ continue;
+
+ // A sample record has been found but may not be for this histogram.
+ if (record->id != id())
+ continue;
+
+ // Check if the record's value is already known.
+ if (!ContainsKey(sample_counts_, record->value)) {
+ // No: Add it to map of known values if the value is valid.
+ if (record->value >= 0)
+ sample_counts_[record->value] = &record->count;
+ } else {
+ // Yes: Ignore it; it's a duplicate caused by a race condition -- see
+ // code & comment in GetOrCreateSampleCountStorage() for details.
+ // Check that nothing ever operated on the duplicate record.
+ DCHECK_EQ(0, record->count);
+ }
+
+ // Stop if it's the value being searched for.
+ if (record->value == until_value)
+ return &record->count;
+ }
+ }
+
+ return nullptr;
+}
+
+} // namespace base
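To put the TODO in ImportSamples() above in concrete terms (numbers chosen purely for illustration): with S = 50 sparse histograms, V = 20 values each, and N = 2,000 other iterable objects in the allocator, the aggregate import work is on the order of S^2*V + S*N = 50^2*20 + 50*2,000 = 150,000 record visits. Spread over the lifetime of long-lived histograms that cost is negligible, but it is paid in one sequential pass when a snapshotting process re-creates all S maps at once, which is the case the TODO flags.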
diff --git a/base/metrics/persistent_sample_map.h b/base/metrics/persistent_sample_map.h
new file mode 100644
index 0000000..a23b751
--- /dev/null
+++ b/base/metrics/persistent_sample_map.h
@@ -0,0 +1,78 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// PersistentSampleMap implements the HistogramSamples interface. It is used
+// by the SparseHistogram class to store samples in persistent memory, which
+// allows them to be shared between processes or live across restarts.
+
+#ifndef BASE_METRICS_PERSISTENT_SAMPLE_MAP_H_
+#define BASE_METRICS_PERSISTENT_SAMPLE_MAP_H_
+
+#include <stdint.h>
+
+#include <map>
+
+#include "base/compiler_specific.h"
+#include "base/macros.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/metrics/histogram_base.h"
+#include "base/metrics/histogram_samples.h"
+#include "base/metrics/persistent_memory_allocator.h"
+
+namespace base {
+
+// The logic here is similar to that of SampleMap but with different data
+// structures. Changes here likely need to be duplicated there.
+class BASE_EXPORT PersistentSampleMap : public HistogramSamples {
+ public:
+ PersistentSampleMap(uint64_t id,
+ PersistentMemoryAllocator* allocator,
+ Metadata* meta);
+ ~PersistentSampleMap() override;
+
+ // HistogramSamples:
+ void Accumulate(HistogramBase::Sample value,
+ HistogramBase::Count count) override;
+ HistogramBase::Count GetCount(HistogramBase::Sample value) const override;
+ HistogramBase::Count TotalCount() const override;
+ scoped_ptr<SampleCountIterator> Iterator() const override;
+
+ protected:
+ // Performs arithmetic. |op| is ADD or SUBTRACT.
+ bool AddSubtractImpl(SampleCountIterator* iter, Operator op) override;
+
+ // Gets a pointer to a "count" corresponding to a given |value|. Returns NULL
+ // if sample does not exist.
+ HistogramBase::Count* GetSampleCountStorage(HistogramBase::Sample value);
+
+ // Gets a pointer to a "count" corresponding to a given |value|, creating
+ // the sample (initialized to zero) if it does not already exist.
+ HistogramBase::Count* GetOrCreateSampleCountStorage(
+ HistogramBase::Sample value);
+
+ private:
+ enum : HistogramBase::Sample { kAllSamples = -1 };
+
+ // Imports samples from persistent memory by iterating over all sample
+ // records found therein, adding them to the sample_counts_ map. If a
+ // count for the sample |until_value| is found, stop the import and return
+ // a pointer to that counter. If that value is not found, null will be
+ // returned after all currently available samples have been loaded. Pass
+ // kAllSamples to force the importing of all available samples.
+ HistogramBase::Count* ImportSamples(HistogramBase::Sample until_value);
+
+ // All created/loaded sample values and their associated counts. The storage
+ // for the actual Count numbers is owned by the |allocator_|.
+ std::map<HistogramBase::Sample, HistogramBase::Count*> sample_counts_;
+
+ // The persistent memory allocator holding samples and an iterator through it.
+ PersistentMemoryAllocator* allocator_;
+ PersistentMemoryAllocator::Iterator sample_iter_;
+
+ DISALLOW_COPY_AND_ASSIGN(PersistentSampleMap);
+};
+
+} // namespace base
+
+#endif // BASE_METRICS_PERSISTENT_SAMPLE_MAP_H_
diff --git a/base/metrics/persistent_sample_map_unittest.cc b/base/metrics/persistent_sample_map_unittest.cc
new file mode 100644
index 0000000..c735f8f
--- /dev/null
+++ b/base/metrics/persistent_sample_map_unittest.cc
@@ -0,0 +1,243 @@
+// Copyright (c) 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/metrics/persistent_sample_map.h"
+
+#include "base/memory/scoped_ptr.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+namespace {
+
+TEST(PersistentSampleMapTest, AccumulateTest) {
+ LocalPersistentMemoryAllocator allocator(64 << 10, 0, ""); // 64 KiB
+
+ HistogramSamples::Metadata* meta =
+ allocator.GetAsObject<HistogramSamples::Metadata>(
+ allocator.Allocate(sizeof(HistogramSamples::Metadata), 0), 0);
+ PersistentSampleMap samples(1, &allocator, meta);
+
+ samples.Accumulate(1, 100);
+ samples.Accumulate(2, 200);
+ samples.Accumulate(1, -200);
+ EXPECT_EQ(-100, samples.GetCount(1));
+ EXPECT_EQ(200, samples.GetCount(2));
+
+ EXPECT_EQ(300, samples.sum());
+ EXPECT_EQ(100, samples.TotalCount());
+ EXPECT_EQ(samples.redundant_count(), samples.TotalCount());
+}
+
+TEST(PersistentSampleMapTest, Accumulate_LargeValuesDontOverflow) {
+ LocalPersistentMemoryAllocator allocator(64 << 10, 0, ""); // 64 KiB
+
+ HistogramSamples::Metadata* meta =
+ allocator.GetAsObject<HistogramSamples::Metadata>(
+ allocator.Allocate(sizeof(HistogramSamples::Metadata), 0), 0);
+ PersistentSampleMap samples(1, &allocator, meta);
+
+ samples.Accumulate(250000000, 100);
+ samples.Accumulate(500000000, 200);
+ samples.Accumulate(250000000, -200);
+ EXPECT_EQ(-100, samples.GetCount(250000000));
+ EXPECT_EQ(200, samples.GetCount(500000000));
+
+ EXPECT_EQ(75000000000LL, samples.sum());
+ EXPECT_EQ(100, samples.TotalCount());
+ EXPECT_EQ(samples.redundant_count(), samples.TotalCount());
+}
+
+TEST(PersistentSampleMapTest, AddSubtractTest) {
+ LocalPersistentMemoryAllocator allocator(64 << 10, 0, ""); // 64 KiB
+
+ HistogramSamples::Metadata* meta1 =
+ allocator.GetAsObject<HistogramSamples::Metadata>(
+ allocator.Allocate(sizeof(HistogramSamples::Metadata), 0), 0);
+ HistogramSamples::Metadata* meta2 =
+ allocator.GetAsObject<HistogramSamples::Metadata>(
+ allocator.Allocate(sizeof(HistogramSamples::Metadata), 0), 0);
+ PersistentSampleMap samples1(1, &allocator, meta1);
+ PersistentSampleMap samples2(2, &allocator, meta2);
+
+ samples1.Accumulate(1, 100);
+ samples1.Accumulate(2, 100);
+ samples1.Accumulate(3, 100);
+
+ samples2.Accumulate(1, 200);
+ samples2.Accumulate(2, 200);
+ samples2.Accumulate(4, 200);
+
+ samples1.Add(samples2);
+ EXPECT_EQ(300, samples1.GetCount(1));
+ EXPECT_EQ(300, samples1.GetCount(2));
+ EXPECT_EQ(100, samples1.GetCount(3));
+ EXPECT_EQ(200, samples1.GetCount(4));
+ EXPECT_EQ(2000, samples1.sum());
+ EXPECT_EQ(900, samples1.TotalCount());
+ EXPECT_EQ(samples1.redundant_count(), samples1.TotalCount());
+
+ samples1.Subtract(samples2);
+ EXPECT_EQ(100, samples1.GetCount(1));
+ EXPECT_EQ(100, samples1.GetCount(2));
+ EXPECT_EQ(100, samples1.GetCount(3));
+ EXPECT_EQ(0, samples1.GetCount(4));
+ EXPECT_EQ(600, samples1.sum());
+ EXPECT_EQ(300, samples1.TotalCount());
+ EXPECT_EQ(samples1.redundant_count(), samples1.TotalCount());
+}
+
+TEST(PersistentSampleMapTest, PersistenceTest) {
+ LocalPersistentMemoryAllocator allocator(64 << 10, 0, ""); // 64 KiB
+
+ HistogramSamples::Metadata* meta12 =
+ allocator.GetAsObject<HistogramSamples::Metadata>(
+ allocator.Allocate(sizeof(HistogramSamples::Metadata), 0), 0);
+ PersistentSampleMap samples1(12, &allocator, meta12);
+ samples1.Accumulate(1, 100);
+ samples1.Accumulate(2, 200);
+ samples1.Accumulate(1, -200);
+ EXPECT_EQ(-100, samples1.GetCount(1));
+ EXPECT_EQ(200, samples1.GetCount(2));
+ EXPECT_EQ(300, samples1.sum());
+ EXPECT_EQ(100, samples1.TotalCount());
+ EXPECT_EQ(samples1.redundant_count(), samples1.TotalCount());
+
+ PersistentSampleMap samples2(12, &allocator, meta12);
+ EXPECT_EQ(samples1.id(), samples2.id());
+ EXPECT_EQ(samples1.sum(), samples2.sum());
+ EXPECT_EQ(samples1.redundant_count(), samples2.redundant_count());
+ EXPECT_EQ(samples1.TotalCount(), samples2.TotalCount());
+ EXPECT_EQ(-100, samples2.GetCount(1));
+ EXPECT_EQ(200, samples2.GetCount(2));
+ EXPECT_EQ(300, samples2.sum());
+ EXPECT_EQ(100, samples2.TotalCount());
+ EXPECT_EQ(samples2.redundant_count(), samples2.TotalCount());
+
+ EXPECT_EQ(0, samples2.GetCount(3));
+ EXPECT_EQ(0, samples1.GetCount(3));
+ samples2.Accumulate(3, 300);
+ EXPECT_EQ(300, samples2.GetCount(3));
+ EXPECT_EQ(300, samples1.GetCount(3));
+ EXPECT_EQ(samples1.sum(), samples2.sum());
+ EXPECT_EQ(samples1.redundant_count(), samples2.redundant_count());
+ EXPECT_EQ(samples1.TotalCount(), samples2.TotalCount());
+}
+
+TEST(PersistentSampleMapIteratorTest, IterateTest) {
+ LocalPersistentMemoryAllocator allocator(64 << 10, 0, ""); // 64 KiB
+
+ HistogramSamples::Metadata* meta =
+ allocator.GetAsObject<HistogramSamples::Metadata>(
+ allocator.Allocate(sizeof(HistogramSamples::Metadata), 0), 0);
+ PersistentSampleMap samples(1, &allocator, meta);
+ samples.Accumulate(1, 100);
+ samples.Accumulate(2, 200);
+ samples.Accumulate(4, -300);
+ samples.Accumulate(5, 0);
+
+ scoped_ptr<SampleCountIterator> it = samples.Iterator();
+
+ HistogramBase::Sample min;
+ HistogramBase::Sample max;
+ HistogramBase::Count count;
+
+ it->Get(&min, &max, &count);
+ EXPECT_EQ(1, min);
+ EXPECT_EQ(2, max);
+ EXPECT_EQ(100, count);
+ EXPECT_FALSE(it->GetBucketIndex(NULL));
+
+ it->Next();
+ it->Get(&min, &max, &count);
+ EXPECT_EQ(2, min);
+ EXPECT_EQ(3, max);
+ EXPECT_EQ(200, count);
+
+ it->Next();
+ it->Get(&min, &max, &count);
+ EXPECT_EQ(4, min);
+ EXPECT_EQ(5, max);
+ EXPECT_EQ(-300, count);
+
+ it->Next();
+ EXPECT_TRUE(it->Done());
+}
+
+TEST(PersistentSampleMapIteratorTest, SkipEmptyRanges) {
+ LocalPersistentMemoryAllocator allocator(64 << 10, 0, ""); // 64 KiB
+
+ HistogramSamples::Metadata* meta =
+ allocator.GetAsObject<HistogramSamples::Metadata>(
+ allocator.Allocate(sizeof(HistogramSamples::Metadata), 0), 0);
+ PersistentSampleMap samples(1, &allocator, meta);
+ samples.Accumulate(5, 1);
+ samples.Accumulate(10, 2);
+ samples.Accumulate(15, 3);
+ samples.Accumulate(20, 4);
+ samples.Accumulate(25, 5);
+
+ HistogramSamples::Metadata* meta2 =
+ allocator.GetAsObject<HistogramSamples::Metadata>(
+ allocator.Allocate(sizeof(HistogramSamples::Metadata), 0), 0);
+ PersistentSampleMap samples2(2, &allocator, meta2);
+ samples2.Accumulate(5, 1);
+ samples2.Accumulate(20, 4);
+ samples2.Accumulate(25, 5);
+
+ samples.Subtract(samples2);
+
+ scoped_ptr<SampleCountIterator> it = samples.Iterator();
+ EXPECT_FALSE(it->Done());
+
+ HistogramBase::Sample min;
+ HistogramBase::Sample max;
+ HistogramBase::Count count;
+
+ it->Get(&min, &max, &count);
+ EXPECT_EQ(10, min);
+ EXPECT_EQ(11, max);
+ EXPECT_EQ(2, count);
+
+ it->Next();
+ EXPECT_FALSE(it->Done());
+
+ it->Get(&min, &max, &count);
+ EXPECT_EQ(15, min);
+ EXPECT_EQ(16, max);
+ EXPECT_EQ(3, count);
+
+ it->Next();
+ EXPECT_TRUE(it->Done());
+}
+
+// Only run this test on builds that support catching a DCHECK crash.
+#if (!defined(NDEBUG) || defined(DCHECK_ALWAYS_ON)) && GTEST_HAS_DEATH_TEST
+TEST(PersistentSampleMapIteratorDeathTest, IterateDoneTest) {
+ LocalPersistentMemoryAllocator allocator(64 << 10, 0, ""); // 64 KiB
+
+ HistogramSamples::Metadata* meta =
+ allocator.GetAsObject<HistogramSamples::Metadata>(
+ allocator.Allocate(sizeof(HistogramSamples::Metadata), 0), 0);
+ PersistentSampleMap samples(1, &allocator, meta);
+
+ scoped_ptr<SampleCountIterator> it = samples.Iterator();
+
+ EXPECT_TRUE(it->Done());
+
+ HistogramBase::Sample min;
+ HistogramBase::Sample max;
+ HistogramBase::Count count;
+ EXPECT_DEATH(it->Get(&min, &max, &count), "");
+
+ EXPECT_DEATH(it->Next(), "");
+
+ samples.Accumulate(1, 100);
+ it = samples.Iterator();
+ EXPECT_FALSE(it->Done());
+}
+#endif
+// (!defined(NDEBUG) || defined(DCHECK_ALWAYS_ON)) && GTEST_HAS_DEATH_TEST
+
+} // namespace
+} // namespace base
diff --git a/base/metrics/sample_map.cc b/base/metrics/sample_map.cc
index e276b91..21a4e35 100644
--- a/base/metrics/sample_map.cc
+++ b/base/metrics/sample_map.cc
@@ -5,12 +5,76 @@
#include "base/metrics/sample_map.h"
#include "base/logging.h"
+#include "base/stl_util.h"
namespace base {
typedef HistogramBase::Count Count;
typedef HistogramBase::Sample Sample;
+namespace {
+
+// An iterator for going through a SampleMap. The logic here is identical
+// to that of PersistentSampleMapIterator but with different data structures.
+// Changes here likely need to be duplicated there.
+class SampleMapIterator : public SampleCountIterator {
+ public:
+ typedef std::map<HistogramBase::Sample, HistogramBase::Count>
+ SampleToCountMap;
+
+ explicit SampleMapIterator(const SampleToCountMap& sample_counts);
+ ~SampleMapIterator() override;
+
+ // SampleCountIterator:
+ bool Done() const override;
+ void Next() override;
+ void Get(HistogramBase::Sample* min,
+ HistogramBase::Sample* max,
+ HistogramBase::Count* count) const override;
+
+ private:
+ void SkipEmptyBuckets();
+
+ SampleToCountMap::const_iterator iter_;
+ const SampleToCountMap::const_iterator end_;
+};
+
+SampleMapIterator::SampleMapIterator(const SampleToCountMap& sample_counts)
+ : iter_(sample_counts.begin()),
+ end_(sample_counts.end()) {
+ SkipEmptyBuckets();
+}
+
+SampleMapIterator::~SampleMapIterator() {}
+
+bool SampleMapIterator::Done() const {
+ return iter_ == end_;
+}
+
+void SampleMapIterator::Next() {
+ DCHECK(!Done());
+ ++iter_;
+ SkipEmptyBuckets();
+}
+
+void SampleMapIterator::Get(Sample* min, Sample* max, Count* count) const {
+ DCHECK(!Done());
+ if (min)
+ *min = iter_->first;
+ if (max)
+ *max = iter_->first + 1;
+ if (count)
+ *count = iter_->second;
+}
+
+void SampleMapIterator::SkipEmptyBuckets() {
+ while (!Done() && iter_->second == 0) {
+ ++iter_;
+ }
+}
+
+} // namespace
+
SampleMap::SampleMap() : SampleMap(0) {}
SampleMap::SampleMap(uint64_t id) : HistogramSamples(id) {}
@@ -39,11 +103,10 @@ Count SampleMap::TotalCount() const {
}
scoped_ptr<SampleCountIterator> SampleMap::Iterator() const {
- return scoped_ptr<SampleCountIterator>(new SampleMapIterator(sample_counts_));
+ return make_scoped_ptr(new SampleMapIterator(sample_counts_));
}
-bool SampleMap::AddSubtractImpl(SampleCountIterator* iter,
- HistogramSamples::Operator op) {
+bool SampleMap::AddSubtractImpl(SampleCountIterator* iter, Operator op) {
Sample min;
Sample max;
Count count;
@@ -57,38 +120,4 @@ bool SampleMap::AddSubtractImpl(SampleCountIterator* iter,
return true;
}
-SampleMapIterator::SampleMapIterator(const SampleToCountMap& sample_counts)
- : iter_(sample_counts.begin()),
- end_(sample_counts.end()) {
- SkipEmptyBuckets();
-}
-
-SampleMapIterator::~SampleMapIterator() {}
-
-bool SampleMapIterator::Done() const {
- return iter_ == end_;
-}
-
-void SampleMapIterator::Next() {
- DCHECK(!Done());
- ++iter_;
- SkipEmptyBuckets();
-}
-
-void SampleMapIterator::Get(Sample* min, Sample* max, Count* count) const {
- DCHECK(!Done());
- if (min != NULL)
- *min = iter_->first;
- if (max != NULL)
- *max = iter_->first + 1;
- if (count != NULL)
- *count = iter_->second;
-}
-
-void SampleMapIterator::SkipEmptyBuckets() {
- while (!Done() && iter_->second == 0) {
- ++iter_;
- }
-}
-
} // namespace base
diff --git a/base/metrics/sample_map.h b/base/metrics/sample_map.h
index da536e3..2f24e1f 100644
--- a/base/metrics/sample_map.h
+++ b/base/metrics/sample_map.h
@@ -20,13 +20,15 @@
namespace base {
+// The logic here is similar to that of PersistentSampleMap but with different
+// data structures. Changes here likely need to be duplicated there.
class BASE_EXPORT SampleMap : public HistogramSamples {
public:
SampleMap();
explicit SampleMap(uint64_t id);
~SampleMap() override;
- // HistogramSamples implementation:
+ // HistogramSamples:
void Accumulate(HistogramBase::Sample value,
HistogramBase::Count count) override;
HistogramBase::Count GetCount(HistogramBase::Sample value) const override;
@@ -34,9 +36,8 @@ class BASE_EXPORT SampleMap : public HistogramSamples {
scoped_ptr<SampleCountIterator> Iterator() const override;
protected:
- bool AddSubtractImpl(
- SampleCountIterator* iter,
- HistogramSamples::Operator op) override; // |op| is ADD or SUBTRACT.
+ // Performs arithmetic. |op| is ADD or SUBTRACT.
+ bool AddSubtractImpl(SampleCountIterator* iter, Operator op) override;
private:
std::map<HistogramBase::Sample, HistogramBase::Count> sample_counts_;
@@ -44,28 +45,6 @@ class BASE_EXPORT SampleMap : public HistogramSamples {
DISALLOW_COPY_AND_ASSIGN(SampleMap);
};
-class BASE_EXPORT SampleMapIterator : public SampleCountIterator {
- public:
- typedef std::map<HistogramBase::Sample, HistogramBase::Count>
- SampleToCountMap;
-
- explicit SampleMapIterator(const SampleToCountMap& sample_counts);
- ~SampleMapIterator() override;
-
- // SampleCountIterator implementation:
- bool Done() const override;
- void Next() override;
- void Get(HistogramBase::Sample* min,
- HistogramBase::Sample* max,
- HistogramBase::Count* count) const override;
-
- private:
- void SkipEmptyBuckets();
-
- SampleToCountMap::const_iterator iter_;
- const SampleToCountMap::const_iterator end_;
-};
-
} // namespace base
#endif // BASE_METRICS_SAMPLE_MAP_H_
diff --git a/base/metrics/sparse_histogram.cc b/base/metrics/sparse_histogram.cc
index 485f179..ee18cfa 100644
--- a/base/metrics/sparse_histogram.cc
+++ b/base/metrics/sparse_histogram.cc
@@ -7,6 +7,8 @@
#include <utility>
#include "base/metrics/metrics_hashes.h"
+#include "base/metrics/persistent_histogram_allocator.h"
+#include "base/metrics/persistent_sample_map.h"
#include "base/metrics/sample_map.h"
#include "base/metrics/statistics_recorder.h"
#include "base/pickle.h"
@@ -21,26 +23,75 @@ typedef HistogramBase::Sample Sample;
// static
HistogramBase* SparseHistogram::FactoryGet(const std::string& name,
int32_t flags) {
- HistogramBase* histogram = StatisticsRecorder::FindHistogram(name);
+ // Import histograms from known persistent storage. Histograms could have
+ // been added by other processes and they must be fetched and recognized
+ // locally in order to be found by FindHistogram() below. If the persistent
+ // memory segment is not shared between processes, this call does nothing.
+ PersistentHistogramAllocator::ImportGlobalHistograms();
+ HistogramBase* histogram = StatisticsRecorder::FindHistogram(name);
if (!histogram) {
- // To avoid racy destruction at shutdown, the following will be leaked.
- HistogramBase* tentative_histogram = new SparseHistogram(name);
- tentative_histogram->SetFlags(flags);
- histogram =
- StatisticsRecorder::RegisterOrDeleteDuplicate(tentative_histogram);
+ // Try to create the histogram using a "persistent" allocator. As of
+ // 2016-02-25, the availability of such is controlled by a base::Feature
+ // that is off by default. If the allocator doesn't exist or if
+ // allocating from it fails, code below will allocate the histogram from
+ // the process heap.
+ PersistentMemoryAllocator::Reference histogram_ref = 0;
+ scoped_ptr<HistogramBase> tentative_histogram;
+ PersistentHistogramAllocator* allocator =
+ PersistentHistogramAllocator::GetGlobalAllocator();
+ if (allocator) {
+ tentative_histogram = allocator->AllocateHistogram(
+ SPARSE_HISTOGRAM, name, 0, 0, nullptr, flags, &histogram_ref);
+ }
+
+ // Handle the case where no persistent allocator is present or the
+ // persistent allocation fails (perhaps because it is full).
+ if (!tentative_histogram) {
+ DCHECK(!histogram_ref); // Should never have been set.
+ DCHECK(!allocator); // Shouldn't have failed.
+ flags &= ~HistogramBase::kIsPersistent;
+ tentative_histogram.reset(new SparseHistogram(name));
+ tentative_histogram->SetFlags(flags);
+ }
+
+ // Register this histogram with the StatisticsRecorder. Keep a copy of
+ // the pointer value to tell later whether the locally created histogram
+ // was registered or deleted. The type is "void" because it could point
+ // to released memory after the following line.
+ const void* tentative_histogram_ptr = tentative_histogram.get();
+ histogram = StatisticsRecorder::RegisterOrDeleteDuplicate(
+ tentative_histogram.release());
+
+ // Persistent histograms need some follow-up processing.
+ if (histogram_ref) {
+ allocator->FinalizeHistogram(histogram_ref,
+ histogram == tentative_histogram_ptr);
+ }
+
ReportHistogramActivity(*histogram, HISTOGRAM_CREATED);
} else {
ReportHistogramActivity(*histogram, HISTOGRAM_LOOKUP);
}
+
DCHECK_EQ(SPARSE_HISTOGRAM, histogram->GetHistogramType());
return histogram;
}
+// static
+scoped_ptr<HistogramBase> SparseHistogram::PersistentCreate(
+ PersistentMemoryAllocator* allocator,
+ const std::string& name,
+ HistogramSamples::Metadata* meta,
+ HistogramSamples::Metadata* logged_meta) {
+ return make_scoped_ptr(
+ new SparseHistogram(allocator, name, meta, logged_meta));
+}
+
SparseHistogram::~SparseHistogram() {}
uint64_t SparseHistogram::name_hash() const {
- return samples_.id();
+ return samples_->id();
}
HistogramType SparseHistogram::GetHistogramType() const {
@@ -66,7 +117,7 @@ void SparseHistogram::AddCount(Sample value, int count) {
}
{
base::AutoLock auto_lock(lock_);
- samples_.Accumulate(value, count);
+ samples_->Accumulate(value, count);
}
FindAndRunCallback(value);
@@ -76,29 +127,29 @@ scoped_ptr<HistogramSamples> SparseHistogram::SnapshotSamples() const {
scoped_ptr<SampleMap> snapshot(new SampleMap(name_hash()));
base::AutoLock auto_lock(lock_);
- snapshot->Add(samples_);
+ snapshot->Add(*samples_);
return std::move(snapshot);
}
scoped_ptr<HistogramSamples> SparseHistogram::SnapshotDelta() {
scoped_ptr<SampleMap> snapshot(new SampleMap(name_hash()));
base::AutoLock auto_lock(lock_);
- snapshot->Add(samples_);
+ snapshot->Add(*samples_);
// Subtract what was previously logged and update that information.
- snapshot->Subtract(logged_samples_);
- logged_samples_.Add(*snapshot);
+ snapshot->Subtract(*logged_samples_);
+ logged_samples_->Add(*snapshot);
return std::move(snapshot);
}
void SparseHistogram::AddSamples(const HistogramSamples& samples) {
base::AutoLock auto_lock(lock_);
- samples_.Add(samples);
+ samples_->Add(samples);
}
bool SparseHistogram::AddSamplesFromPickle(PickleIterator* iter) {
base::AutoLock auto_lock(lock_);
- return samples_.AddFromPickle(iter);
+ return samples_->AddFromPickle(iter);
}
void SparseHistogram::WriteHTMLGraph(std::string* output) const {
@@ -117,7 +168,28 @@ bool SparseHistogram::SerializeInfoImpl(Pickle* pickle) const {
SparseHistogram::SparseHistogram(const std::string& name)
: HistogramBase(name),
- samples_(HashMetricName(name)) {}
+ samples_(new SampleMap(HashMetricName(name))),
+ logged_samples_(new SampleMap(samples_->id())) {}
+
+SparseHistogram::SparseHistogram(PersistentMemoryAllocator* allocator,
+ const std::string& name,
+ HistogramSamples::Metadata* meta,
+ HistogramSamples::Metadata* logged_meta)
+ : HistogramBase(name),
+ // While other histogram types maintain a static vector of values with
+ // sufficient space for both "active" and "logged" samples, with each
+ // SampleVector being given the appropriate half, sparse histograms
+ // have no such initial allocation. Each sample has its own record
+ // attached to a single PersistentSampleMap by a common 64-bit identifier.
+ // Since a sparse histogram has two sample maps (active and logged),
+ // there must be two sets of sample records with different IDs. The
+ // "active" samples use, for convenience purposes, an ID matching
+ // that of the histogram while the "logged" samples use that number
+ // plus 1.
+ samples_(new PersistentSampleMap(HashMetricName(name), allocator, meta)),
+ logged_samples_(
+ new PersistentSampleMap(samples_->id() + 1, allocator, logged_meta)) {
+}
HistogramBase* SparseHistogram::DeserializeInfoImpl(PickleIterator* iter) {
std::string histogram_name;
diff --git a/base/metrics/sparse_histogram.h b/base/metrics/sparse_histogram.h
index 7f36313..b876737 100644
--- a/base/metrics/sparse_histogram.h
+++ b/base/metrics/sparse_histogram.h
@@ -51,6 +51,7 @@ namespace base {
} while (0)
class HistogramSamples;
+class PersistentMemoryAllocator;
class BASE_EXPORT SparseHistogram : public HistogramBase {
public:
@@ -58,6 +59,13 @@ class BASE_EXPORT SparseHistogram : public HistogramBase {
// new one.
static HistogramBase* FactoryGet(const std::string& name, int32_t flags);
+ // Create a histogram using data in persistent storage.
+ static scoped_ptr<HistogramBase> PersistentCreate(
+ PersistentMemoryAllocator* allocator,
+ const std::string& name,
+ HistogramSamples::Metadata* meta,
+ HistogramSamples::Metadata* logged_meta);
+
~SparseHistogram() override;
// HistogramBase implementation:
@@ -83,6 +91,11 @@ class BASE_EXPORT SparseHistogram : public HistogramBase {
// Clients should always use FactoryGet to create SparseHistogram.
explicit SparseHistogram(const std::string& name);
+ SparseHistogram(PersistentMemoryAllocator* allocator,
+ const std::string& name,
+ HistogramSamples::Metadata* meta,
+ HistogramSamples::Metadata* logged_meta);
+
friend BASE_EXPORT HistogramBase* DeserializeHistogramInfo(
base::PickleIterator* iter);
static HistogramBase* DeserializeInfoImpl(base::PickleIterator* iter);
@@ -107,8 +120,8 @@ class BASE_EXPORT SparseHistogram : public HistogramBase {
// Protects access to |samples_|.
mutable base::Lock lock_;
- SampleMap samples_;
- SampleMap logged_samples_;
+ scoped_ptr<HistogramSamples> samples_;
+ scoped_ptr<HistogramSamples> logged_samples_;
DISALLOW_COPY_AND_ASSIGN(SparseHistogram);
};
diff --git a/base/metrics/sparse_histogram_unittest.cc b/base/metrics/sparse_histogram_unittest.cc
index 7ad5558..5d5dbcb 100644
--- a/base/metrics/sparse_histogram_unittest.cc
+++ b/base/metrics/sparse_histogram_unittest.cc
@@ -9,6 +9,8 @@
#include "base/memory/scoped_ptr.h"
#include "base/metrics/histogram_base.h"
#include "base/metrics/histogram_samples.h"
+#include "base/metrics/persistent_histogram_allocator.h"
+#include "base/metrics/persistent_memory_allocator.h"
#include "base/metrics/sample_map.h"
#include "base/metrics/statistics_recorder.h"
#include "base/pickle.h"
@@ -17,17 +19,35 @@
namespace base {
-class SparseHistogramTest : public testing::Test {
+// Test parameter indicates if a persistent memory allocator should be used
+// for histogram allocation. False will allocate histograms from the process
+// heap.
+class SparseHistogramTest : public testing::TestWithParam<bool> {
protected:
+ const int32_t kAllocatorMemorySize = 8 << 20; // 8 MiB
+
+ SparseHistogramTest() : use_persistent_histogram_allocator_(GetParam()) {}
+
void SetUp() override {
+ if (use_persistent_histogram_allocator_)
+ CreatePersistentMemoryAllocator();
+
// Each test will have a clean state (no Histogram / BucketRanges
// registered).
InitializeStatisticsRecorder();
}
- void TearDown() override { UninitializeStatisticsRecorder(); }
+ void TearDown() override {
+ if (allocator_) {
+ ASSERT_FALSE(allocator_->IsFull());
+ ASSERT_FALSE(allocator_->IsCorrupt());
+ }
+ UninitializeStatisticsRecorder();
+ DestroyPersistentMemoryAllocator();
+ }
void InitializeStatisticsRecorder() {
+ StatisticsRecorder::ResetForTesting();
statistics_recorder_ = new StatisticsRecorder();
}
@@ -36,14 +56,44 @@ class SparseHistogramTest : public testing::Test {
statistics_recorder_ = NULL;
}
+ void CreatePersistentMemoryAllocator() {
+ // By getting the results-histogram before any persistent allocator
+ // is attached, that histogram is guaranteed not to be stored in
+ // any persistent memory segment (which simplifies some tests).
+ PersistentHistogramAllocator::GetCreateHistogramResultHistogram();
+
+ PersistentHistogramAllocator::CreateGlobalAllocatorOnLocalMemory(
+ kAllocatorMemorySize, 0, "SparseHistogramAllocatorTest");
+ allocator_ =
+ PersistentHistogramAllocator::GetGlobalAllocator()->memory_allocator();
+ }
+
+ void DestroyPersistentMemoryAllocator() {
+ allocator_ = nullptr;
+ PersistentHistogramAllocator::ReleaseGlobalAllocatorForTesting();
+ }
+
scoped_ptr<SparseHistogram> NewSparseHistogram(const std::string& name) {
return scoped_ptr<SparseHistogram>(new SparseHistogram(name));
}
+ const bool use_persistent_histogram_allocator_;
+
StatisticsRecorder* statistics_recorder_;
+ scoped_ptr<char[]> allocator_memory_;
+ PersistentMemoryAllocator* allocator_ = nullptr;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(SparseHistogramTest);
};
-TEST_F(SparseHistogramTest, BasicTest) {
+// Run all HistogramTest cases with both heap and persistent memory.
+INSTANTIATE_TEST_CASE_P(HeapAndPersistent,
+ SparseHistogramTest,
+ testing::Bool());
+
+
+TEST_P(SparseHistogramTest, BasicTest) {
scoped_ptr<SparseHistogram> histogram(NewSparseHistogram("Sparse"));
scoped_ptr<HistogramSamples> snapshot(histogram->SnapshotSamples());
EXPECT_EQ(0, snapshot->TotalCount());
@@ -62,7 +112,7 @@ TEST_F(SparseHistogramTest, BasicTest) {
EXPECT_EQ(1, snapshot2->GetCount(101));
}
-TEST_F(SparseHistogramTest, BasicTestAddCount) {
+TEST_P(SparseHistogramTest, BasicTestAddCount) {
scoped_ptr<SparseHistogram> histogram(NewSparseHistogram("Sparse"));
scoped_ptr<HistogramSamples> snapshot(histogram->SnapshotSamples());
EXPECT_EQ(0, snapshot->TotalCount());
@@ -81,7 +131,7 @@ TEST_F(SparseHistogramTest, BasicTestAddCount) {
EXPECT_EQ(25, snapshot2->GetCount(101));
}
-TEST_F(SparseHistogramTest, AddCount_LargeValuesDontOverflow) {
+TEST_P(SparseHistogramTest, AddCount_LargeValuesDontOverflow) {
scoped_ptr<SparseHistogram> histogram(NewSparseHistogram("Sparse"));
scoped_ptr<HistogramSamples> snapshot(histogram->SnapshotSamples());
EXPECT_EQ(0, snapshot->TotalCount());
@@ -101,7 +151,7 @@ TEST_F(SparseHistogramTest, AddCount_LargeValuesDontOverflow) {
EXPECT_EQ(55250000000LL, snapshot2->sum());
}
-TEST_F(SparseHistogramTest, MacroBasicTest) {
+TEST_P(SparseHistogramTest, MacroBasicTest) {
UMA_HISTOGRAM_SPARSE_SLOWLY("Sparse", 100);
UMA_HISTOGRAM_SPARSE_SLOWLY("Sparse", 200);
UMA_HISTOGRAM_SPARSE_SLOWLY("Sparse", 100);
@@ -114,8 +164,11 @@ TEST_F(SparseHistogramTest, MacroBasicTest) {
EXPECT_EQ(SPARSE_HISTOGRAM, sparse_histogram->GetHistogramType());
EXPECT_EQ("Sparse", sparse_histogram->histogram_name());
- EXPECT_EQ(HistogramBase::kUmaTargetedHistogramFlag,
- sparse_histogram->flags());
+ EXPECT_EQ(
+ HistogramBase::kUmaTargetedHistogramFlag |
+ (use_persistent_histogram_allocator_ ? HistogramBase::kIsPersistent
+ : 0),
+ sparse_histogram->flags());
scoped_ptr<HistogramSamples> samples = sparse_histogram->SnapshotSamples();
EXPECT_EQ(3, samples->TotalCount());
@@ -123,7 +176,7 @@ TEST_F(SparseHistogramTest, MacroBasicTest) {
EXPECT_EQ(1, samples->GetCount(200));
}
-TEST_F(SparseHistogramTest, MacroInLoopTest) {
+TEST_P(SparseHistogramTest, MacroInLoopTest) {
// Unlike the macros in histogram.h, SparseHistogram macros can have a
// variable as histogram name.
for (int i = 0; i < 2; i++) {
@@ -141,7 +194,7 @@ TEST_F(SparseHistogramTest, MacroInLoopTest) {
("Sparse2" == name1 && "Sparse1" == name2));
}
-TEST_F(SparseHistogramTest, Serialize) {
+TEST_P(SparseHistogramTest, Serialize) {
scoped_ptr<SparseHistogram> histogram(NewSparseHistogram("Sparse"));
histogram->SetFlags(HistogramBase::kIPCSerializationSourceFlag);
@@ -166,4 +219,64 @@ TEST_F(SparseHistogramTest, Serialize) {
EXPECT_FALSE(iter.SkipBytes(1));
}
+TEST_P(SparseHistogramTest, FactoryTime) {
+ const int kTestCreateCount = 1 << 10; // Must be power-of-2.
+ const int kTestLookupCount = 100000;
+ const int kTestAddCount = 100000;
+
+ // Create all histogram names in advance for accurate timing below.
+ std::vector<std::string> histogram_names;
+ for (int i = 0; i < kTestCreateCount; ++i) {
+ histogram_names.push_back(
+ StringPrintf("TestHistogram.%d", i % kTestCreateCount));
+ }
+
+ // Calculate cost of creating histograms.
+ TimeTicks create_start = TimeTicks::Now();
+ for (int i = 0; i < kTestCreateCount; ++i)
+ SparseHistogram::FactoryGet(histogram_names[i], HistogramBase::kNoFlags);
+ TimeDelta create_ticks = TimeTicks::Now() - create_start;
+ int64_t create_ms = create_ticks.InMilliseconds();
+
+ VLOG(1) << kTestCreateCount << " histogram creations took " << create_ms
+ << "ms or about "
+ << (create_ms * 1000000) / kTestCreateCount
+ << "ns each.";
+
+ // Calculate cost of looking up existing histograms.
+ TimeTicks lookup_start = TimeTicks::Now();
+ for (int i = 0; i < kTestLookupCount; ++i) {
+ // 6007 is co-prime with kTestCreateCount and so will do lookups in an
+ // order less likely to be cacheable (but still hit them all) should the
+ // underlying storage use the exact histogram name as the key.
+ const int i_mult = 6007;
+ static_assert(i_mult < INT_MAX / kTestCreateCount, "Multiplier too big");
+ int index = (i * i_mult) & (kTestCreateCount - 1);
+ SparseHistogram::FactoryGet(histogram_names[index],
+ HistogramBase::kNoFlags);
+ }
+ TimeDelta lookup_ticks = TimeTicks::Now() - lookup_start;
+ int64_t lookup_ms = lookup_ticks.InMilliseconds();
+
+ VLOG(1) << kTestLookupCount << " histogram lookups took " << lookup_ms
+ << "ms or about "
+ << (lookup_ms * 1000000) / kTestLookupCount
+ << "ns each.";
+
+ // Calculate cost of accessing histograms.
+ HistogramBase* histogram =
+ SparseHistogram::FactoryGet(histogram_names[0], HistogramBase::kNoFlags);
+ ASSERT_TRUE(histogram);
+ TimeTicks add_start = TimeTicks::Now();
+ for (int i = 0; i < kTestAddCount; ++i)
+ histogram->Add(i & 127);
+ TimeDelta add_ticks = TimeTicks::Now() - add_start;
+ int64_t add_ms = add_ticks.InMilliseconds();
+
+ VLOG(1) << kTestAddCount << " histogram adds took " << add_ms
+ << "ms or about "
+ << (add_ms * 1000000) / kTestAddCount
+ << "ns each.";
+}
+
} // namespace base