// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/metrics/persistent_histogram_allocator.h"

#include "base/lazy_instance.h"
#include "base/logging.h"
#include "base/memory/scoped_ptr.h"
#include "base/metrics/histogram.h"
#include "base/metrics/histogram_base.h"
#include "base/metrics/histogram_samples.h"
#include "base/metrics/statistics_recorder.h"
#include "base/synchronization/lock.h"

// TODO(bcwhite): Order these methods to match the header file. The current
// order is temporary, to aid review of the transition from a non-class
// implementation.

namespace base {

namespace {

// Name of histogram for storing results of local operations.
const char kResultHistogram[] = "UMA.CreatePersistentHistogram.Result";

// Type identifiers used when storing in persistent memory so they can be
// identified during extraction; the first 4 bytes of the SHA1 of the name
// are used as a unique integer. A "version number" is added to the base
// so that, if the structure of that object changes, stored older versions
// will be safely ignored.
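// For example, should the layout of the histogram record change again, the
// first constant below would become 0xF1645910 + 3 and records stored by
// older versions would simply be skipped during extraction.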
enum : uint32_t {
  kTypeIdHistogram   = 0xF1645910 + 2,  // SHA1(Histogram)   v2
  kTypeIdRangesArray = 0xBCEA225A + 1,  // SHA1(RangesArray) v1
  kTypeIdCountsArray = 0x53215530 + 1,  // SHA1(CountsArray) v1
};

// The current globally-active persistent allocator for all new histograms.
// The object held here will obviously not be destructed at process exit
// but that's intentional: PersistentMemoryAllocator objects (which underlie
// PersistentHistogramAllocator objects) are explicitly forbidden from doing
// anything essential at exit anyway because they depend on data managed
// elsewhere that could be destructed first.
PersistentHistogramAllocator* g_allocator;

// Takes an array of range boundaries and creates a proper BucketRanges
// object, which is returned to the caller. A return of nullptr indicates
// that the passed boundaries are invalid.
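// For example (illustrative values): boundaries {0, 1, 10, 100} with a
// matching checksum produce a four-entry BucketRanges, while {0, 10, 5, 100}
// fails the strictly-increasing check below and yields nullptr.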
scoped_ptr<BucketRanges> CreateRangesFromData(
    HistogramBase::Sample* ranges_data,
    uint32_t ranges_checksum,
    size_t count) {
  // To avoid racy destruction at shutdown, the following may be leaked.
  scoped_ptr<BucketRanges> ranges(new BucketRanges(count));
  DCHECK_EQ(count, ranges->size());
  for (size_t i = 0; i < count; ++i) {
    if (i > 0 && ranges_data[i] <= ranges_data[i - 1])
      return nullptr;
    ranges->set_range(i, ranges_data[i]);
  }

  ranges->ResetChecksum();
  if (ranges->checksum() != ranges_checksum)
    return nullptr;

  return ranges;
}

// Calculate the number of bytes required to store all of a histogram's
// "counts". This will return zero (0) if |bucket_count| is not valid.
size_t CalculateRequiredCountsBytes(size_t bucket_count) {
  // 2 because each "sample count" also requires a backup "logged count"
  // used for calculating the delta during snapshot operations.
  const unsigned kBytesPerBucket = 2 * sizeof(HistogramBase::AtomicCount);

  // If the |bucket_count| is such that it would overflow the return type,
  // perhaps as the result of a malicious actor, then return zero to
  // indicate the problem to the caller.
  if (bucket_count > std::numeric_limits<uint32_t>::max() / kBytesPerBucket)
    return 0;

  return bucket_count * kBytesPerBucket;
}

}  // namespace

const Feature kPersistentHistogramsFeature{
  "PersistentHistograms", FEATURE_DISABLED_BY_DEFAULT
};

// This data will be held in persistent memory in order for processes to
// locate and use histograms created elsewhere.
struct PersistentHistogramAllocator::PersistentHistogramData {
  int32_t histogram_type;
  int32_t flags;
  int32_t minimum;
  int32_t maximum;
  uint32_t bucket_count;
  PersistentMemoryAllocator::Reference ranges_ref;
  uint32_t ranges_checksum;
  PersistentMemoryAllocator::Reference counts_ref;
  HistogramSamples::Metadata samples_metadata;
  HistogramSamples::Metadata logged_metadata;

  // Space for the histogram name will be added during the actual allocation
  // request. This must be the last field of the structure. A zero-size array
  // or a "flexible" array would be preferred but is not (yet) valid C++.
  char name[1];
};
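
// Because |name| is stored inline as the final field, an allocation holding
// one of these must be sized as
//   offsetof(PersistentHistogramData, name) + name.length() + 1
// which is exactly what AllocateHistogram() below requests.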

PersistentHistogramAllocator::PersistentHistogramAllocator(
    scoped_ptr<PersistentMemoryAllocator> memory)
    : memory_allocator_(std::move(memory)) {}

PersistentHistogramAllocator::~PersistentHistogramAllocator() {}

void PersistentHistogramAllocator::CreateIterator(Iterator* iter) {
  memory_allocator_->CreateIterator(&iter->memory_iter);
}

void PersistentHistogramAllocator::CreateTrackingHistograms(StringPiece name) {
  memory_allocator_->CreateTrackingHistograms(name);
}

void PersistentHistogramAllocator::UpdateTrackingHistograms() {
  memory_allocator_->UpdateTrackingHistograms();
}

// static
HistogramBase*
PersistentHistogramAllocator::GetCreateHistogramResultHistogram() {
  // Get the histogram in which create-results are stored. This is copied
  // almost exactly from the STATIC_HISTOGRAM_POINTER_BLOCK macro but with
  // added code to prevent recursion (a likely occurrence because the
  // creation of a new histogram can end up calling this).
  static base::subtle::AtomicWord atomic_histogram_pointer = 0;
  HistogramBase* histogram_pointer =
      reinterpret_cast<HistogramBase*>(
          base::subtle::Acquire_Load(&atomic_histogram_pointer));
  if (!histogram_pointer) {
    // It's possible for multiple threads to make it here in parallel but
    // they'll always return the same result as there is a mutex in the Get.
    // The purpose of the "initialized" variable is just to ensure that
    // the same thread doesn't recurse, which is also why it doesn't have
    // to be atomic.
    static bool initialized = false;
    if (!initialized) {
      initialized = true;
      if (g_allocator) {
        DLOG(WARNING) << "Creating the results-histogram inside persistent"
                      << " memory can cause future allocations to crash if"
                      << " that memory is ever released (for testing).";
      }

      histogram_pointer = LinearHistogram::FactoryGet(
          kResultHistogram, 1, CREATE_HISTOGRAM_MAX, CREATE_HISTOGRAM_MAX + 1,
          HistogramBase::kUmaTargetedHistogramFlag);
      base::subtle::Release_Store(
          &atomic_histogram_pointer,
          reinterpret_cast<base::subtle::AtomicWord>(histogram_pointer));
    }
  }
  return histogram_pointer;
}

// static
void PersistentHistogramAllocator::RecordCreateHistogramResult(
    CreateHistogramResultType result) {
  HistogramBase* result_histogram = GetCreateHistogramResultHistogram();
  if (result_histogram)
    result_histogram->Add(result);
}

// static
void PersistentHistogramAllocator::SetGlobalAllocator(
    scoped_ptr<PersistentHistogramAllocator> allocator) {
  // Releasing or changing an allocator is extremely dangerous because it
  // likely has histograms stored within it. If the backing memory is
  // also released, future accesses to those histograms will seg-fault.
  CHECK(!g_allocator);
  g_allocator = allocator.release();

  size_t existing = StatisticsRecorder::GetHistogramCount();
  DLOG_IF(WARNING, existing)
      << existing
      << " histograms were created before persistence was enabled.";
}

// static
PersistentHistogramAllocator*
PersistentHistogramAllocator::GetGlobalAllocator() {
  return g_allocator;
}

// static
scoped_ptr<PersistentHistogramAllocator>
PersistentHistogramAllocator::ReleaseGlobalAllocatorForTesting() {
  PersistentHistogramAllocator* histogram_allocator = g_allocator;
  if (!histogram_allocator)
    return nullptr;
  PersistentMemoryAllocator* memory_allocator =
      histogram_allocator->memory_allocator();

  // Before releasing the memory, it's necessary to have the
  // StatisticsRecorder forget about the histograms contained therein;
  // otherwise, some operations will try to access them and the released
  // memory.
  PersistentMemoryAllocator::Iterator iter;
  PersistentMemoryAllocator::Reference ref;
  uint32_t type_id;
  memory_allocator->CreateIterator(&iter);
  while ((ref = memory_allocator->GetNextIterable(&iter, &type_id)) != 0) {
    if (type_id == kTypeIdHistogram) {
      PersistentHistogramData* histogram_data =
          memory_allocator->GetAsObject<PersistentHistogramData>(
              ref, kTypeIdHistogram);
      DCHECK(histogram_data);
      StatisticsRecorder::ForgetHistogramForTesting(histogram_data->name);

      // If a test breaks here then a memory region containing a histogram
      // actively used by this code is being released back to the test.
      // If that memory segment were to be deleted, future calls to create
      // persistent histograms would crash. To avoid this, have the test call
      // the method GetCreateHistogramResultHistogram() *before* setting
      // the (temporary) memory allocator via SetGlobalAllocator() so that
      // the histogram is instead allocated from the process heap.
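      // A sketch of the safe ordering (all methods are from this class):
      //   PersistentHistogramAllocator::GetCreateHistogramResultHistogram();
      //   PersistentHistogramAllocator::SetGlobalAllocator(std::move(temp));
      //   ... code under test creates histograms ...
      //   PersistentHistogramAllocator::ReleaseGlobalAllocatorForTesting();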
      DCHECK_NE(std::string(kResultHistogram), histogram_data->name);
    }
  }

  g_allocator = nullptr;
  return make_scoped_ptr(histogram_allocator);
}

// static
void PersistentHistogramAllocator::CreateGlobalAllocatorOnPersistentMemory(
    void* base,
    size_t size,
    size_t page_size,
    uint64_t id,
    StringPiece name) {
  SetGlobalAllocator(make_scoped_ptr(new PersistentHistogramAllocator(
      make_scoped_ptr(new PersistentMemoryAllocator(
          base, size, page_size, id, name, false)))));
}

// static
void PersistentHistogramAllocator::CreateGlobalAllocatorOnLocalMemory(
    size_t size,
    uint64_t id,
    StringPiece name) {
  SetGlobalAllocator(make_scoped_ptr(new PersistentHistogramAllocator(
      make_scoped_ptr(new LocalPersistentMemoryAllocator(size, id, name)))));
}
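
// Example use (sketch; the size, id, and name here are purely illustrative):
// reserve a 512 KiB heap-backed segment for histograms created at startup:
//
//   PersistentHistogramAllocator::CreateGlobalAllocatorOnLocalMemory(
//       512 << 10, 0x935DDD43, "BrowserMetrics");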

// static
void PersistentHistogramAllocator::CreateGlobalAllocatorOnSharedMemory(
    size_t size,
    const SharedMemoryHandle& handle) {
  scoped_ptr<SharedMemory> shm(new SharedMemory(handle, /*readonly=*/false));
  if (!shm->Map(size)) {
    NOTREACHED();
    return;
  }

  SetGlobalAllocator(make_scoped_ptr(new PersistentHistogramAllocator(
      make_scoped_ptr(new SharedPersistentMemoryAllocator(
          std::move(shm), 0, StringPiece(), /*readonly=*/false)))));
}

// static
scoped_ptr<HistogramBase> PersistentHistogramAllocator::CreateHistogram(
    PersistentHistogramData* histogram_data_ptr) {
  if (!histogram_data_ptr) {
    RecordCreateHistogramResult(CREATE_HISTOGRAM_INVALID_METADATA_POINTER);
    NOTREACHED();
    return nullptr;
  }

  // Copy the histogram_data to local storage because anything in persistent
  // memory cannot be trusted as it could be changed at any moment by a
  // malicious actor that shares access. The contents of histogram_data are
  // validated below; the local copy is to ensure that the contents cannot
  // be externally changed between validation and use.
  PersistentHistogramData histogram_data = *histogram_data_ptr;

  HistogramBase::Sample* ranges_data =
      memory_allocator_->GetAsObject<HistogramBase::Sample>(
          histogram_data.ranges_ref, kTypeIdRangesArray);

  const uint32_t max_buckets =
      std::numeric_limits<uint32_t>::max() / sizeof(HistogramBase::Sample);
  size_t required_bytes =
      (histogram_data.bucket_count + 1) * sizeof(HistogramBase::Sample);
  size_t allocated_bytes =
      memory_allocator_->GetAllocSize(histogram_data.ranges_ref);
  if (!ranges_data || histogram_data.bucket_count < 2 ||
      histogram_data.bucket_count >= max_buckets ||
      allocated_bytes < required_bytes) {
    RecordCreateHistogramResult(CREATE_HISTOGRAM_INVALID_RANGES_ARRAY);
    NOTREACHED();
    return nullptr;
  }

  scoped_ptr<const BucketRanges> created_ranges =
      CreateRangesFromData(ranges_data, histogram_data.ranges_checksum,
                           histogram_data.bucket_count + 1);
  if (!created_ranges) {
    RecordCreateHistogramResult(CREATE_HISTOGRAM_INVALID_RANGES_ARRAY);
    NOTREACHED();
    return nullptr;
  }
  const BucketRanges* ranges =
      StatisticsRecorder::RegisterOrDeleteDuplicateRanges(
          created_ranges.release());

  HistogramBase::AtomicCount* counts_data =
      memory_allocator_->GetAsObject<HistogramBase::AtomicCount>(
          histogram_data.counts_ref, kTypeIdCountsArray);
  size_t counts_bytes =
      CalculateRequiredCountsBytes(histogram_data.bucket_count);
  if (!counts_data || !counts_bytes ||
      memory_allocator_->GetAllocSize(histogram_data.counts_ref) <
          counts_bytes) {
    RecordCreateHistogramResult(CREATE_HISTOGRAM_INVALID_COUNTS_ARRAY);
    NOTREACHED();
    return nullptr;
  }

  // After the main "counts" array is a second array used for storing what
  // was previously logged. This is used to calculate the "delta" during
  // snapshot operations.
  HistogramBase::AtomicCount* logged_data =
      counts_data + histogram_data.bucket_count;

  std::string name(histogram_data_ptr->name);
  scoped_ptr<HistogramBase> histogram;
  switch (histogram_data.histogram_type) {
    case HISTOGRAM:
      histogram = Histogram::PersistentCreate(
          name, histogram_data.minimum, histogram_data.maximum, ranges,
          counts_data, logged_data, histogram_data.bucket_count,
          &histogram_data_ptr->samples_metadata,
          &histogram_data_ptr->logged_metadata);
      DCHECK(histogram);
      break;
    case LINEAR_HISTOGRAM:
      histogram = LinearHistogram::PersistentCreate(
          name, histogram_data.minimum, histogram_data.maximum, ranges,
          counts_data, logged_data, histogram_data.bucket_count,
          &histogram_data_ptr->samples_metadata,
          &histogram_data_ptr->logged_metadata);
      DCHECK(histogram);
      break;
    case BOOLEAN_HISTOGRAM:
      histogram = BooleanHistogram::PersistentCreate(
          name, ranges, counts_data, logged_data,
          &histogram_data_ptr->samples_metadata,
          &histogram_data_ptr->logged_metadata);
      DCHECK(histogram);
      break;
    case CUSTOM_HISTOGRAM:
      histogram = CustomHistogram::PersistentCreate(
          name, ranges, counts_data, logged_data, histogram_data.bucket_count,
          &histogram_data_ptr->samples_metadata,
          &histogram_data_ptr->logged_metadata);
      DCHECK(histogram);
      break;
    default:
      NOTREACHED();
  }

  if (histogram) {
    DCHECK_EQ(histogram_data.histogram_type, histogram->GetHistogramType());
    histogram->SetFlags(histogram_data.flags);
    RecordCreateHistogramResult(CREATE_HISTOGRAM_SUCCESS);
  } else {
    RecordCreateHistogramResult(CREATE_HISTOGRAM_UNKNOWN_TYPE);
  }

  return histogram;
}

scoped_ptr<HistogramBase> PersistentHistogramAllocator::GetHistogram(
    Reference ref) {
  // Unfortunately, the histogram "pickle" methods cannot be used as part of
  // the persistence because the deserialization methods always create local
  // count data (while these must reference the persistent counts) and always
  // add it to the local list of known histograms (while these may be simple
  // references to histograms in other processes).
  PersistentHistogramData* histogram_data =
      memory_allocator_->GetAsObject<PersistentHistogramData>(
          ref, kTypeIdHistogram);
  size_t length = memory_allocator_->GetAllocSize(ref);
  if (!histogram_data ||
      reinterpret_cast<char*>(histogram_data)[length - 1] != '\0') {
    RecordCreateHistogramResult(CREATE_HISTOGRAM_INVALID_METADATA);
    NOTREACHED();
    return nullptr;
  }
  return CreateHistogram(histogram_data);
}

scoped_ptr<HistogramBase>
PersistentHistogramAllocator::GetNextHistogramWithIgnore(Iterator* iter,
                                                         Reference ignore) {
  PersistentMemoryAllocator::Reference ref;
  uint32_t type_id;
  while ((ref = memory_allocator_->GetNextIterable(&iter->memory_iter,
                                                   &type_id)) != 0) {
    if (ref == ignore)
      continue;
    if (type_id == kTypeIdHistogram)
      return GetHistogram(ref);
  }
  return nullptr;
}
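
// A caller can walk every histogram in an allocator with a pattern like the
// following (sketch; an |ignore| value of 0 matches no valid reference):
//
//   PersistentHistogramAllocator::Iterator iter;
//   allocator->CreateIterator(&iter);
//   while (scoped_ptr<HistogramBase> histogram =
//              allocator->GetNextHistogramWithIgnore(&iter, 0)) {
//     StatisticsRecorder::RegisterOrDeleteDuplicate(histogram.release());
//   }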

void PersistentHistogramAllocator::FinalizeHistogram(Reference ref,
                                                     bool registered) {
  // If the created persistent histogram was registered then it needs to
  // be marked as "iterable" in order to be found by other processes.
  if (registered)
    memory_allocator_->MakeIterable(ref);
  // If it wasn't registered then a race condition must have caused
  // two to be created. The allocator does not support releasing the
  // acquired memory so just change the type to be empty.
  else
    memory_allocator_->SetType(ref, 0);
}
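
// Expected call sequence around the allocate/finalize pair (sketch; the real
// call sites live in the histogram factory methods):
//
//   Reference ref = 0;
//   scoped_ptr<HistogramBase> histogram = allocator->AllocateHistogram(
//       HISTOGRAM, name, minimum, maximum, ranges, flags, &ref);
//   HistogramBase* raw = histogram.release();
//   bool registered =
//       StatisticsRecorder::RegisterOrDeleteDuplicate(raw) == raw;
//   allocator->FinalizeHistogram(ref, registered);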

scoped_ptr<HistogramBase> PersistentHistogramAllocator::AllocateHistogram(
    HistogramType histogram_type,
    const std::string& name,
    int minimum,
    int maximum,
    const BucketRanges* bucket_ranges,
    int32_t flags,
    Reference* ref_ptr) {
  // If the allocator is corrupt, don't waste time trying anything else.
  // This also allows differentiating on the dashboard between allocations
  // failed due to a corrupt allocator and the number of process instances
  // with one, the latter being indicated by "newly corrupt", below.
  if (memory_allocator_->IsCorrupt()) {
    RecordCreateHistogramResult(CREATE_HISTOGRAM_ALLOCATOR_CORRUPT);
    return nullptr;
  }

  // If CalculateRequiredCountsBytes() returns zero then the bucket_count
  // was not valid.
  size_t bucket_count = bucket_ranges->bucket_count();
  size_t counts_bytes = CalculateRequiredCountsBytes(bucket_count);
  if (!counts_bytes) {
    NOTREACHED();
    return nullptr;
  }

  size_t ranges_bytes = (bucket_count + 1) * sizeof(HistogramBase::Sample);
  PersistentMemoryAllocator::Reference ranges_ref =
      memory_allocator_->Allocate(ranges_bytes, kTypeIdRangesArray);
  PersistentMemoryAllocator::Reference counts_ref =
      memory_allocator_->Allocate(counts_bytes, kTypeIdCountsArray);
  PersistentMemoryAllocator::Reference histogram_ref =
      memory_allocator_->Allocate(
          offsetof(PersistentHistogramData, name) + name.length() + 1,
          kTypeIdHistogram);
  HistogramBase::Sample* ranges_data =
      memory_allocator_->GetAsObject<HistogramBase::Sample>(ranges_ref,
                                                            kTypeIdRangesArray);
  PersistentHistogramData* histogram_data =
      memory_allocator_->GetAsObject<PersistentHistogramData>(histogram_ref,
                                                              kTypeIdHistogram);

  // Only continue here if all allocations were successful. If they weren't,
  // there is no way to free the space but that's not really a problem since
  // the allocations only fail because the space is full or corrupt and so
  // any future attempts will also fail.
  if (counts_ref && ranges_data && histogram_data) {
    strcpy(histogram_data->name, name.c_str());
    for (size_t i = 0; i < bucket_ranges->size(); ++i)
      ranges_data[i] = bucket_ranges->range(i);

    histogram_data->histogram_type = histogram_type;
    histogram_data->flags = flags;
    histogram_data->minimum = minimum;
    histogram_data->maximum = maximum;
    histogram_data->bucket_count = static_cast<uint32_t>(bucket_count);
    histogram_data->ranges_ref = ranges_ref;
    histogram_data->ranges_checksum = bucket_ranges->checksum();
    histogram_data->counts_ref = counts_ref;

    // Create the histogram using resources in persistent memory. This ends up
    // resolving the "ref" values stored in histogram_data instead of just
    // using what is already known above, but it avoids duplicating the switch
    // statement here and serves as a double-check that everything is
    // correct before committing the new histogram to persistent space.
    scoped_ptr<HistogramBase> histogram = CreateHistogram(histogram_data);
    DCHECK(histogram);
    if (ref_ptr != nullptr)
      *ref_ptr = histogram_ref;

    // By storing the reference within the allocator to this histogram, the
    // next import (which will happen before the next histogram creation)
    // will know to skip it. See also the comment in ImportGlobalHistograms().
    subtle::NoBarrier_Store(&last_created_, histogram_ref);
    return histogram;
  }

  CreateHistogramResultType result;
  if (memory_allocator_->IsCorrupt()) {
    RecordCreateHistogramResult(CREATE_HISTOGRAM_ALLOCATOR_NEWLY_CORRUPT);
    result = CREATE_HISTOGRAM_ALLOCATOR_CORRUPT;
  } else if (memory_allocator_->IsFull()) {
    result = CREATE_HISTOGRAM_ALLOCATOR_FULL;
  } else {
    result = CREATE_HISTOGRAM_ALLOCATOR_ERROR;
  }
  RecordCreateHistogramResult(result);
  NOTREACHED() << "error=" << result;

  return nullptr;
}

// static
void PersistentHistogramAllocator::ImportGlobalHistograms() {
  // The lock protects against concurrent access to the iterator and is created
  // in a thread-safe manner when needed.
  static base::LazyInstance<base::Lock>::Leaky lock = LAZY_INSTANCE_INITIALIZER;

  if (g_allocator) {
    // TODO(bcwhite): Investigate a lock-free, thread-safe iterator.
    base::AutoLock auto_lock(lock.Get());

    // Each call resumes from where it last left off so a persistent iterator
    // is needed. This class has a constructor so even the definition has to
    // be protected by the lock in order to be thread-safe.
    static Iterator iter;
    if (iter.is_clear())
      g_allocator->CreateIterator(&iter);

    // Skip the import if it's the histogram that was last created. Should a
    // race condition cause the "last created" to be overwritten before it
    // is recognized here then the histogram will be created and be ignored
    // when it is detected as a duplicate by the statistics-recorder. This
    // simple check reduces the time of creating persistent histograms by
    // about 40%.
    Reference last_created =
        subtle::NoBarrier_Load(&g_allocator->last_created_);

    while (true) {
      scoped_ptr<HistogramBase> histogram =
          g_allocator->GetNextHistogramWithIgnore(&iter, last_created);
      if (!histogram)
        break;
      StatisticsRecorder::RegisterOrDeleteDuplicate(histogram.release());
    }
  }
}

}  // namespace base