summaryrefslogtreecommitdiffstats
path: root/base/metrics
diff options
context:
space:
mode:
authorbrettw@chromium.org <brettw@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>2010-10-14 04:38:38 +0000
committerbrettw@chromium.org <brettw@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>2010-10-14 04:38:38 +0000
commit835d7c811c98f179090c57a827a9c9baa2130435 (patch)
treeedcff6b2c9029c6f867f650d762837f2485d99fb /base/metrics
parent4b1deac7ba7e7a6bf0425e6ed4db26e0c29daa7f (diff)
downloadchromium_src-835d7c811c98f179090c57a827a9c9baa2130435.zip
chromium_src-835d7c811c98f179090c57a827a9c9baa2130435.tar.gz
chromium_src-835d7c811c98f179090c57a827a9c9baa2130435.tar.bz2
Move Stats, histograms, and field trial into a metrics subdirectory of base and
put them in the base namespace. TEST=it compiles BUG=none git-svn-id: svn://svn.chromium.org/chrome/trunk/src@62510 0039d316-1c4b-4281-b951-d872f2087c98
Diffstat (limited to 'base/metrics')
-rw-r--r--base/metrics/field_trial.cc193
-rw-r--r--base/metrics/field_trial.h238
-rw-r--r--base/metrics/field_trial_unittest.cc233
-rw-r--r--base/metrics/histogram.cc935
-rw-r--r--base/metrics/histogram.h643
-rw-r--r--base/metrics/histogram_unittest.cc311
-rw-r--r--base/metrics/stats_counters.cc117
-rw-r--r--base/metrics/stats_counters.h196
-rw-r--r--base/metrics/stats_table.cc553
-rw-r--r--base/metrics/stats_table.h196
-rw-r--r--base/metrics/stats_table_unittest.cc411
11 files changed, 4026 insertions, 0 deletions
diff --git a/base/metrics/field_trial.cc b/base/metrics/field_trial.cc
new file mode 100644
index 0000000..63d9ed5
--- /dev/null
+++ b/base/metrics/field_trial.cc
@@ -0,0 +1,193 @@
+// Copyright (c) 2010 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/metrics/field_trial.h"
+
+#include "base/logging.h"
+#include "base/rand_util.h"
+#include "base/stringprintf.h"
+
+namespace base {
+
// static
// Sentinel group value: this client has not been assigned to any group.
const int FieldTrial::kNotParticipating = -1;

// static
// Sentinel probability: the group being appended absorbs whatever probability
// has not yet been claimed (see FieldTrial::AppendGroup).
const int FieldTrial::kAllRemainingProbability = -2;

// static
// Separator used by StatesToString()/StringAugmentsState() when serializing
// trial state as "name/group_name/name/group_name/...".
const char FieldTrialList::kPersistentStringSeparator('/');

// Separator inserted between a histogram name and a trial's group name by
// FieldTrial::MakeName(), e.g. "Memory.RendererTotal_high_mem".
static const char kHistogramFieldTrialSeparator('_');
+
+//------------------------------------------------------------------------------
+// FieldTrial methods and members.
+
FieldTrial::FieldTrial(const std::string& name,
                       const Probability total_probability)
    : name_(name),
      divisor_(total_probability),
      // NOTE: random_ reads divisor_; this is safe only because divisor_ is
      // declared before random_ in the class (member initialization follows
      // declaration order, not initializer-list order).
      random_(static_cast<Probability>(divisor_ * base::RandDouble())),
      accumulated_group_probability_(0),
      next_group_number_(0),
      group_(kNotParticipating) {
  // Registration AddRef()s this trial, so it persists until the global
  // FieldTrialList is destroyed.
  FieldTrialList::Register(this);
}
+
+int FieldTrial::AppendGroup(const std::string& name,
+ Probability group_probability) {
+ DCHECK(group_probability <= divisor_);
+ DCHECK(group_probability >=0 ||
+ group_probability == kAllRemainingProbability);
+ if (group_probability == kAllRemainingProbability)
+ accumulated_group_probability_ = divisor_;
+ else
+ accumulated_group_probability_ += group_probability;
+ DCHECK(accumulated_group_probability_ <= divisor_);
+ if (group_ == kNotParticipating && accumulated_group_probability_ > random_) {
+ // This is the group that crossed the random line, so we do the assignment.
+ group_ = next_group_number_;
+ if (name.empty())
+ base::StringAppendF(&group_name_, "%d", group_);
+ else
+ group_name_ = name;
+ }
+ return next_group_number_++;
+}
+
+// static
+std::string FieldTrial::MakeName(const std::string& name_prefix,
+ const std::string& trial_name) {
+ std::string big_string(name_prefix);
+ big_string.append(1, kHistogramFieldTrialSeparator);
+ return big_string.append(FieldTrialList::FindFullName(trial_name));
+}
+
+FieldTrial::~FieldTrial() {}
+
+//------------------------------------------------------------------------------
+// FieldTrialList methods and members.
+
// static
FieldTrialList* FieldTrialList::global_ = NULL;

// static
bool FieldTrialList::register_without_global_ = false;

FieldTrialList::FieldTrialList() : application_start_time_(TimeTicks::Now()) {
  // At most one FieldTrialList may exist, and it must be created before any
  // FieldTrial registers itself (see Register()).
  DCHECK(!global_);
  DCHECK(!register_without_global_);
  global_ = this;
}
+
+FieldTrialList::~FieldTrialList() {
+ AutoLock auto_lock(lock_);
+ while (!registered_.empty()) {
+ RegistrationList::iterator it = registered_.begin();
+ it->second->Release();
+ registered_.erase(it->first);
+ }
+ DCHECK(this == global_);
+ global_ = NULL;
+}
+
// static
void FieldTrialList::Register(FieldTrial* trial) {
  if (!global_) {
    // No FieldTrialList exists (e.g. some unit tests). Record that fact so a
    // later FieldTrialList construction can DCHECK; the trial is simply not
    // tracked (and not AddRef'd) in this case.
    register_without_global_ = true;
    return;
  }
  AutoLock auto_lock(global_->lock_);
  // Trial names must be unique.
  DCHECK(!global_->PreLockedFind(trial->name()));
  trial->AddRef();  // Balanced by Release() in ~FieldTrialList().
  global_->registered_[trial->name()] = trial;
}
+
+// static
+int FieldTrialList::FindValue(const std::string& name) {
+ FieldTrial* field_trial = Find(name);
+ if (field_trial)
+ return field_trial->group();
+ return FieldTrial::kNotParticipating;
+}
+
+// static
+std::string FieldTrialList::FindFullName(const std::string& name) {
+ FieldTrial* field_trial = Find(name);
+ if (field_trial)
+ return field_trial->group_name();
+ return "";
+}
+
+// static
+FieldTrial* FieldTrialList::Find(const std::string& name) {
+ if (!global_)
+ return NULL;
+ AutoLock auto_lock(global_->lock_);
+ return global_->PreLockedFind(name);
+}
+
+FieldTrial* FieldTrialList::PreLockedFind(const std::string& name) {
+ RegistrationList::iterator it = registered_.find(name);
+ if (registered_.end() == it)
+ return NULL;
+ return it->second;
+}
+
// static
void FieldTrialList::StatesToString(std::string* output) {
  // Serializes every concluded trial as "name/group_name/" pairs; trials that
  // have no winning group yet are skipped. StringAugmentsState() is the
  // inverse operation.
  if (!global_)
    return;
  DCHECK(output->empty());
  AutoLock auto_lock(global_->lock_);
  for (RegistrationList::iterator it = global_->registered_.begin();
       it != global_->registered_.end(); ++it) {
    const std::string name = it->first;
    const std::string group_name = it->second->group_name();
    if (group_name.empty())
      continue;  // No definitive winner in this trial.
    // Neither part may contain the separator, or the result is unparseable.
    DCHECK_EQ(name.find(kPersistentStringSeparator), std::string::npos);
    DCHECK_EQ(group_name.find(kPersistentStringSeparator), std::string::npos);
    output->append(name);
    output->append(1, kPersistentStringSeparator);
    output->append(group_name);
    output->append(1, kPersistentStringSeparator);
  }
}
+
// static
bool FieldTrialList::StringAugmentsState(const std::string& prior_state) {
  // Parses "name/group_name/name/group_name/..." (as produced by
  // StatesToString()) and forces each named trial to exist with the given
  // winning group. Returns false on malformed input or a conflicting winner.
  DCHECK(global_);
  // NOTE(review): returning true when global_ is NULL treats "no list" as
  // vacuous success even though the DCHECK above flags it — confirm this is
  // the intended release-build behavior.
  if (prior_state.empty() || !global_)
    return true;

  size_t next_item = 0;
  while (next_item < prior_state.length()) {
    size_t name_end = prior_state.find(kPersistentStringSeparator, next_item);
    if (name_end == prior_state.npos || next_item == name_end)
      return false;  // Missing separator, or empty trial name.
    size_t group_name_end = prior_state.find(kPersistentStringSeparator,
                                             name_end + 1);
    if (group_name_end == prior_state.npos || name_end + 1 == group_name_end)
      return false;  // Missing trailing separator, or empty group name.
    std::string name(prior_state, next_item, name_end - next_item);
    std::string group_name(prior_state, name_end + 1,
                           group_name_end - name_end - 1);
    next_item = group_name_end + 1;

    FieldTrial *field_trial(FieldTrialList::Find(name));
    if (field_trial) {
      // In single process mode, we may have already created the field trial.
      if (field_trial->group_name() != group_name)
        return false;
      continue;
    }
    // Recreate the trial with 100% probability on the specified group so its
    // outcome matches the originating process. The raw pointer is not leaked:
    // the constructor registers it with the global FieldTrialList, which
    // holds a reference until shutdown.
    const int kTotalProbability = 100;
    field_trial = new FieldTrial(name, kTotalProbability);
    field_trial->AppendGroup(group_name, kTotalProbability);
  }
  return true;
}
+
+} // namespace base
diff --git a/base/metrics/field_trial.h b/base/metrics/field_trial.h
new file mode 100644
index 0000000..348a1a7
--- /dev/null
+++ b/base/metrics/field_trial.h
@@ -0,0 +1,238 @@
+// Copyright (c) 2010 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// FieldTrial is a class for handling details of statistical experiments
+// performed by actual users in the field (i.e., in a shipped or beta product).
+// All code is called exclusively on the UI thread currently.
+//
+// The simplest example is an experiment to see whether one of two options
+// produces "better" results across our user population. In that scenario, UMA
+// data is uploaded to aggregate the test results, and this FieldTrial class
+// manages the state of each such experiment (state == which option was
+// pseudo-randomly selected).
+//
+// States are typically generated randomly, either based on a one time
+// randomization (generated randomly once, and then persistently reused in the
+// client during each future run of the program), or by a startup randomization
+// (generated each time the application starts up, but held constant during the
+// duration of the process), or by continuous randomization across a run (where
+// the state can be recalculated again and again, many times during a process).
+// Only startup randomization is implemented thus far.
+
+//------------------------------------------------------------------------------
+// Example: Suppose we have an experiment involving memory, such as determining
+// the impact of some pruning algorithm.
+// We assume that we already have a histogram of memory usage, such as:
+
+// HISTOGRAM_COUNTS("Memory.RendererTotal", count);
+
+// Somewhere in main thread initialization code, we'd probably define an
+// instance of a FieldTrial, with code such as:
+
+// // Note, FieldTrials are reference counted, and persist automagically until
+// // process teardown, courtesy of their automatic registration in
+// // FieldTrialList.
+// scoped_refptr<FieldTrial> trial = new FieldTrial("MemoryExperiment", 1000);
+// int group1 = trial->AppendGroup("high_mem", 20); // 2% in high_mem group.
+// int group2 = trial->AppendGroup("low_mem", 20); // 2% in low_mem group.
+// // Take action depending of which group we randomly land in.
+// if (trial->group() == group1)
+// SetPruningAlgorithm(kType1); // Sample setting of browser state.
+// else if (trial->group() == group2)
+// SetPruningAlgorithm(kType2); // Sample alternate setting.
+
// We then modify any histograms we wish to correlate with our experiment to
// have slightly different names, depending on what group the trial instance
// happened (randomly) to be assigned to:
+
+// HISTOGRAM_COUNTS(FieldTrial::MakeName("Memory.RendererTotal",
+// "MemoryExperiment").data(), count);
+
// The above code will create 3 distinct histograms, with each run of the
// application being assigned to one of the three groups, and for each group,
// the correspondingly named histogram will be populated:
+
+// Memory.RendererTotal // 96% of users still fill this histogram.
+// Memory.RendererTotal_high_mem // 2% of users will fill this histogram.
+// Memory.RendererTotal_low_mem // 2% of users will fill this histogram.
+
+//------------------------------------------------------------------------------
+
+#ifndef BASE_METRICS_FIELD_TRIAL_H_
+#define BASE_METRICS_FIELD_TRIAL_H_
+#pragma once
+
+#include <map>
+#include <string>
+
+#include "base/lock.h"
+#include "base/ref_counted.h"
+#include "base/time.h"
+
+namespace base {
+
class FieldTrial : public RefCounted<FieldTrial> {
 public:
  typedef int Probability;  // Probability type for being selected in a trial.

  // A return value to indicate that a given instance has not yet had a group
  // assignment (and hence is not yet participating in the trial).
  static const int kNotParticipating;

  // Provide an easy way to assign all remaining probability to a group. Note
  // that this will force an instance to participate, and make it illegal to
  // attempt to probabilistically add any other groups to the trial. When doing
  // A/B tests with timings, it is often best to define all groups, so that
  // histograms will get unique names via the MakeName() methods.
  static const Probability kAllRemainingProbability;

  // The name is used to register the instance with the FieldTrialList class,
  // and can be used to find the trial (only one trial can be present for each
  // name).
  // Group probabilities that are later supplied must sum to less than or equal
  // to the total_probability.
  FieldTrial(const std::string& name, Probability total_probability);

  // Establish the name and probability of the next group in this trial.
  // Sometimes, based on construction randomization, this call may cause the
  // provided group to be *THE* group selected for use in this instance.
  int AppendGroup(const std::string& name, Probability group_probability);

  // Return the name of the FieldTrial (excluding the group name).
  std::string name() const { return name_; }

  // Return the randomly selected group number that was assigned.
  // Return kNotParticipating if the instance is not participating in the
  // experiment.
  int group() const { return group_; }

  // If the field trial is not in an experiment, this returns the empty string.
  // If the group's name is empty, a name of "_" concatenated with the group
  // number is used as the group name.
  std::string group_name() const { return group_name_; }

  // Helper function for the most common use: as an argument to specify the
  // name of a HISTOGRAM. Use the original histogram name as the name_prefix.
  static std::string MakeName(const std::string& name_prefix,
                              const std::string& trial_name);

 private:
  friend class RefCounted<FieldTrial>;

  virtual ~FieldTrial();

  // The name of the field trial, as can be found via the FieldTrialList.
  // This is empty if the trial is not in the experiment.
  const std::string name_;

  // The maximum sum of all probabilities supplied, which corresponds to 100%.
  // This is the scaling factor used to adjust supplied probabilities.
  Probability divisor_;

  // The randomly selected probability that is used to select a group (or have
  // the instance not participate). It is the product of divisor_ and a random
  // number between [0, 1).
  Probability random_;

  // Sum of the probabilities of all appended groups.
  Probability accumulated_group_probability_;

  // The group number that will be handed out by the next AppendGroup() call.
  int next_group_number_;

  // The pseudo-randomly assigned group number.
  // This is kNotParticipating if no group has been assigned.
  int group_;

  // A textual name for the randomly selected group, including the Trial name.
  // If this Trial is not a member of a group, this string is empty.
  std::string group_name_;

  DISALLOW_COPY_AND_ASSIGN(FieldTrial);
};
+
//------------------------------------------------------------------------------
// Class with a list of all active field trials. A trial is active if it has
// been registered, which includes evaluating its state based on its
// probability. Only one instance of this class exists.
class FieldTrialList {
 public:
  // Define a separator character to use when creating a persistent form of an
  // instance. This is intended for use as a command line argument, passed to a
  // second process to mimic our state (i.e., provide the same group name).
  static const char kPersistentStringSeparator;  // Currently a slash.

  // This singleton holds the global list of registered FieldTrials.
  FieldTrialList();
  // Destructor Release()'s references to all registered FieldTrial instances.
  ~FieldTrialList();

  // Register() stores a pointer to the given trial in a global map.
  // This method also AddRef's the indicated trial.
  static void Register(FieldTrial* trial);

  // The Find() method can be used to test to see if a named Trial was already
  // registered, or to retrieve a pointer to it from the global map.
  static FieldTrial* Find(const std::string& name);

  // Returns the group number for the named trial, or
  // FieldTrial::kNotParticipating when the trial is not registered.
  static int FindValue(const std::string& name);

  // Returns the winning group name for the named trial, or the empty string
  // when the trial is not registered.
  static std::string FindFullName(const std::string& name);

  // Create a persistent representation of all FieldTrial instances for
  // resurrection in another process. This allows randomization to be done in
  // one process, and secondary processes can by synchronized on the result.
  // The resulting string contains only the names, the trial name, and a "/"
  // separator.
  static void StatesToString(std::string* output);

  // Use a previously generated state string (re: StatesToString()) to augment
  // the current list of field tests to include the supplied tests, and using a
  // 100% probability for each test, force them to have the same group string.
  // This is commonly used in a sub-process, to carry randomly selected state
  // in a parent process into this sub-process.
  // Currently only the group_name_ and name_ are restored.
  static bool StringAugmentsState(const std::string& prior_state);

  // The time of construction of the global map is recorded in a static variable
  // and is commonly used by experiments to identify the time since the start
  // of the application. In some experiments it may be useful to discount
  // data that is gathered before the application has reached sufficient
  // stability (example: most DLLs have loaded, etc.)
  static TimeTicks application_start_time() {
    if (global_)
      return global_->application_start_time_;
    // For testing purposes only, or when we don't yet have a start time.
    return TimeTicks::Now();
  }

 private:
  // Helper function should be called only while holding lock_.
  FieldTrial* PreLockedFind(const std::string& name);

  // A map from FieldTrial names to the actual instances.
  typedef std::map<std::string, FieldTrial*> RegistrationList;

  static FieldTrialList* global_;  // The singleton of this class.

  // This will tell us if there is an attempt to register a field trial without
  // creating the FieldTrialList. This is not an error, unless a FieldTrialList
  // is created after that.
  static bool register_without_global_;

  // A helper value made available to users, that shows when the FieldTrialList
  // was initialized. Note that this is a singleton instance, and hence is a
  // good approximation to the start of the process.
  TimeTicks application_start_time_;

  // Lock for access to registered_.
  Lock lock_;
  RegistrationList registered_;

  DISALLOW_COPY_AND_ASSIGN(FieldTrialList);
};
+
+} // namespace base
+
+#endif // BASE_METRICS_FIELD_TRIAL_H_
+
diff --git a/base/metrics/field_trial_unittest.cc b/base/metrics/field_trial_unittest.cc
new file mode 100644
index 0000000..aea8e85
--- /dev/null
+++ b/base/metrics/field_trial_unittest.cc
@@ -0,0 +1,233 @@
+// Copyright (c) 2010 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Test of FieldTrial class
+
+#include "base/metrics/field_trial.h"
+
+#include "base/stringprintf.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
// Fixture: gives every test a fresh FieldTrialList singleton (constructed
// before, and destroyed after, each test) so trial registrations cannot leak
// across tests.
class FieldTrialTest : public testing::Test {
 public:
  FieldTrialTest() : trial_list_() { }

 private:
  FieldTrialList trial_list_;
};
+
+// Test registration, and also check that destructors are called for trials
+// (and that Purify doesn't catch us leaking).
+TEST_F(FieldTrialTest, Registration) {
+ const char* name1 = "name 1 test";
+ const char* name2 = "name 2 test";
+ EXPECT_FALSE(FieldTrialList::Find(name1));
+ EXPECT_FALSE(FieldTrialList::Find(name2));
+
+ FieldTrial* trial1 = new FieldTrial(name1, 10);
+ EXPECT_EQ(FieldTrial::kNotParticipating, trial1->group());
+ EXPECT_EQ(name1, trial1->name());
+ EXPECT_EQ("", trial1->group_name());
+
+ trial1->AppendGroup("", 7);
+
+ EXPECT_EQ(trial1, FieldTrialList::Find(name1));
+ EXPECT_FALSE(FieldTrialList::Find(name2));
+
+ FieldTrial* trial2 = new FieldTrial(name2, 10);
+ EXPECT_EQ(FieldTrial::kNotParticipating, trial2->group());
+ EXPECT_EQ(name2, trial2->name());
+ EXPECT_EQ("", trial2->group_name());
+
+ trial2->AppendGroup("a first group", 7);
+
+ EXPECT_EQ(trial1, FieldTrialList::Find(name1));
+ EXPECT_EQ(trial2, FieldTrialList::Find(name2));
+ // Note: FieldTrialList should delete the objects at shutdown.
+}
+
// A group holding the full probability must always win; a group with zero
// probability must never win.
TEST_F(FieldTrialTest, AbsoluteProbabilities) {
  char always_true[] = " always true";
  char always_false[] = " always false";
  for (int i = 1; i < 250; ++i) {
    // Try lots of names, by changing the first character of the name.
    // NOTE(review): the int-to-char assignment is implementation-defined for
    // i > 127 on signed-char platforms; only name uniqueness matters here.
    always_true[0] = i;
    always_false[0] = i;

    FieldTrial* trial_true = new FieldTrial(always_true, 10);
    const std::string winner = "TheWinner";
    int winner_group = trial_true->AppendGroup(winner, 10);  // 10/10 == 100%.

    EXPECT_EQ(winner_group, trial_true->group());
    EXPECT_EQ(winner, trial_true->group_name());

    FieldTrial* trial_false = new FieldTrial(always_false, 10);
    int loser_group = trial_false->AppendGroup("ALoser", 0);  // 0/10 == 0%.

    EXPECT_NE(loser_group, trial_false->group());
  }
}
+
// kAllRemainingProbability must absorb everything left over, guaranteeing the
// group wins when no earlier group did.
TEST_F(FieldTrialTest, RemainingProbability) {
  // First create a test that hasn't had a winner yet.
  const std::string winner = "Winner";
  const std::string loser = "Loser";
  scoped_refptr<FieldTrial> trial;
  int counter = 0;
  // Loop until randomization leaves the trial undecided (expected ~2 tries).
  do {
    std::string name = StringPrintf("trial%d", ++counter);
    trial = new FieldTrial(name, 10);
    trial->AppendGroup(loser, 5);  // 50% chance of not being chosen.
  } while (trial->group() != FieldTrial::kNotParticipating);

  // Now add a winner with all remaining probability.
  trial->AppendGroup(winner, FieldTrial::kAllRemainingProbability);

  // And that winner should ALWAYS win.
  EXPECT_EQ(winner, trial->group_name());
}
+
TEST_F(FieldTrialTest, FiftyFiftyProbability) {
  // Check that even with small divisors, we have the proper probabilities, and
  // all outcomes are possible. Since this is a 50-50 test, it should get both
  // outcomes in a few tries, but we'll try no more than 100 times (and be flaky
  // with probability around 1 in 2^99).
  bool first_winner = false;
  bool second_winner = false;
  int counter = 0;
  do {
    // Fresh trial name each iteration: names must be unique per list.
    std::string name = base::StringPrintf("FiftyFifty%d", ++counter);
    scoped_refptr<FieldTrial> trial = new FieldTrial(name, 2);
    trial->AppendGroup("first", 1);  // 50% chance of being chosen.
    if (trial->group() != FieldTrial::kNotParticipating) {
      first_winner = true;
      continue;
    }
    trial->AppendGroup("second", 1);  // Always chosen at this point.
    EXPECT_NE(FieldTrial::kNotParticipating, trial->group());
    second_winner = true;
  } while ((!second_winner || !first_winner) && counter < 100);
  EXPECT_TRUE(second_winner);
  EXPECT_TRUE(first_winner);
}
+
// With a 50% group, both "selected" and "not selected" outcomes should occur
// across many independently named trials.
TEST_F(FieldTrialTest, MiddleProbabilities) {
  char name[] = " same name";
  bool false_event_seen = false;
  bool true_event_seen = false;
  for (int i = 1; i < 250; ++i) {
    name[0] = i;  // Vary first char so each trial name is unique.
    FieldTrial* trial = new FieldTrial(name, 10);
    int might_win = trial->AppendGroup("MightWin", 5);  // 5/10 == 50%.

    if (trial->group() == might_win) {
      true_event_seen = true;
    } else {
      false_event_seen = true;
    }
    if (false_event_seen && true_event_seen)
      return;  // Successful test!!!
  }
  // Very surprising to get here. Probability should be around 1 in 2 ** 250.
  // One of the following will fail.
  EXPECT_TRUE(false_event_seen);
  EXPECT_TRUE(true_event_seen);
}
+
// When the groups exactly cover the total probability, exactly one group must
// win, and an empty group name must default to the group number.
TEST_F(FieldTrialTest, OneWinner) {
  char name[] = "Some name";
  int group_count(10);

  FieldTrial* trial = new FieldTrial(name, group_count);
  int winner_index(-2);  // Sentinel: distinct from any valid group number.
  std::string winner_name;

  for (int i = 1; i <= group_count; ++i) {
    int might_win = trial->AppendGroup("", 1);  // Each group gets 1/10.

    if (trial->group() == might_win) {
      EXPECT_EQ(-2, winner_index);  // Only one group may ever win.
      winner_index = might_win;
      // Empty group names default to the stringified group number.
      StringAppendF(&winner_name, "%d", might_win);
      EXPECT_EQ(winner_name, trial->group_name());
    }
  }
  EXPECT_GE(winner_index, 0);
  EXPECT_EQ(trial->group(), winner_index);
  EXPECT_EQ(trial->group_name(), winner_name);
}
+
// StatesToString() serializes only trials with a decided winner, in
// "name/group/" form.
TEST_F(FieldTrialTest, Save) {
  std::string save_string;

  FieldTrial* trial = new FieldTrial("Some name", 10);
  // There is no winner yet, so no textual group name is associated with trial.
  EXPECT_EQ("", trial->group_name());
  FieldTrialList::StatesToString(&save_string);
  EXPECT_EQ("", save_string);
  save_string.clear();

  // Create a winning group.
  trial->AppendGroup("Winner", 10);
  FieldTrialList::StatesToString(&save_string);
  EXPECT_EQ("Some name/Winner/", save_string);
  save_string.clear();

  // Create a second trial and winning group.
  FieldTrial* trial2 = new FieldTrial("xxx", 10);
  trial2->AppendGroup("yyyy", 10);

  FieldTrialList::StatesToString(&save_string);
  // We assume names are alphabetized... though this is not critical.
  EXPECT_EQ("Some name/Winner/xxx/yyyy/", save_string);
}
+
// StringAugmentsState() must recreate trials (with forced winners) from a
// serialized state string.
TEST_F(FieldTrialTest, Restore) {
  EXPECT_TRUE(FieldTrialList::Find("Some_name") == NULL);
  EXPECT_TRUE(FieldTrialList::Find("xxx") == NULL);

  FieldTrialList::StringAugmentsState("Some_name/Winner/xxx/yyyy/");

  FieldTrial* trial = FieldTrialList::Find("Some_name");
  ASSERT_NE(static_cast<FieldTrial*>(NULL), trial);
  EXPECT_EQ("Winner", trial->group_name());
  EXPECT_EQ("Some_name", trial->name());

  trial = FieldTrialList::Find("xxx");
  ASSERT_NE(static_cast<FieldTrial*>(NULL), trial);
  EXPECT_EQ("yyyy", trial->group_name());
  EXPECT_EQ("xxx", trial->name());
}
+
// Malformed state strings (missing separators or empty fields) must be
// rejected.
TEST_F(FieldTrialTest, BogusRestore) {
  EXPECT_FALSE(FieldTrialList::StringAugmentsState("MissingSlash"));
  EXPECT_FALSE(FieldTrialList::StringAugmentsState("MissingGroupName/"));
  EXPECT_FALSE(FieldTrialList::StringAugmentsState("MissingFinalSlash/gname"));
  EXPECT_FALSE(FieldTrialList::StringAugmentsState("/noname, only group/"));
}
+
// Restoring a state that matches an existing trial is a no-op; restoring a
// conflicting winner is an error.
TEST_F(FieldTrialTest, DuplicateRestore) {
  FieldTrial* trial = new FieldTrial("Some name", 10);
  trial->AppendGroup("Winner", 10);
  std::string save_string;
  FieldTrialList::StatesToString(&save_string);
  EXPECT_EQ("Some name/Winner/", save_string);

  // It is OK if we redundantly specify a winner.
  EXPECT_TRUE(FieldTrialList::StringAugmentsState(save_string));

  // But it is an error to try to change to a different winner.
  EXPECT_FALSE(FieldTrialList::StringAugmentsState("Some name/Loser/"));
}
+
// MakeName() joins the histogram prefix and the trial's winning group name
// with an underscore.
TEST_F(FieldTrialTest, MakeName) {
  FieldTrial* trial = new FieldTrial("Field Trial", 10);
  trial->AppendGroup("Winner", 10);
  EXPECT_EQ("Histogram_Winner",
            FieldTrial::MakeName("Histogram", "Field Trial"));
}
+
+} // namespace base
diff --git a/base/metrics/histogram.cc b/base/metrics/histogram.cc
new file mode 100644
index 0000000..9746e9c
--- /dev/null
+++ b/base/metrics/histogram.cc
@@ -0,0 +1,935 @@
+// Copyright (c) 2010 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Histogram is an object that aggregates statistics, and can summarize them in
+// various forms, including ASCII graphical, HTML, and numerically (as a
+// vector of numbers corresponding to each of the aggregating buckets).
+// See header file for details and examples.
+
+#include "base/metrics/histogram.h"
+
+#include <math.h>
+
+#include <algorithm>
+#include <string>
+
+#include "base/lock.h"
+#include "base/logging.h"
+#include "base/pickle.h"
+#include "base/stringprintf.h"
+
+namespace base {
+
+typedef Histogram::Count Count;
+
+scoped_refptr<Histogram> Histogram::FactoryGet(const std::string& name,
+ Sample minimum, Sample maximum, size_t bucket_count, Flags flags) {
+ scoped_refptr<Histogram> histogram(NULL);
+
+ // Defensive code.
+ if (minimum <= 0)
+ minimum = 1;
+ if (maximum >= kSampleType_MAX)
+ maximum = kSampleType_MAX - 1;
+
+ if (!StatisticsRecorder::FindHistogram(name, &histogram)) {
+ histogram = new Histogram(name, minimum, maximum, bucket_count);
+ StatisticsRecorder::FindHistogram(name, &histogram);
+ }
+
+ DCHECK(HISTOGRAM == histogram->histogram_type());
+ DCHECK(histogram->HasConstructorArguments(minimum, maximum, bucket_count));
+ histogram->SetFlags(flags);
+ return histogram;
+}
+
+scoped_refptr<Histogram> Histogram::FactoryTimeGet(const std::string& name,
+ TimeDelta minimum,
+ TimeDelta maximum,
+ size_t bucket_count,
+ Flags flags) {
+ return FactoryGet(name, minimum.InMilliseconds(), maximum.InMilliseconds(),
+ bucket_count, flags);
+}
+
+Histogram::Histogram(const std::string& name, Sample minimum,
+ Sample maximum, size_t bucket_count)
+ : histogram_name_(name),
+ declared_min_(minimum),
+ declared_max_(maximum),
+ bucket_count_(bucket_count),
+ flags_(kNoFlags),
+ ranges_(bucket_count + 1, 0),
+ sample_() {
+ Initialize();
+}
+
+Histogram::Histogram(const std::string& name, TimeDelta minimum,
+ TimeDelta maximum, size_t bucket_count)
+ : histogram_name_(name),
+ declared_min_(static_cast<int> (minimum.InMilliseconds())),
+ declared_max_(static_cast<int> (maximum.InMilliseconds())),
+ bucket_count_(bucket_count),
+ flags_(kNoFlags),
+ ranges_(bucket_count + 1, 0),
+ sample_() {
+ Initialize();
+}
+
+Histogram::~Histogram() {
+ if (StatisticsRecorder::dump_on_exit()) {
+ std::string output;
+ WriteAscii(true, "\n", &output);
+ LOG(INFO) << output;
+ }
+
+ // Just to make sure most derived class did this properly...
+ DCHECK(ValidateBucketRanges());
+}
+
+bool Histogram::PrintEmptyBucket(size_t index) const {
+ return true;
+}
+
+void Histogram::Add(int value) {
+ if (value >= kSampleType_MAX)
+ value = kSampleType_MAX - 1;
+ if (value < 0)
+ value = 0;
+ size_t index = BucketIndex(value);
+ DCHECK(value >= ranges(index));
+ DCHECK(value < ranges(index + 1));
+ Accumulate(value, 1, index);
+}
+
+void Histogram::AddBoolean(bool value) {
+ DCHECK(false);
+}
+
+void Histogram::AddSampleSet(const SampleSet& sample) {
+ sample_.Add(sample);
+}
+
+void Histogram::SetRangeDescriptions(const DescriptionPair descriptions[]) {
+ DCHECK(false);
+}
+
+// The following methods provide a graphical histogram display.
+void Histogram::WriteHTMLGraph(std::string* output) const {
+  // TBD(jar) Write a nice HTML bar chart, with divs and mouse-overs etc.
+ output->append("<PRE>");
+ WriteAscii(true, "<br>", output);
+ output->append("</PRE>");
+}
+
+void Histogram::WriteAscii(bool graph_it, const std::string& newline,
+ std::string* output) const {
+ // Get local (stack) copies of all effectively volatile class data so that we
+ // are consistent across our output activities.
+ SampleSet snapshot;
+ SnapshotSample(&snapshot);
+ Count sample_count = snapshot.TotalCount();
+
+ WriteAsciiHeader(snapshot, sample_count, output);
+ output->append(newline);
+
+ // Prepare to normalize graphical rendering of bucket contents.
+ double max_size = 0;
+ if (graph_it)
+ max_size = GetPeakBucketSize(snapshot);
+
+ // Calculate space needed to print bucket range numbers. Leave room to print
+ // nearly the largest bucket range without sliding over the histogram.
+ size_t largest_non_empty_bucket = bucket_count() - 1;
+ while (0 == snapshot.counts(largest_non_empty_bucket)) {
+ if (0 == largest_non_empty_bucket)
+ break; // All buckets are empty.
+ --largest_non_empty_bucket;
+ }
+
+ // Calculate largest print width needed for any of our bucket range displays.
+ size_t print_width = 1;
+ for (size_t i = 0; i < bucket_count(); ++i) {
+ if (snapshot.counts(i)) {
+ size_t width = GetAsciiBucketRange(i).size() + 1;
+ if (width > print_width)
+ print_width = width;
+ }
+ }
+
+ int64 remaining = sample_count;
+ int64 past = 0;
+ // Output the actual histogram graph.
+ for (size_t i = 0; i < bucket_count(); ++i) {
+ Count current = snapshot.counts(i);
+ if (!current && !PrintEmptyBucket(i))
+ continue;
+ remaining -= current;
+ std::string range = GetAsciiBucketRange(i);
+ output->append(range);
+ for (size_t j = 0; range.size() + j < print_width + 1; ++j)
+ output->push_back(' ');
+ if (0 == current && i < bucket_count() - 1 && 0 == snapshot.counts(i + 1)) {
+ while (i < bucket_count() - 1 && 0 == snapshot.counts(i + 1))
+ ++i;
+ output->append("... ");
+ output->append(newline);
+ continue; // No reason to plot emptiness.
+ }
+ double current_size = GetBucketSize(current, i);
+ if (graph_it)
+ WriteAsciiBucketGraph(current_size, max_size, output);
+ WriteAsciiBucketContext(past, current, remaining, i, output);
+ output->append(newline);
+ past += current;
+ }
+ DCHECK(past == sample_count);
+}
+
+bool Histogram::ValidateBucketRanges() const {
+ // Standard assertions that all bucket ranges should satisfy.
+ DCHECK(ranges_.size() == bucket_count_ + 1);
+ DCHECK_EQ(ranges_[0], 0);
+ DCHECK(declared_min() == ranges_[1]);
+ DCHECK(declared_max() == ranges_[bucket_count_ - 1]);
+ DCHECK(kSampleType_MAX == ranges_[bucket_count_]);
+ return true;
+}
+
+void Histogram::Initialize() {
+ sample_.Resize(*this);
+ if (declared_min_ <= 0)
+ declared_min_ = 1;
+ if (declared_max_ >= kSampleType_MAX)
+ declared_max_ = kSampleType_MAX - 1;
+ DCHECK(declared_min_ <= declared_max_);
+ DCHECK_GT(bucket_count_, 1u);
+ size_t maximal_bucket_count = declared_max_ - declared_min_ + 2;
+ DCHECK(bucket_count_ <= maximal_bucket_count);
+ DCHECK_EQ(ranges_[0], 0);
+ ranges_[bucket_count_] = kSampleType_MAX;
+ InitializeBucketRange();
+ DCHECK(ValidateBucketRanges());
+ StatisticsRecorder::Register(this);
+}
+
+// Calculate what range of values are held in each bucket.
+// We have to be careful that we don't pick a ratio between starting points in
+// consecutive buckets that is sooo small, that the integer bounds are the same
+// (effectively making one bucket get no values). We need to avoid:
+// (ranges_[i] == ranges_[i + 1])
+// To avoid that, we just do a fine-grained bucket width as far as we need to
+// until we get a ratio that moves us along at least 2 units at a time. From
+// that bucket onward we do use the exponential growth of buckets.
+void Histogram::InitializeBucketRange() {
+ double log_max = log(static_cast<double>(declared_max()));
+ double log_ratio;
+ double log_next;
+ size_t bucket_index = 1;
+ Sample current = declared_min();
+ SetBucketRange(bucket_index, current);
+ while (bucket_count() > ++bucket_index) {
+ double log_current;
+ log_current = log(static_cast<double>(current));
+ // Calculate the count'th root of the range.
+ log_ratio = (log_max - log_current) / (bucket_count() - bucket_index);
+ // See where the next bucket would start.
+ log_next = log_current + log_ratio;
+ int next;
+ next = static_cast<int>(floor(exp(log_next) + 0.5));
+ if (next > current)
+ current = next;
+ else
+ ++current; // Just do a narrow bucket, and keep trying.
+ SetBucketRange(bucket_index, current);
+ }
+
+ DCHECK(bucket_count() == bucket_index);
+}
+
+size_t Histogram::BucketIndex(Sample value) const {
+ // Use simple binary search. This is very general, but there are better
+ // approaches if we knew that the buckets were linearly distributed.
+ DCHECK(ranges(0) <= value);
+ DCHECK(ranges(bucket_count()) > value);
+ size_t under = 0;
+ size_t over = bucket_count();
+ size_t mid;
+
+ do {
+ DCHECK(over >= under);
+ mid = (over + under)/2;
+ if (mid == under)
+ break;
+ if (ranges(mid) <= value)
+ under = mid;
+ else
+ over = mid;
+ } while (true);
+
+ DCHECK(ranges(mid) <= value && ranges(mid+1) > value);
+ return mid;
+}
+
+// Use the actual bucket widths (like a linear histogram) until the widths get
+// over some transition value, and then use that transition width. Exponentials
+// get so big so fast (and we don't expect to see a lot of entries in the large
+// buckets), so we need this to make it possible to see what is going on and
+// not have 0-graphical-height buckets.
+double Histogram::GetBucketSize(Count current, size_t i) const {
+ DCHECK(ranges(i + 1) > ranges(i));
+ static const double kTransitionWidth = 5;
+ double denominator = ranges(i + 1) - ranges(i);
+ if (denominator > kTransitionWidth)
+ denominator = kTransitionWidth; // Stop trying to normalize.
+ return current/denominator;
+}
+
+//------------------------------------------------------------------------------
+// The following two methods can be overridden to provide a thread safe
+// version of this class. The cost of locking is low... but an error in each
+// of these methods has minimal impact. For now, I'll leave this unlocked,
+// and I don't believe I can lose more than a count or two.
+// The vectors are NOT reallocated, so there is no risk of them moving around.
+
+// Update histogram data with new sample.
+void Histogram::Accumulate(Sample value, Count count, size_t index) {
+ // Note locking not done in this version!!!
+ sample_.Accumulate(value, count, index);
+}
+
+// Do a safe atomic snapshot of sample data.
+// This implementation assumes we are on a safe single thread.
+void Histogram::SnapshotSample(SampleSet* sample) const {
+ // Note locking not done in this version!!!
+ *sample = sample_;
+}
+
+bool Histogram::HasConstructorArguments(Sample minimum,
+ Sample maximum,
+ size_t bucket_count) {
+ return ((minimum == declared_min_) && (maximum == declared_max_) &&
+ (bucket_count == bucket_count_));
+}
+
+bool Histogram::HasConstructorTimeDeltaArguments(TimeDelta minimum,
+ TimeDelta maximum,
+ size_t bucket_count) {
+ return ((minimum.InMilliseconds() == declared_min_) &&
+ (maximum.InMilliseconds() == declared_max_) &&
+ (bucket_count == bucket_count_));
+}
+
+//------------------------------------------------------------------------------
+// Accessor methods
+
+void Histogram::SetBucketRange(size_t i, Sample value) {
+ DCHECK(bucket_count_ > i);
+ ranges_[i] = value;
+}
+
+//------------------------------------------------------------------------------
+// Private methods
+
+double Histogram::GetPeakBucketSize(const SampleSet& snapshot) const {
+ double max = 0;
+ for (size_t i = 0; i < bucket_count() ; ++i) {
+ double current_size = GetBucketSize(snapshot.counts(i), i);
+ if (current_size > max)
+ max = current_size;
+ }
+ return max;
+}
+
+void Histogram::WriteAsciiHeader(const SampleSet& snapshot,
+ Count sample_count,
+ std::string* output) const {
+ StringAppendF(output,
+ "Histogram: %s recorded %d samples",
+ histogram_name().c_str(),
+ sample_count);
+ if (0 == sample_count) {
+ DCHECK_EQ(snapshot.sum(), 0);
+ } else {
+ double average = static_cast<float>(snapshot.sum()) / sample_count;
+ double variance = static_cast<float>(snapshot.square_sum())/sample_count
+ - average * average;
+ double standard_deviation = sqrt(variance);
+
+ StringAppendF(output,
+ ", average = %.1f, standard deviation = %.1f",
+ average, standard_deviation);
+ }
+ if (flags_ & ~kHexRangePrintingFlag)
+ StringAppendF(output, " (flags = 0x%x)", flags_ & ~kHexRangePrintingFlag);
+}
+
+void Histogram::WriteAsciiBucketContext(const int64 past,
+ const Count current,
+ const int64 remaining,
+ const size_t i,
+ std::string* output) const {
+ double scaled_sum = (past + current + remaining) / 100.0;
+ WriteAsciiBucketValue(current, scaled_sum, output);
+ if (0 < i) {
+ double percentage = past / scaled_sum;
+ StringAppendF(output, " {%3.1f%%}", percentage);
+ }
+}
+
+const std::string Histogram::GetAsciiBucketRange(size_t i) const {
+ std::string result;
+ if (kHexRangePrintingFlag & flags_)
+ StringAppendF(&result, "%#x", ranges(i));
+ else
+ StringAppendF(&result, "%d", ranges(i));
+ return result;
+}
+
+void Histogram::WriteAsciiBucketValue(Count current, double scaled_sum,
+ std::string* output) const {
+ StringAppendF(output, " (%d = %3.1f%%)", current, current/scaled_sum);
+}
+
+void Histogram::WriteAsciiBucketGraph(double current_size, double max_size,
+ std::string* output) const {
+ const int k_line_length = 72; // Maximal horizontal width of graph.
+ int x_count = static_cast<int>(k_line_length * (current_size / max_size)
+ + 0.5);
+ int x_remainder = k_line_length - x_count;
+
+ while (0 < x_count--)
+ output->append("-");
+ output->append("O");
+ while (0 < x_remainder--)
+ output->append(" ");
+}
+
+// static
+std::string Histogram::SerializeHistogramInfo(const Histogram& histogram,
+ const SampleSet& snapshot) {
+ DCHECK(histogram.histogram_type() != NOT_VALID_IN_RENDERER);
+
+ Pickle pickle;
+ pickle.WriteString(histogram.histogram_name());
+ pickle.WriteInt(histogram.declared_min());
+ pickle.WriteInt(histogram.declared_max());
+ pickle.WriteSize(histogram.bucket_count());
+ pickle.WriteInt(histogram.histogram_type());
+ pickle.WriteInt(histogram.flags());
+
+ snapshot.Serialize(&pickle);
+ return std::string(static_cast<const char*>(pickle.data()), pickle.size());
+}
+
+// static
+bool Histogram::DeserializeHistogramInfo(const std::string& histogram_info) {
+ if (histogram_info.empty()) {
+ return false;
+ }
+
+ Pickle pickle(histogram_info.data(),
+ static_cast<int>(histogram_info.size()));
+ void* iter = NULL;
+ size_t bucket_count;
+ int declared_min;
+ int declared_max;
+ int histogram_type;
+ int pickle_flags;
+ std::string histogram_name;
+ SampleSet sample;
+
+ if (!pickle.ReadString(&iter, &histogram_name) ||
+ !pickle.ReadInt(&iter, &declared_min) ||
+ !pickle.ReadInt(&iter, &declared_max) ||
+ !pickle.ReadSize(&iter, &bucket_count) ||
+ !pickle.ReadInt(&iter, &histogram_type) ||
+ !pickle.ReadInt(&iter, &pickle_flags) ||
+ !sample.Histogram::SampleSet::Deserialize(&iter, pickle)) {
+ LOG(ERROR) << "Pickle error decoding Histogram: " << histogram_name;
+ return false;
+ }
+ DCHECK(pickle_flags & kIPCSerializationSourceFlag);
+ // Since these fields may have come from an untrusted renderer, do additional
+ // checks above and beyond those in Histogram::Initialize()
+ if (declared_max <= 0 || declared_min <= 0 || declared_max < declared_min ||
+ INT_MAX / sizeof(Count) <= bucket_count || bucket_count < 2) {
+ LOG(ERROR) << "Values error decoding Histogram: " << histogram_name;
+ return false;
+ }
+
+ Flags flags = static_cast<Flags>(pickle_flags & ~kIPCSerializationSourceFlag);
+
+ DCHECK(histogram_type != NOT_VALID_IN_RENDERER);
+
+ scoped_refptr<Histogram> render_histogram(NULL);
+
+ if (histogram_type == HISTOGRAM) {
+ render_histogram = Histogram::FactoryGet(
+ histogram_name, declared_min, declared_max, bucket_count, flags);
+ } else if (histogram_type == LINEAR_HISTOGRAM) {
+ render_histogram = LinearHistogram::FactoryGet(
+ histogram_name, declared_min, declared_max, bucket_count, flags);
+ } else if (histogram_type == BOOLEAN_HISTOGRAM) {
+ render_histogram = BooleanHistogram::FactoryGet(histogram_name, flags);
+ } else {
+ LOG(ERROR) << "Error Deserializing Histogram Unknown histogram_type: " <<
+ histogram_type;
+ return false;
+ }
+
+ DCHECK(declared_min == render_histogram->declared_min());
+ DCHECK(declared_max == render_histogram->declared_max());
+ DCHECK(bucket_count == render_histogram->bucket_count());
+ DCHECK(histogram_type == render_histogram->histogram_type());
+
+ if (render_histogram->flags() & kIPCSerializationSourceFlag) {
+ DLOG(INFO) << "Single process mode, histogram observed and not copied: " <<
+ histogram_name;
+ } else {
+ DCHECK(flags == (flags & render_histogram->flags()));
+ render_histogram->AddSampleSet(sample);
+ }
+
+ return true;
+}
+
+//------------------------------------------------------------------------------
+// Methods for the Histogram::SampleSet class
+//------------------------------------------------------------------------------
+
+Histogram::SampleSet::SampleSet()
+ : counts_(),
+ sum_(0),
+ square_sum_(0) {
+}
+
+Histogram::SampleSet::~SampleSet() {
+}
+
+void Histogram::SampleSet::Resize(const Histogram& histogram) {
+ counts_.resize(histogram.bucket_count(), 0);
+}
+
+void Histogram::SampleSet::CheckSize(const Histogram& histogram) const {
+ DCHECK(counts_.size() == histogram.bucket_count());
+}
+
+
+void Histogram::SampleSet::Accumulate(Sample value, Count count,
+ size_t index) {
+ DCHECK(count == 1 || count == -1);
+ counts_[index] += count;
+ sum_ += count * value;
+ square_sum_ += (count * value) * static_cast<int64>(value);
+ DCHECK_GE(counts_[index], 0);
+ DCHECK_GE(sum_, 0);
+ DCHECK_GE(square_sum_, 0);
+}
+
+Count Histogram::SampleSet::TotalCount() const {
+ Count total = 0;
+ for (Counts::const_iterator it = counts_.begin();
+ it != counts_.end();
+ ++it) {
+ total += *it;
+ }
+ return total;
+}
+
+void Histogram::SampleSet::Add(const SampleSet& other) {
+ DCHECK(counts_.size() == other.counts_.size());
+ sum_ += other.sum_;
+ square_sum_ += other.square_sum_;
+ for (size_t index = 0; index < counts_.size(); ++index)
+ counts_[index] += other.counts_[index];
+}
+
+void Histogram::SampleSet::Subtract(const SampleSet& other) {
+ DCHECK(counts_.size() == other.counts_.size());
+ // Note: Race conditions in snapshotting a sum or square_sum may lead to
+ // (temporary) negative values when snapshots are later combined (and deltas
+  // calculated). As a result, we don't currently CHECK() for positive values.
+ sum_ -= other.sum_;
+ square_sum_ -= other.square_sum_;
+ for (size_t index = 0; index < counts_.size(); ++index) {
+ counts_[index] -= other.counts_[index];
+ DCHECK_GE(counts_[index], 0);
+ }
+}
+
+bool Histogram::SampleSet::Serialize(Pickle* pickle) const {
+ pickle->WriteInt64(sum_);
+ pickle->WriteInt64(square_sum_);
+ pickle->WriteSize(counts_.size());
+
+ for (size_t index = 0; index < counts_.size(); ++index) {
+ pickle->WriteInt(counts_[index]);
+ }
+
+ return true;
+}
+
+bool Histogram::SampleSet::Deserialize(void** iter, const Pickle& pickle) {
+ DCHECK_EQ(counts_.size(), 0u);
+ DCHECK_EQ(sum_, 0);
+ DCHECK_EQ(square_sum_, 0);
+
+ size_t counts_size;
+
+ if (!pickle.ReadInt64(iter, &sum_) ||
+ !pickle.ReadInt64(iter, &square_sum_) ||
+ !pickle.ReadSize(iter, &counts_size)) {
+ return false;
+ }
+
+ if (counts_size == 0)
+ return false;
+
+ for (size_t index = 0; index < counts_size; ++index) {
+ int i;
+ if (!pickle.ReadInt(iter, &i))
+ return false;
+ counts_.push_back(i);
+ }
+
+ return true;
+}
+
+//------------------------------------------------------------------------------
+// LinearHistogram: This histogram uses a traditional set of evenly spaced
+// buckets.
+//------------------------------------------------------------------------------
+
+scoped_refptr<Histogram> LinearHistogram::FactoryGet(const std::string& name,
+ Sample minimum,
+ Sample maximum,
+ size_t bucket_count,
+ Flags flags) {
+ scoped_refptr<Histogram> histogram(NULL);
+
+ if (minimum <= 0)
+ minimum = 1;
+ if (maximum >= kSampleType_MAX)
+ maximum = kSampleType_MAX - 1;
+
+ if (!StatisticsRecorder::FindHistogram(name, &histogram)) {
+ histogram = new LinearHistogram(name, minimum, maximum, bucket_count);
+ StatisticsRecorder::FindHistogram(name, &histogram);
+ }
+
+ DCHECK(LINEAR_HISTOGRAM == histogram->histogram_type());
+ DCHECK(histogram->HasConstructorArguments(minimum, maximum, bucket_count));
+ histogram->SetFlags(flags);
+ return histogram;
+}
+
+scoped_refptr<Histogram> LinearHistogram::FactoryTimeGet(
+ const std::string& name,
+ TimeDelta minimum,
+ TimeDelta maximum,
+ size_t bucket_count,
+ Flags flags) {
+ return FactoryGet(name, minimum.InMilliseconds(), maximum.InMilliseconds(),
+ bucket_count, flags);
+}
+
+LinearHistogram::~LinearHistogram() {
+}
+
+LinearHistogram::LinearHistogram(const std::string& name,
+ Sample minimum,
+ Sample maximum,
+ size_t bucket_count)
+ : Histogram(name, minimum >= 1 ? minimum : 1, maximum, bucket_count) {
+ InitializeBucketRange();
+ DCHECK(ValidateBucketRanges());
+}
+
+LinearHistogram::LinearHistogram(const std::string& name,
+ TimeDelta minimum,
+ TimeDelta maximum,
+ size_t bucket_count)
+ : Histogram(name, minimum >= TimeDelta::FromMilliseconds(1) ?
+ minimum : TimeDelta::FromMilliseconds(1),
+ maximum, bucket_count) {
+  // Do a "better" (different) job at init than the base class did...
+ InitializeBucketRange();
+ DCHECK(ValidateBucketRanges());
+}
+
+Histogram::ClassType LinearHistogram::histogram_type() const {
+ return LINEAR_HISTOGRAM;
+}
+
+void LinearHistogram::SetRangeDescriptions(
+ const DescriptionPair descriptions[]) {
+ for (int i =0; descriptions[i].description; ++i) {
+ bucket_description_[descriptions[i].sample] = descriptions[i].description;
+ }
+}
+
+const std::string LinearHistogram::GetAsciiBucketRange(size_t i) const {
+ int range = ranges(i);
+ BucketDescriptionMap::const_iterator it = bucket_description_.find(range);
+ if (it == bucket_description_.end())
+ return Histogram::GetAsciiBucketRange(i);
+ return it->second;
+}
+
+bool LinearHistogram::PrintEmptyBucket(size_t index) const {
+ return bucket_description_.find(ranges(index)) == bucket_description_.end();
+}
+
+
+void LinearHistogram::InitializeBucketRange() {
+ DCHECK_GT(declared_min(), 0); // 0 is the underflow bucket here.
+ double min = declared_min();
+ double max = declared_max();
+ size_t i;
+ for (i = 1; i < bucket_count(); ++i) {
+ double linear_range = (min * (bucket_count() -1 - i) + max * (i - 1)) /
+ (bucket_count() - 2);
+ SetBucketRange(i, static_cast<int> (linear_range + 0.5));
+ }
+}
+
+double LinearHistogram::GetBucketSize(Count current, size_t i) const {
+ DCHECK(ranges(i + 1) > ranges(i));
+ // Adjacent buckets with different widths would have "surprisingly" many (few)
+ // samples in a histogram if we didn't normalize this way.
+ double denominator = ranges(i + 1) - ranges(i);
+ return current/denominator;
+}
+
+//------------------------------------------------------------------------------
+// This section provides implementation for BooleanHistogram.
+//------------------------------------------------------------------------------
+
+scoped_refptr<Histogram> BooleanHistogram::FactoryGet(const std::string& name,
+ Flags flags) {
+ scoped_refptr<Histogram> histogram(NULL);
+
+ if (!StatisticsRecorder::FindHistogram(name, &histogram)) {
+ histogram = new BooleanHistogram(name);
+ StatisticsRecorder::FindHistogram(name, &histogram);
+ }
+
+ DCHECK(BOOLEAN_HISTOGRAM == histogram->histogram_type());
+ histogram->SetFlags(flags);
+ return histogram;
+}
+
+Histogram::ClassType BooleanHistogram::histogram_type() const {
+ return BOOLEAN_HISTOGRAM;
+}
+
+void BooleanHistogram::AddBoolean(bool value) {
+ Add(value ? 1 : 0);
+}
+
+BooleanHistogram::BooleanHistogram(const std::string& name)
+ : LinearHistogram(name, 1, 2, 3) {
+}
+
+//------------------------------------------------------------------------------
+// CustomHistogram:
+//------------------------------------------------------------------------------
+
+scoped_refptr<Histogram> CustomHistogram::FactoryGet(
+ const std::string& name,
+ const std::vector<int>& custom_ranges,
+ Flags flags) {
+ scoped_refptr<Histogram> histogram(NULL);
+
+ // Remove the duplicates in the custom ranges array.
+ std::vector<int> ranges = custom_ranges;
+ ranges.push_back(0); // Ensure we have a zero value.
+ std::sort(ranges.begin(), ranges.end());
+ ranges.erase(std::unique(ranges.begin(), ranges.end()), ranges.end());
+ if (ranges.size() <= 1) {
+ DCHECK(false);
+ // Note that we pushed a 0 in above, so for defensive code....
+ ranges.push_back(1); // Put in some data so we can index to [1].
+ }
+
+ DCHECK_LT(ranges.back(), kSampleType_MAX);
+
+ if (!StatisticsRecorder::FindHistogram(name, &histogram)) {
+ histogram = new CustomHistogram(name, ranges);
+ StatisticsRecorder::FindHistogram(name, &histogram);
+ }
+
+ DCHECK_EQ(histogram->histogram_type(), CUSTOM_HISTOGRAM);
+ DCHECK(histogram->HasConstructorArguments(ranges[1], ranges.back(),
+ ranges.size()));
+ histogram->SetFlags(flags);
+ return histogram;
+}
+
+Histogram::ClassType CustomHistogram::histogram_type() const {
+ return CUSTOM_HISTOGRAM;
+}
+
+CustomHistogram::CustomHistogram(const std::string& name,
+ const std::vector<int>& custom_ranges)
+ : Histogram(name, custom_ranges[1], custom_ranges.back(),
+ custom_ranges.size()) {
+ DCHECK_GT(custom_ranges.size(), 1u);
+ DCHECK_EQ(custom_ranges[0], 0);
+ ranges_vector_ = &custom_ranges;
+ InitializeBucketRange();
+ ranges_vector_ = NULL;
+ DCHECK(ValidateBucketRanges());
+}
+
+void CustomHistogram::InitializeBucketRange() {
+ DCHECK(ranges_vector_->size() <= bucket_count());
+ for (size_t index = 0; index < ranges_vector_->size(); ++index)
+ SetBucketRange(index, (*ranges_vector_)[index]);
+}
+
+double CustomHistogram::GetBucketSize(Count current, size_t i) const {
+ return 1;
+}
+
+//------------------------------------------------------------------------------
+// The next section handles global (central) support for all histograms, as well
+// as startup/teardown of this service.
+//------------------------------------------------------------------------------
+
+// This singleton instance should be started during the single threaded portion
+// of main(), and hence it is not thread safe. It initializes globals to
+// provide support for all future calls.
+StatisticsRecorder::StatisticsRecorder() {
+ DCHECK(!histograms_);
+ lock_ = new Lock;
+ histograms_ = new HistogramMap;
+}
+
+StatisticsRecorder::~StatisticsRecorder() {
+ DCHECK(histograms_);
+
+ if (dump_on_exit_) {
+ std::string output;
+ WriteGraph("", &output);
+ LOG(INFO) << output;
+ }
+ // Clean up.
+ delete histograms_;
+ histograms_ = NULL;
+ delete lock_;
+ lock_ = NULL;
+}
+
+// static
+bool StatisticsRecorder::WasStarted() {
+ return NULL != histograms_;
+}
+
+// Note: We can't accept a ref_ptr to |histogram| because we *might* not keep a
+// reference, and we are called while in the Histogram constructor. In that
+// scenario, a ref_ptr would have incremented the ref count when the histogram
+// was passed to us, decremented it when we returned, and the instance would be
+// destroyed before assignment (when value was returned by new).
+// static
+void StatisticsRecorder::Register(Histogram* histogram) {
+ if (!histograms_)
+ return;
+ const std::string name = histogram->histogram_name();
+ AutoLock auto_lock(*lock_);
+ // Avoid overwriting a previous registration.
+ if (histograms_->end() == histograms_->find(name))
+ (*histograms_)[name] = histogram;
+}
+
+// static
+void StatisticsRecorder::WriteHTMLGraph(const std::string& query,
+ std::string* output) {
+ if (!histograms_)
+ return;
+ output->append("<html><head><title>About Histograms");
+ if (!query.empty())
+ output->append(" - " + query);
+ output->append("</title>"
+ // We'd like the following no-cache... but it doesn't work.
+ // "<META HTTP-EQUIV=\"Pragma\" CONTENT=\"no-cache\">"
+ "</head><body>");
+
+ Histograms snapshot;
+ GetSnapshot(query, &snapshot);
+ for (Histograms::iterator it = snapshot.begin();
+ it != snapshot.end();
+ ++it) {
+ (*it)->WriteHTMLGraph(output);
+ output->append("<br><hr><br>");
+ }
+ output->append("</body></html>");
+}
+
+// static
+void StatisticsRecorder::WriteGraph(const std::string& query,
+ std::string* output) {
+ if (!histograms_)
+ return;
+ if (query.length())
+ StringAppendF(output, "Collections of histograms for %s\n", query.c_str());
+ else
+ output->append("Collections of all histograms\n");
+
+ Histograms snapshot;
+ GetSnapshot(query, &snapshot);
+ for (Histograms::iterator it = snapshot.begin();
+ it != snapshot.end();
+ ++it) {
+ (*it)->WriteAscii(true, "\n", output);
+ output->append("\n");
+ }
+}
+
+// static
+void StatisticsRecorder::GetHistograms(Histograms* output) {
+ if (!histograms_)
+ return;
+ AutoLock auto_lock(*lock_);
+ for (HistogramMap::iterator it = histograms_->begin();
+ histograms_->end() != it;
+ ++it) {
+ DCHECK(it->second->histogram_name() == it->first);
+ output->push_back(it->second);
+ }
+}
+
+bool StatisticsRecorder::FindHistogram(const std::string& name,
+ scoped_refptr<Histogram>* histogram) {
+ if (!histograms_)
+ return false;
+ AutoLock auto_lock(*lock_);
+ HistogramMap::iterator it = histograms_->find(name);
+ if (histograms_->end() == it)
+ return false;
+ *histogram = it->second;
+ return true;
+}
+
+// private static
+void StatisticsRecorder::GetSnapshot(const std::string& query,
+ Histograms* snapshot) {
+ AutoLock auto_lock(*lock_);
+ for (HistogramMap::iterator it = histograms_->begin();
+ histograms_->end() != it;
+ ++it) {
+ if (it->first.find(query) != std::string::npos)
+ snapshot->push_back(it->second);
+ }
+}
+
+// static
+StatisticsRecorder::HistogramMap* StatisticsRecorder::histograms_ = NULL;
+// static
+Lock* StatisticsRecorder::lock_ = NULL;
+// static
+bool StatisticsRecorder::dump_on_exit_ = false;
+
+} // namespace base
diff --git a/base/metrics/histogram.h b/base/metrics/histogram.h
new file mode 100644
index 0000000..20f67c2
--- /dev/null
+++ b/base/metrics/histogram.h
@@ -0,0 +1,643 @@
+// Copyright (c) 2010 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Histogram is an object that aggregates statistics, and can summarize them in
+// various forms, including ASCII graphical, HTML, and numerically (as a
+// vector of numbers corresponding to each of the aggregating buckets).
+
+// It supports calls to accumulate either time intervals (which are processed
+// as integral number of milliseconds), or arbitrary integral units.
+
+// The default layout of buckets is exponential. For example, buckets might
+// contain (sequentially) the count of values in the following intervals:
+// [0,1), [1,2), [2,4), [4,8), [8,16), [16,32), [32,64), [64,infinity)
+// That bucket allocation would actually result from construction of a histogram
+// for values between 1 and 64, with 8 buckets, such as:
+// Histogram count(L"some name", 1, 64, 8);
+// Note that the underflow bucket [0,1) and the overflow bucket [64,infinity)
+// are not counted by the constructor in the user supplied "bucket_count"
+// argument.
+// The above example has an exponential ratio of 2 (doubling the bucket width
+// in each consecutive bucket. The Histogram class automatically calculates
+// the smallest ratio that it can use to construct the number of buckets
+// selected in the constructor. An another example, if you had 50 buckets,
+// and millisecond time values from 1 to 10000, then the ratio between
+// consecutive bucket widths will be approximately somewhere around the 50th
+// root of 10000. This approach provides very fine grain (narrow) buckets
+// at the low end of the histogram scale, but allows the histogram to cover a
+// gigantic range with the addition of very few buckets.
+
+#ifndef BASE_METRICS_HISTOGRAM_H_
+#define BASE_METRICS_HISTOGRAM_H_
+#pragma once
+
+#include <climits>  // For INT_MAX (Histogram::kSampleType_MAX below).
+#include <map>
+#include <string>
+#include <vector>
+
+#include "base/logging.h"
+#include "base/ref_counted.h"
+#include "base/time.h"
+
+class Lock;
+class Pickle;
+
+namespace base {
+
+//------------------------------------------------------------------------------
+// Provide easy general purpose histogram in a macro, just like stats counters.
+// The first four macros use 50 buckets.
+//
+// Each macro caches its histogram in a function-local static scoped_refptr,
+// so at a given call site |name| must evaluate to the same string on every
+// invocation (this is DCHECK'd against the cached histogram's name).
+
+#define HISTOGRAM_TIMES(name, sample) HISTOGRAM_CUSTOM_TIMES( \
+    name, sample, base::TimeDelta::FromMilliseconds(1), \
+    base::TimeDelta::FromSeconds(10), 50)
+
+#define HISTOGRAM_COUNTS(name, sample) HISTOGRAM_CUSTOM_COUNTS( \
+    name, sample, 1, 1000000, 50)
+
+#define HISTOGRAM_COUNTS_100(name, sample) HISTOGRAM_CUSTOM_COUNTS( \
+    name, sample, 1, 100, 50)
+
+#define HISTOGRAM_COUNTS_10000(name, sample) HISTOGRAM_CUSTOM_COUNTS( \
+    name, sample, 1, 10000, 50)
+
+#define HISTOGRAM_CUSTOM_COUNTS(name, sample, min, max, bucket_count) do { \
+    static scoped_refptr<base::Histogram> counter = \
+        base::Histogram::FactoryGet(name, min, max, bucket_count, \
+                                    base::Histogram::kNoFlags); \
+    DCHECK_EQ(name, counter->histogram_name()); \
+    if (counter.get()) counter->Add(sample); \
+  } while (0)
+
+#define HISTOGRAM_PERCENTAGE(name, under_one_hundred) \
+    HISTOGRAM_ENUMERATION(name, under_one_hundred, 101)
+
+// For folks that need real specific times, use this to select a precise range
+// of times you want plotted, and the number of buckets you want used.
+#define HISTOGRAM_CUSTOM_TIMES(name, sample, min, max, bucket_count) do { \
+    static scoped_refptr<base::Histogram> counter = \
+        base::Histogram::FactoryTimeGet(name, min, max, bucket_count, \
+                                        base::Histogram::kNoFlags); \
+    DCHECK_EQ(name, counter->histogram_name()); \
+    if (counter.get()) counter->AddTime(sample); \
+  } while (0)
+
+// DO NOT USE THIS. It is being phased out, in favor of HISTOGRAM_CUSTOM_TIMES.
+// Samples at or above |max| are dropped entirely rather than being counted in
+// the overflow bucket.
+#define HISTOGRAM_CLIPPED_TIMES(name, sample, min, max, bucket_count) do { \
+    static scoped_refptr<base::Histogram> counter = \
+        base::Histogram::FactoryTimeGet(name, min, max, bucket_count, \
+                                        base::Histogram::kNoFlags); \
+    DCHECK_EQ(name, counter->histogram_name()); \
+    if ((sample) < (max) && counter.get()) counter->AddTime(sample); \
+  } while (0)
+
+// Support histogramming of an enumerated value. The samples should always be
+// less than boundary_value.
+
+#define HISTOGRAM_ENUMERATION(name, sample, boundary_value) do { \
+    static scoped_refptr<base::Histogram> counter = \
+        base::LinearHistogram::FactoryGet(name, 1, boundary_value, \
+                                          boundary_value + 1, \
+                                          base::Histogram::kNoFlags); \
+    DCHECK_EQ(name, counter->histogram_name()); \
+    if (counter.get()) counter->Add(sample); \
+  } while (0)
+
+// |custom_ranges| is a std::vector<int> giving explicit bucket boundaries.
+#define HISTOGRAM_CUSTOM_ENUMERATION(name, sample, custom_ranges) do { \
+    static scoped_refptr<base::Histogram> counter = \
+        base::CustomHistogram::FactoryGet(name, custom_ranges, \
+                                          base::Histogram::kNoFlags); \
+    DCHECK_EQ(name, counter->histogram_name()); \
+    if (counter.get()) counter->Add(sample); \
+  } while (0)
+
+
+//------------------------------------------------------------------------------
+// Define Debug vs non-debug flavors of macros.
+// DHISTOGRAM_* record samples only in debug builds; when NDEBUG is defined
+// they expand to empty statements, so their arguments are not evaluated in
+// release builds.
+#ifndef NDEBUG
+
+#define DHISTOGRAM_TIMES(name, sample) HISTOGRAM_TIMES(name, sample)
+#define DHISTOGRAM_COUNTS(name, sample) HISTOGRAM_COUNTS(name, sample)
+#define DHISTOGRAM_PERCENTAGE(name, under_one_hundred) HISTOGRAM_PERCENTAGE(\
+    name, under_one_hundred)
+#define DHISTOGRAM_CUSTOM_TIMES(name, sample, min, max, bucket_count) \
+    HISTOGRAM_CUSTOM_TIMES(name, sample, min, max, bucket_count)
+#define DHISTOGRAM_CLIPPED_TIMES(name, sample, min, max, bucket_count) \
+    HISTOGRAM_CLIPPED_TIMES(name, sample, min, max, bucket_count)
+#define DHISTOGRAM_CUSTOM_COUNTS(name, sample, min, max, bucket_count) \
+    HISTOGRAM_CUSTOM_COUNTS(name, sample, min, max, bucket_count)
+#define DHISTOGRAM_ENUMERATION(name, sample, boundary_value) \
+    HISTOGRAM_ENUMERATION(name, sample, boundary_value)
+#define DHISTOGRAM_CUSTOM_ENUMERATION(name, sample, custom_ranges) \
+    HISTOGRAM_CUSTOM_ENUMERATION(name, sample, custom_ranges)
+
+#else  // NDEBUG
+
+#define DHISTOGRAM_TIMES(name, sample) do {} while (0)
+#define DHISTOGRAM_COUNTS(name, sample) do {} while (0)
+#define DHISTOGRAM_PERCENTAGE(name, under_one_hundred) do {} while (0)
+#define DHISTOGRAM_CUSTOM_TIMES(name, sample, min, max, bucket_count) \
+    do {} while (0)
+#define DHISTOGRAM_CLIPPED_TIMES(name, sample, min, max, bucket_count) \
+    do {} while (0)
+#define DHISTOGRAM_CUSTOM_COUNTS(name, sample, min, max, bucket_count) \
+    do {} while (0)
+#define DHISTOGRAM_ENUMERATION(name, sample, boundary_value) do {} while (0)
+#define DHISTOGRAM_CUSTOM_ENUMERATION(name, sample, custom_ranges) \
+    do {} while (0)
+
+#endif  // NDEBUG
+
+//------------------------------------------------------------------------------
+// The following macros provide typical usage scenarios for callers that wish
+// to record histogram data, and have the data submitted/uploaded via UMA.
+// Not all systems support such UMA, but if they do, the following macros
+// should work with the service.
+// These differ from the plain HISTOGRAM_* macros above only in passing
+// kUmaTargetedHistogramFlag, which marks the histogram for UMA upload.
+
+#define UMA_HISTOGRAM_TIMES(name, sample) UMA_HISTOGRAM_CUSTOM_TIMES( \
+    name, sample, base::TimeDelta::FromMilliseconds(1), \
+    base::TimeDelta::FromSeconds(10), 50)
+
+#define UMA_HISTOGRAM_MEDIUM_TIMES(name, sample) UMA_HISTOGRAM_CUSTOM_TIMES( \
+    name, sample, base::TimeDelta::FromMilliseconds(10), \
+    base::TimeDelta::FromMinutes(3), 50)
+
+// Use this macro when times can routinely be much longer than 10 seconds.
+#define UMA_HISTOGRAM_LONG_TIMES(name, sample) UMA_HISTOGRAM_CUSTOM_TIMES( \
+    name, sample, base::TimeDelta::FromMilliseconds(1), \
+    base::TimeDelta::FromHours(1), 50)
+
+#define UMA_HISTOGRAM_CUSTOM_TIMES(name, sample, min, max, bucket_count) do { \
+    static scoped_refptr<base::Histogram> counter = \
+        base::Histogram::FactoryTimeGet(name, min, max, bucket_count, \
+            base::Histogram::kUmaTargetedHistogramFlag); \
+    DCHECK_EQ(name, counter->histogram_name()); \
+    if (counter.get()) counter->AddTime(sample); \
+  } while (0)
+
+// DO NOT USE THIS. It is being phased out, in favor of HISTOGRAM_CUSTOM_TIMES.
+// Samples at or above |max| are dropped entirely rather than being counted in
+// the overflow bucket.
+#define UMA_HISTOGRAM_CLIPPED_TIMES(name, sample, min, max, bucket_count) do { \
+    static scoped_refptr<base::Histogram> counter = \
+        base::Histogram::FactoryTimeGet(name, min, max, bucket_count, \
+            base::Histogram::kUmaTargetedHistogramFlag); \
+    DCHECK_EQ(name, counter->histogram_name()); \
+    if ((sample) < (max) && counter.get()) counter->AddTime(sample); \
+  } while (0)
+
+#define UMA_HISTOGRAM_COUNTS(name, sample) UMA_HISTOGRAM_CUSTOM_COUNTS( \
+    name, sample, 1, 1000000, 50)
+
+#define UMA_HISTOGRAM_COUNTS_100(name, sample) UMA_HISTOGRAM_CUSTOM_COUNTS( \
+    name, sample, 1, 100, 50)
+
+#define UMA_HISTOGRAM_COUNTS_10000(name, sample) UMA_HISTOGRAM_CUSTOM_COUNTS( \
+    name, sample, 1, 10000, 50)
+
+#define UMA_HISTOGRAM_CUSTOM_COUNTS(name, sample, min, max, bucket_count) do { \
+    static scoped_refptr<base::Histogram> counter = \
+        base::Histogram::FactoryGet(name, min, max, bucket_count, \
+            base::Histogram::kUmaTargetedHistogramFlag); \
+    DCHECK_EQ(name, counter->histogram_name()); \
+    if (counter.get()) counter->Add(sample); \
+  } while (0)
+
+#define UMA_HISTOGRAM_MEMORY_KB(name, sample) UMA_HISTOGRAM_CUSTOM_COUNTS( \
+    name, sample, 1000, 500000, 50)
+
+#define UMA_HISTOGRAM_MEMORY_MB(name, sample) UMA_HISTOGRAM_CUSTOM_COUNTS( \
+    name, sample, 1, 1000, 50)
+
+#define UMA_HISTOGRAM_PERCENTAGE(name, under_one_hundred) \
+    UMA_HISTOGRAM_ENUMERATION(name, under_one_hundred, 101)
+
+#define UMA_HISTOGRAM_ENUMERATION(name, sample, boundary_value) do { \
+    static scoped_refptr<base::Histogram> counter = \
+        base::LinearHistogram::FactoryGet(name, 1, boundary_value, \
+            boundary_value + 1, base::Histogram::kUmaTargetedHistogramFlag); \
+    DCHECK_EQ(name, counter->histogram_name()); \
+    if (counter.get()) counter->Add(sample); \
+  } while (0)
+
+#define UMA_HISTOGRAM_CUSTOM_ENUMERATION(name, sample, custom_ranges) do { \
+    static scoped_refptr<base::Histogram> counter = \
+        base::CustomHistogram::FactoryGet(name, custom_ranges, \
+            base::Histogram::kUmaTargetedHistogramFlag); \
+    DCHECK_EQ(name, counter->histogram_name()); \
+    if (counter.get()) counter->Add(sample); \
+  } while (0)
+
+//------------------------------------------------------------------------------
+
+class BooleanHistogram;
+class CustomHistogram;
+class Histogram;
+class LinearHistogram;
+
+// Histogram aggregates integer samples into a fixed set of buckets and can
+// render them as ASCII/HTML graphs or serialize them for transport across an
+// IPC channel. Instances are refcounted and shared by name; obtain them via
+// the FactoryGet()/FactoryTimeGet() factories rather than constructing
+// directly.
+class Histogram : public base::RefCountedThreadSafe<Histogram> {
+ public:
+  typedef int Sample;  // Used for samples (and ranges of samples).
+  typedef int Count;   // Used to count samples in a bucket.
+  static const Sample kSampleType_MAX = INT_MAX;
+
+  typedef std::vector<Count> Counts;
+  typedef std::vector<Sample> Ranges;
+
+  // These enums are meant to facilitate deserialization of renderer histograms
+  // into the browser.
+  enum ClassType {
+    HISTOGRAM,
+    LINEAR_HISTOGRAM,
+    BOOLEAN_HISTOGRAM,
+    CUSTOM_HISTOGRAM,
+    NOT_VALID_IN_RENDERER
+  };
+
+  enum BucketLayout {
+    EXPONENTIAL,
+    LINEAR,
+    CUSTOM
+  };
+
+  enum Flags {
+    kNoFlags = 0,
+    kUmaTargetedHistogramFlag = 0x1,  // Histogram should be UMA uploaded.
+
+    // Indicate that the histogram was pickled to be sent across an IPC Channel.
+    // If we observe this flag on a histogram being aggregated into after IPC,
+    // then we are running in a single process mode, and the aggregation should
+    // not take place (as we would be aggregating back into the source
+    // histogram!).
+    kIPCSerializationSourceFlag = 0x10,
+
+    // Trailing comma removed: not conforming C++03 in an enumerator-list.
+    kHexRangePrintingFlag = 0x8000  // Fancy bucket-naming supported.
+  };
+
+  struct DescriptionPair {
+    Sample sample;
+    const char* description;  // Null means end of a list of pairs.
+  };
+
+  //----------------------------------------------------------------------------
+  // Statistic values, developed over the life of the histogram.
+
+  class SampleSet {
+   public:
+    // Meaningless |explicit| removed from this zero-argument constructor.
+    SampleSet();
+    ~SampleSet();
+
+    // Adjust size of counts_ for use with given histogram.
+    void Resize(const Histogram& histogram);
+    void CheckSize(const Histogram& histogram) const;
+
+    // Accessor for histogram to make routine additions.
+    void Accumulate(Sample value, Count count, size_t index);
+
+    // Accessor methods.
+    Count counts(size_t i) const { return counts_[i]; }
+    Count TotalCount() const;
+    int64 sum() const { return sum_; }
+    int64 square_sum() const { return square_sum_; }
+
+    // Arithmetic manipulation of corresponding elements of the set.
+    void Add(const SampleSet& other);
+    void Subtract(const SampleSet& other);
+
+    bool Serialize(Pickle* pickle) const;
+    bool Deserialize(void** iter, const Pickle& pickle);
+
+   protected:
+    // Actual histogram data is stored in buckets, showing the count of values
+    // that fit into each bucket.
+    Counts counts_;
+
+    // Save simple stats locally. Note that this MIGHT get done in base class
+    // without shared memory at some point.
+    int64 sum_;         // sum of samples.
+    int64 square_sum_;  // sum of squares of samples.
+  };
+
+  //----------------------------------------------------------------------------
+  // minimum should start from 1. 0 is invalid as a minimum. 0 is an implicit
+  // default underflow bucket.
+  static scoped_refptr<Histogram> FactoryGet(const std::string& name,
+      Sample minimum, Sample maximum, size_t bucket_count, Flags flags);
+  static scoped_refptr<Histogram> FactoryTimeGet(const std::string& name,
+      base::TimeDelta minimum, base::TimeDelta maximum, size_t bucket_count,
+      Flags flags);
+
+  void Add(int value);
+
+  // This method is an interface, used only by BooleanHistogram.
+  virtual void AddBoolean(bool value);
+
+  // Accept a TimeDelta to increment, recorded as whole milliseconds.
+  void AddTime(TimeDelta time) {
+    Add(static_cast<int>(time.InMilliseconds()));
+  }
+
+  void AddSampleSet(const SampleSet& sample);
+
+  // This method is an interface, used only by LinearHistogram.
+  virtual void SetRangeDescriptions(const DescriptionPair descriptions[]);
+
+  // The following methods provide graphical histogram displays.
+  void WriteHTMLGraph(std::string* output) const;
+  void WriteAscii(bool graph_it, const std::string& newline,
+                  std::string* output) const;
+
+  // Support generic flagging of Histograms.
+  // 0x1 Currently used to mark this histogram to be recorded by UMA.
+  // 0x8000 means print ranges in hex.
+  void SetFlags(Flags flags) { flags_ = static_cast<Flags>(flags_ | flags); }
+  void ClearFlags(Flags flags) { flags_ = static_cast<Flags>(flags_ & ~flags); }
+  int flags() const { return flags_; }
+
+  // Convenience methods for serializing/deserializing the histograms.
+  // Histograms from Renderer process are serialized and sent to the browser.
+  // Browser process reconstructs the histogram from the pickled version
+  // accumulates the browser-side shadow copy of histograms (that mirror
+  // histograms created in the renderer).
+
+  // Serialize the given snapshot of a Histogram into a String. Uses
+  // Pickle class to flatten the object.
+  static std::string SerializeHistogramInfo(const Histogram& histogram,
+                                            const SampleSet& snapshot);
+  // The following method accepts a list of pickled histograms and
+  // builds a histogram and updates shadow copy of histogram data in the
+  // browser process.
+  static bool DeserializeHistogramInfo(const std::string& histogram_info);
+
+  //----------------------------------------------------------------------------
+  // Accessors for factory construction, serialization and testing.
+  //----------------------------------------------------------------------------
+  virtual ClassType histogram_type() const { return HISTOGRAM; }
+  const std::string& histogram_name() const { return histogram_name_; }
+  Sample declared_min() const { return declared_min_; }
+  Sample declared_max() const { return declared_max_; }
+  virtual Sample ranges(size_t i) const { return ranges_[i]; }
+  virtual size_t bucket_count() const { return bucket_count_; }
+  // Snapshot the current complete set of sample data.
+  // Override with atomic/locked snapshot if needed.
+  virtual void SnapshotSample(SampleSet* sample) const;
+
+  virtual bool HasConstructorArguments(Sample minimum, Sample maximum,
+                                       size_t bucket_count);
+
+  virtual bool HasConstructorTimeDeltaArguments(TimeDelta minimum,
+                                                TimeDelta maximum,
+                                                size_t bucket_count);
+
+ protected:
+  friend class base::RefCountedThreadSafe<Histogram>;
+  Histogram(const std::string& name, Sample minimum,
+            Sample maximum, size_t bucket_count);
+  Histogram(const std::string& name, TimeDelta minimum,
+            TimeDelta maximum, size_t bucket_count);
+
+  virtual ~Histogram();
+
+  // Method to override to skip the display of the i'th bucket if it's empty.
+  virtual bool PrintEmptyBucket(size_t index) const;
+
+  //----------------------------------------------------------------------------
+  // Methods to override to create histogram with different bucket widths.
+  //----------------------------------------------------------------------------
+  // Initialize ranges_ mapping.
+  virtual void InitializeBucketRange();
+  // Find bucket to increment for sample value.
+  virtual size_t BucketIndex(Sample value) const;
+  // Get normalized size, relative to the ranges_[i].
+  virtual double GetBucketSize(Count current, size_t i) const;
+
+  // Return a string description of what goes in a given bucket.
+  // Most commonly this is the numeric value, but in derived classes it may
+  // be a name (or string description) given to the bucket.
+  virtual const std::string GetAsciiBucketRange(size_t it) const;
+
+  //----------------------------------------------------------------------------
+  // Methods to override to create thread safe histogram.
+  //----------------------------------------------------------------------------
+  // Update all our internal data, including histogram
+  virtual void Accumulate(Sample value, Count count, size_t index);
+
+  //----------------------------------------------------------------------------
+  // Accessors for derived classes.
+  //----------------------------------------------------------------------------
+  void SetBucketRange(size_t i, Sample value);
+
+  // Validate that ranges_ was created sensibly (top and bottom range
+  // values relate properly to the declared_min_ and declared_max_).
+  bool ValidateBucketRanges() const;
+
+ private:
+  // Post constructor initialization.
+  void Initialize();
+
+  //----------------------------------------------------------------------------
+  // Helpers for emitting Ascii graphic. Each method appends data to output.
+
+  // Find out how large (graphically) the largest bucket will appear to be.
+  double GetPeakBucketSize(const SampleSet& snapshot) const;
+
+  // Write a common header message describing this histogram.
+  void WriteAsciiHeader(const SampleSet& snapshot,
+                        Count sample_count, std::string* output) const;
+
+  // Write information about previous, current, and next buckets.
+  // Information such as cumulative percentage, etc.
+  void WriteAsciiBucketContext(const int64 past, const Count current,
+                               const int64 remaining, const size_t i,
+                               std::string* output) const;
+
+  // Write textual description of the bucket contents (relative to histogram).
+  // Output is the count in the buckets, as well as the percentage.
+  void WriteAsciiBucketValue(Count current, double scaled_sum,
+                             std::string* output) const;
+
+  // Produce actual graph (set of blank vs non blank char's) for a bucket.
+  void WriteAsciiBucketGraph(double current_size, double max_size,
+                             std::string* output) const;
+
+  //----------------------------------------------------------------------------
+  // Invariant values set at/near construction time
+
+  // ASCII version of original name given to the constructor. All identically
+  // named instances will be coalesced cross-project.
+  const std::string histogram_name_;
+  Sample declared_min_;  // Less than this goes into counts_[0]
+  Sample declared_max_;  // Over this goes into counts_[bucket_count_ - 1].
+  size_t bucket_count_;  // Dimension of counts_[].
+
+  // Flag the histogram for recording by UMA via metric_services.h.
+  Flags flags_;
+
+  // For each index, show the least value that can be stored in the
+  // corresponding bucket. We also append one extra element in this array,
+  // containing kSampleType_MAX, to make calculations easy.
+  // The dimension of ranges_ is bucket_count + 1.
+  Ranges ranges_;
+
+  // Finally, provide the state that changes with the addition of each new
+  // sample.
+  SampleSet sample_;
+
+  DISALLOW_COPY_AND_ASSIGN(Histogram);
+};
+
+//------------------------------------------------------------------------------
+
+// LinearHistogram is a more traditional histogram, with evenly spaced
+// buckets.
+class LinearHistogram : public Histogram {
+ public:
+  virtual ClassType histogram_type() const;
+
+  // Store a list of number/text values for use in rendering the histogram.
+  // The last element in the array has a null in its "description" slot.
+  virtual void SetRangeDescriptions(const DescriptionPair descriptions[]);
+
+  // minimum should start from 1. 0 as a minimum is invalid. 0 is an implicit
+  // default underflow bucket.
+  static scoped_refptr<Histogram> FactoryGet(const std::string& name,
+      Sample minimum, Sample maximum, size_t bucket_count, Flags flags);
+  static scoped_refptr<Histogram> FactoryTimeGet(const std::string& name,
+      TimeDelta minimum, TimeDelta maximum, size_t bucket_count,
+      Flags flags);
+
+  virtual ~LinearHistogram();
+
+ protected:
+  LinearHistogram(const std::string& name, Sample minimum,
+                  Sample maximum, size_t bucket_count);
+
+  LinearHistogram(const std::string& name, TimeDelta minimum,
+                  TimeDelta maximum, size_t bucket_count);
+
+  // Initialize ranges_ mapping.
+  virtual void InitializeBucketRange();
+  virtual double GetBucketSize(Count current, size_t i) const;
+
+  // If we have a description for a bucket, then return that. Otherwise
+  // let parent class provide a (numeric) description.
+  virtual const std::string GetAsciiBucketRange(size_t i) const;
+
+  // Skip printing of name for numeric range if we have a name (and if this is
+  // an empty bucket).
+  virtual bool PrintEmptyBucket(size_t index) const;
+
+ private:
+  // For some ranges, we store a printable description of a bucket range.
+  // If there is no description, then GetAsciiBucketRange() uses parent class
+  // to provide a description.
+  typedef std::map<Sample, std::string> BucketDescriptionMap;
+  BucketDescriptionMap bucket_description_;
+
+  DISALLOW_COPY_AND_ASSIGN(LinearHistogram);
+};
+
+//------------------------------------------------------------------------------
+
+// BooleanHistogram is a histogram for booleans.
+class BooleanHistogram : public LinearHistogram {
+ public:
+  // Factory for obtaining the shared instance registered under |name|;
+  // see Histogram::FactoryGet for the general contract.
+  static scoped_refptr<Histogram> FactoryGet(const std::string& name,
+                                             Flags flags);
+
+  virtual ClassType histogram_type() const;
+
+  // Records a boolean sample (overrides the Histogram interface hook).
+  virtual void AddBoolean(bool value);
+
+ private:
+  explicit BooleanHistogram(const std::string& name);
+
+  DISALLOW_COPY_AND_ASSIGN(BooleanHistogram);
+};
+
+//------------------------------------------------------------------------------
+
+// CustomHistogram is a histogram for a set of custom integers.
+class CustomHistogram : public Histogram {
+ public:
+  virtual ClassType histogram_type() const;
+
+  // |custom_ranges| supplies the explicit bucket boundaries to use.
+  static scoped_refptr<Histogram> FactoryGet(const std::string& name,
+      const std::vector<int>& custom_ranges, Flags flags);
+
+ protected:
+  CustomHistogram(const std::string& name,
+                  const std::vector<int>& custom_ranges);
+
+  // Initialize ranges_ mapping.
+  virtual void InitializeBucketRange();
+  virtual double GetBucketSize(Count current, size_t i) const;
+
+ private:
+  // Temporary pointer used during construction/initialization, and then NULLed.
+  // NOTE(review): this presumably points at the caller's vector and is only
+  // dereferenced during construction — confirm against histogram.cc.
+  const std::vector<int>* ranges_vector_;
+
+  DISALLOW_COPY_AND_ASSIGN(CustomHistogram);
+};
+
+//------------------------------------------------------------------------------
+// StatisticsRecorder handles all histograms in the system. It provides a
+// general place for histograms to register, and supports a global API for
+// accessing (i.e., dumping, or graphing) the data in all the histograms.
+
+class StatisticsRecorder {
+ public:
+  typedef std::vector<scoped_refptr<Histogram> > Histograms;
+
+  StatisticsRecorder();
+
+  ~StatisticsRecorder();
+
+  // Find out if histograms can now be registered into our list.
+  static bool WasStarted();
+
+  // Register, or add a new histogram to the collection of statistics.
+  static void Register(Histogram* histogram);
+
+  // Methods for printing histograms. Only histograms which have query as
+  // a substring are written to output (an empty string will process all
+  // registered histograms).
+  static void WriteHTMLGraph(const std::string& query, std::string* output);
+  static void WriteGraph(const std::string& query, std::string* output);
+
+  // Copies every registered histogram into |output|.
+  // NOTE(review): an earlier comment said "histograms which were marked for
+  // use by UMA", but the implementation copies ALL registered histograms; any
+  // UMA filtering must happen in the caller.
+  static void GetHistograms(Histograms* output);
+
+  // Find a histogram by name. It matches the exact name (not a substring,
+  // unlike GetSnapshot). This method is thread safe. If a matching histogram
+  // is not found, then the |histogram| is not changed.
+  static bool FindHistogram(const std::string& query,
+                            scoped_refptr<Histogram>* histogram);
+
+  static bool dump_on_exit() { return dump_on_exit_; }
+
+  static void set_dump_on_exit(bool enable) { dump_on_exit_ = enable; }
+
+  // GetSnapshot copies some of the pointers to registered histograms into the
+  // caller supplied vector (Histograms). Only histograms with names matching
+  // query are returned. The query must be a substring of histogram name for its
+  // pointer to be copied.
+  static void GetSnapshot(const std::string& query, Histograms* snapshot);
+
+ private:
+  // We keep all registered histograms in a map, from name to histogram.
+  typedef std::map<std::string, scoped_refptr<Histogram> > HistogramMap;
+
+  static HistogramMap* histograms_;
+
+  // lock protects access to the above map.
+  static Lock* lock_;
+
+  // Dump all known histograms to log.
+  static bool dump_on_exit_;
+
+  DISALLOW_COPY_AND_ASSIGN(StatisticsRecorder);
+};
+
+} // namespace base
+
+#endif // BASE_METRICS_HISTOGRAM_H_
diff --git a/base/metrics/histogram_unittest.cc b/base/metrics/histogram_unittest.cc
new file mode 100644
index 0000000..e7e3983
--- /dev/null
+++ b/base/metrics/histogram_unittest.cc
@@ -0,0 +1,311 @@
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Test of Histogram class
+
+#include "base/metrics/histogram.h"
+#include "base/time.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+namespace {
+
+// Empty fixture; the tests below use the plain TEST macro and share no
+// per-test state.
+class HistogramTest : public testing::Test {
+};
+
+// Check for basic syntax and use.
+TEST(HistogramTest, StartupShutdownTest) {
+ // Try basic construction
+ scoped_refptr<Histogram> histogram = Histogram::FactoryGet(
+ "TestHistogram", 1, 1000, 10, Histogram::kNoFlags);
+ scoped_refptr<Histogram> histogram1 = Histogram::FactoryGet(
+ "Test1Histogram", 1, 1000, 10, Histogram::kNoFlags);
+
+ scoped_refptr<Histogram> linear_histogram = LinearHistogram::FactoryGet(
+ "TestLinearHistogram", 1, 1000, 10, Histogram::kNoFlags);
+ scoped_refptr<Histogram> linear_histogram1 = LinearHistogram::FactoryGet(
+ "Test1LinearHistogram", 1, 1000, 10, Histogram::kNoFlags);
+
+ std::vector<int> custom_ranges;
+ custom_ranges.push_back(1);
+ custom_ranges.push_back(5);
+ custom_ranges.push_back(10);
+ custom_ranges.push_back(20);
+ custom_ranges.push_back(30);
+ scoped_refptr<Histogram> custom_histogram = CustomHistogram::FactoryGet(
+ "TestCustomHistogram", custom_ranges, Histogram::kNoFlags);
+ scoped_refptr<Histogram> custom_histogram1 = CustomHistogram::FactoryGet(
+ "Test1CustomHistogram", custom_ranges, Histogram::kNoFlags);
+
+ // Use standard macros (but with fixed samples)
+ HISTOGRAM_TIMES("Test2Histogram", TimeDelta::FromDays(1));
+ HISTOGRAM_COUNTS("Test3Histogram", 30);
+
+ DHISTOGRAM_TIMES("Test4Histogram", TimeDelta::FromDays(1));
+ DHISTOGRAM_COUNTS("Test5Histogram", 30);
+
+ HISTOGRAM_ENUMERATION("Test6Histogram", 129, 130);
+
+ // Try to construct samples.
+ Histogram::SampleSet sample1;
+ Histogram::SampleSet sample2;
+
+ // Use copy constructor of SampleSet
+ sample1 = sample2;
+ Histogram::SampleSet sample3(sample1);
+
+ // Finally test a statistics recorder, without really using it.
+ StatisticsRecorder recorder;
+}
+
+// Repeat with a recorder present to register with.
+TEST(HistogramTest, RecordedStartupTest) {
+  // Test a statistics recorder, by letting histograms register.
+  StatisticsRecorder recorder;  // This initializes the global state.
+
+  StatisticsRecorder::Histograms histograms;
+  EXPECT_EQ(0U, histograms.size());
+  StatisticsRecorder::GetHistograms(&histograms);  // Load up lists
+  EXPECT_EQ(0U, histograms.size());
+
+  // Try basic construction
+  scoped_refptr<Histogram> histogram = Histogram::FactoryGet(
+      "TestHistogram", 1, 1000, 10, Histogram::kNoFlags);
+  histograms.clear();
+  StatisticsRecorder::GetHistograms(&histograms);  // Load up lists
+  EXPECT_EQ(1U, histograms.size());
+  scoped_refptr<Histogram> histogram1 = Histogram::FactoryGet(
+      "Test1Histogram", 1, 1000, 10, Histogram::kNoFlags);
+  histograms.clear();
+  StatisticsRecorder::GetHistograms(&histograms);  // Load up lists
+  EXPECT_EQ(2U, histograms.size());
+
+  scoped_refptr<Histogram> linear_histogram = LinearHistogram::FactoryGet(
+      "TestLinearHistogram", 1, 1000, 10, Histogram::kNoFlags);
+  histograms.clear();
+  StatisticsRecorder::GetHistograms(&histograms);  // Load up lists
+  EXPECT_EQ(3U, histograms.size());
+
+  scoped_refptr<Histogram> linear_histogram1 = LinearHistogram::FactoryGet(
+      "Test1LinearHistogram", 1, 1000, 10, Histogram::kNoFlags);
+  histograms.clear();
+  StatisticsRecorder::GetHistograms(&histograms);  // Load up lists
+  EXPECT_EQ(4U, histograms.size());
+
+  std::vector<int> custom_ranges;
+  custom_ranges.push_back(1);
+  custom_ranges.push_back(5);
+  custom_ranges.push_back(10);
+  custom_ranges.push_back(20);
+  custom_ranges.push_back(30);
+  scoped_refptr<Histogram> custom_histogram = CustomHistogram::FactoryGet(
+      "TestCustomHistogram", custom_ranges, Histogram::kNoFlags);
+  // Deliberately reuse the name "TestCustomHistogram": FactoryGet hands back
+  // the already-registered histogram, so the registered count below is 5.
+  scoped_refptr<Histogram> custom_histogram1 = CustomHistogram::FactoryGet(
+      "TestCustomHistogram", custom_ranges, Histogram::kNoFlags);
+
+  histograms.clear();
+  StatisticsRecorder::GetHistograms(&histograms);  // Load up lists
+  EXPECT_EQ(5U, histograms.size());
+
+  // Use standard macros (but with fixed samples)
+  HISTOGRAM_TIMES("Test2Histogram", TimeDelta::FromDays(1));
+  HISTOGRAM_COUNTS("Test3Histogram", 30);
+  histograms.clear();
+  StatisticsRecorder::GetHistograms(&histograms);  // Load up lists
+  EXPECT_EQ(7U, histograms.size());
+
+  HISTOGRAM_ENUMERATION("TestEnumerationHistogram", 20, 200);
+  histograms.clear();
+  StatisticsRecorder::GetHistograms(&histograms);  // Load up lists
+  EXPECT_EQ(8U, histograms.size());
+
+  // The DHISTOGRAM_* macros register histograms only in debug builds.
+  DHISTOGRAM_TIMES("Test4Histogram", TimeDelta::FromDays(1));
+  DHISTOGRAM_COUNTS("Test5Histogram", 30);
+  histograms.clear();
+  StatisticsRecorder::GetHistograms(&histograms);  // Load up lists
+#ifndef NDEBUG
+  EXPECT_EQ(10U, histograms.size());
+#else
+  EXPECT_EQ(8U, histograms.size());
+#endif
+}
+
+TEST(HistogramTest, RangeTest) {
+  StatisticsRecorder recorder;
+  StatisticsRecorder::Histograms histograms;
+
+  recorder.GetHistograms(&histograms);
+  EXPECT_EQ(0U, histograms.size());
+
+  scoped_refptr<Histogram> histogram = Histogram::FactoryGet(
+      "Histogram", 1, 64, 8, Histogram::kNoFlags);  // As per header file.
+  // Check that we got a nice exponential when there was enough room.
+  EXPECT_EQ(0, histogram->ranges(0));
+  int power_of_2 = 1;
+  for (int i = 1; i < 8; i++) {
+    EXPECT_EQ(power_of_2, histogram->ranges(i));
+    power_of_2 *= 2;
+  }
+  EXPECT_EQ(INT_MAX, histogram->ranges(8));
+
+  scoped_refptr<Histogram> short_histogram = Histogram::FactoryGet(
+      "Histogram Shortened", 1, 7, 8, Histogram::kNoFlags);
+  // Check that when the number of buckets is short, we get a linear histogram
+  // for lack of space to do otherwise.
+  for (int i = 0; i < 8; i++)
+    EXPECT_EQ(i, short_histogram->ranges(i));
+  EXPECT_EQ(INT_MAX, short_histogram->ranges(8));
+
+  scoped_refptr<Histogram> linear_histogram = LinearHistogram::FactoryGet(
+      "Linear", 1, 7, 8, Histogram::kNoFlags);
+  // We also get a nice linear set of bucket ranges when we ask for it
+  for (int i = 0; i < 8; i++)
+    EXPECT_EQ(i, linear_histogram->ranges(i));
+  EXPECT_EQ(INT_MAX, linear_histogram->ranges(8));
+
+  scoped_refptr<Histogram> linear_broad_histogram = LinearHistogram::FactoryGet(
+      "Linear widened", 2, 14, 8, Histogram::kNoFlags);
+  // ...but when the list has more space, then the ranges naturally spread out.
+  for (int i = 0; i < 8; i++)
+    EXPECT_EQ(2 * i, linear_broad_histogram->ranges(i));
+  EXPECT_EQ(INT_MAX, linear_broad_histogram->ranges(8));
+
+  scoped_refptr<Histogram> transitioning_histogram =
+      Histogram::FactoryGet("LinearAndExponential", 1, 32, 15,
+                            Histogram::kNoFlags);
+  // When space is a little tight, we transition from linear to exponential.
+  EXPECT_EQ(0, transitioning_histogram->ranges(0));
+  EXPECT_EQ(1, transitioning_histogram->ranges(1));
+  EXPECT_EQ(2, transitioning_histogram->ranges(2));
+  EXPECT_EQ(3, transitioning_histogram->ranges(3));
+  EXPECT_EQ(4, transitioning_histogram->ranges(4));
+  EXPECT_EQ(5, transitioning_histogram->ranges(5));
+  EXPECT_EQ(6, transitioning_histogram->ranges(6));
+  EXPECT_EQ(7, transitioning_histogram->ranges(7));
+  EXPECT_EQ(9, transitioning_histogram->ranges(8));
+  EXPECT_EQ(11, transitioning_histogram->ranges(9));
+  EXPECT_EQ(14, transitioning_histogram->ranges(10));
+  EXPECT_EQ(17, transitioning_histogram->ranges(11));
+  EXPECT_EQ(21, transitioning_histogram->ranges(12));
+  EXPECT_EQ(26, transitioning_histogram->ranges(13));
+  EXPECT_EQ(32, transitioning_histogram->ranges(14));
+  EXPECT_EQ(INT_MAX, transitioning_histogram->ranges(15));
+
+  // Custom ranges are used verbatim (they already start at 0 here).
+  std::vector<int> custom_ranges;
+  custom_ranges.push_back(0);
+  custom_ranges.push_back(9);
+  custom_ranges.push_back(10);
+  custom_ranges.push_back(11);
+  custom_ranges.push_back(300);
+  scoped_refptr<Histogram> test_custom_histogram = CustomHistogram::FactoryGet(
+      "TestCustomRangeHistogram", custom_ranges, Histogram::kNoFlags);
+
+  EXPECT_EQ(custom_ranges[0], test_custom_histogram->ranges(0));
+  EXPECT_EQ(custom_ranges[1], test_custom_histogram->ranges(1));
+  EXPECT_EQ(custom_ranges[2], test_custom_histogram->ranges(2));
+  EXPECT_EQ(custom_ranges[3], test_custom_histogram->ranges(3));
+  EXPECT_EQ(custom_ranges[4], test_custom_histogram->ranges(4));
+
+  recorder.GetHistograms(&histograms);
+  EXPECT_EQ(6U, histograms.size());
+}
+
+TEST(HistogramTest, CustomRangeTest) {
+  StatisticsRecorder recorder;
+  StatisticsRecorder::Histograms histograms;
+
+  // Check that missing leading zero is handled by an auto-insertion.
+  std::vector<int> custom_ranges;
+  // Don't include a zero.
+  custom_ranges.push_back(9);
+  custom_ranges.push_back(10);
+  custom_ranges.push_back(11);
+  scoped_refptr<Histogram> test_custom_histogram = CustomHistogram::FactoryGet(
+      "TestCustomRangeHistogram", custom_ranges, Histogram::kNoFlags);
+
+  EXPECT_EQ(0, test_custom_histogram->ranges(0));  // Auto added
+  EXPECT_EQ(custom_ranges[0], test_custom_histogram->ranges(1));
+  EXPECT_EQ(custom_ranges[1], test_custom_histogram->ranges(2));
+  EXPECT_EQ(custom_ranges[2], test_custom_histogram->ranges(3));
+
+  // Check that unsorted data with dups is handled gracefully:
+  // expect the resulting ranges to be deduped and sorted ascending.
+  const int kSmall = 7;
+  const int kMid = 8;
+  const int kBig = 9;
+  custom_ranges.clear();
+  custom_ranges.push_back(kBig);
+  custom_ranges.push_back(kMid);
+  custom_ranges.push_back(kSmall);
+  custom_ranges.push_back(kSmall);
+  custom_ranges.push_back(kMid);
+  custom_ranges.push_back(0);  // Push an explicit zero.
+  custom_ranges.push_back(kBig);
+
+  scoped_refptr<Histogram> unsorted_histogram = CustomHistogram::FactoryGet(
+      "TestCustomUnsortedDupedHistogram", custom_ranges, Histogram::kNoFlags);
+  EXPECT_EQ(0, unsorted_histogram->ranges(0));
+  EXPECT_EQ(kSmall, unsorted_histogram->ranges(1));
+  EXPECT_EQ(kMid, unsorted_histogram->ranges(2));
+  EXPECT_EQ(kBig, unsorted_histogram->ranges(3));
+}
+
+
+// Make sure histogram handles out-of-bounds data gracefully.
+TEST(HistogramTest, BoundsTest) {
+ const size_t kBucketCount = 50;
+ scoped_refptr<Histogram> histogram = Histogram::FactoryGet(
+ "Bounded", 10, 100, kBucketCount, Histogram::kNoFlags);
+
+ // Put two samples "out of bounds" above and below.
+ histogram->Add(5);
+ histogram->Add(-50);
+
+ histogram->Add(100);
+ histogram->Add(10000);
+
+ // Verify they landed in the underflow, and overflow buckets.
+ Histogram::SampleSet sample;
+ histogram->SnapshotSample(&sample);
+ EXPECT_EQ(2, sample.counts(0));
+ EXPECT_EQ(0, sample.counts(1));
+ size_t array_size = histogram->bucket_count();
+ EXPECT_EQ(kBucketCount, array_size);
+ EXPECT_EQ(0, sample.counts(array_size - 2));
+ EXPECT_EQ(2, sample.counts(array_size - 1));
+}
+
+// Check to be sure samples land as expected is "correct" buckets.
+TEST(HistogramTest, BucketPlacementTest) {
+ scoped_refptr<Histogram> histogram = Histogram::FactoryGet(
+ "Histogram", 1, 64, 8, Histogram::kNoFlags); // As per header file.
+
+ // Check that we got a nice exponential since there was enough rooom.
+ EXPECT_EQ(0, histogram->ranges(0));
+ int power_of_2 = 1;
+ for (int i = 1; i < 8; i++) {
+ EXPECT_EQ(power_of_2, histogram->ranges(i));
+ power_of_2 *= 2;
+ }
+ EXPECT_EQ(INT_MAX, histogram->ranges(8));
+
+ // Add i+1 samples to the i'th bucket.
+ histogram->Add(0);
+ power_of_2 = 1;
+ for (int i = 1; i < 8; i++) {
+ for (int j = 0; j <= i; j++)
+ histogram->Add(power_of_2);
+ power_of_2 *= 2;
+ }
+ // Leave overflow bucket empty.
+
+ // Check to see that the bucket counts reflect our additions.
+ Histogram::SampleSet sample;
+ histogram->SnapshotSample(&sample);
+ EXPECT_EQ(INT_MAX, histogram->ranges(8));
+ for (int i = 0; i < 8; i++)
+ EXPECT_EQ(i + 1, sample.counts(i));
+}
+
+} // namespace
+} // namespace base
diff --git a/base/metrics/stats_counters.cc b/base/metrics/stats_counters.cc
new file mode 100644
index 0000000..958f048
--- /dev/null
+++ b/base/metrics/stats_counters.cc
@@ -0,0 +1,117 @@
+// Copyright (c) 2010 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/metrics/stats_counters.h"
+
+namespace base {
+
+// Builds a counter backed by the StatsTable row named "c:<name>".
+StatsCounter::StatsCounter(const std::string& name)
+    : counter_id_(-1) {
+  // We prepend the name with 'c:' to indicate that it is a counter.
+  name_ = "c:";
+  name_.append(name);
+}
+
+StatsCounter::~StatsCounter() {
+}
+
+// Writes |value| into this thread's slot; no-op when no table is available.
+void StatsCounter::Set(int value) {
+  int* loc = GetPtr();
+  if (loc)
+    *loc = value;
+}
+
+// Adds |value| to this thread's slot; no-op when no table is available.
+void StatsCounter::Add(int value) {
+  int* loc = GetPtr();
+  if (loc)
+    (*loc) += value;
+}
+
+// Protected default constructor; subclasses are expected to fill in name_.
+StatsCounter::StatsCounter()
+    : counter_id_(-1) {
+}
+
+// Resolves (and caches) the address of this counter's slot for the calling
+// thread. Returns NULL when there is no table, the thread cannot be
+// registered, or the counter table is full (counter_id_ == 0).
+int* StatsCounter::GetPtr() {
+  StatsTable* table = StatsTable::current();
+  if (!table)
+    return NULL;
+
+  // If counter_id_ is -1, then we haven't looked it up yet.
+  if (counter_id_ == -1) {
+    counter_id_ = table->FindCounter(name_);
+    if (table->GetSlot() == 0) {
+      if (!table->RegisterThread("")) {
+        // There is no room for this thread. This thread
+        // cannot use counters.
+        // NOTE(review): zeroing counter_id_ permanently disables this counter
+        // object even if a thread slot later frees up — confirm intended.
+        counter_id_ = 0;
+        return NULL;
+      }
+    }
+  }
+
+  // If counter_id_ is > 0, then we have a valid counter.
+  if (counter_id_ > 0)
+    return table->GetLocation(counter_id_, table->GetSlot());
+
+  // counter_id_ was zero, which means the table is full.
+  return NULL;
+}
+
+
+StatsCounterTimer::StatsCounterTimer(const std::string& name) {
+ // we prepend the name with 't:' to indicate that it is a timer.
+ name_ = "t:";
+ name_.append(name);
+}
+
+StatsCounterTimer::~StatsCounterTimer() {
+}
+
+void StatsCounterTimer::Start() {
+ if (!Enabled())
+ return;
+ start_time_ = TimeTicks::Now();
+ stop_time_ = TimeTicks();
+}
+
+// Stop the timer and record the results.
+void StatsCounterTimer::Stop() {
+ if (!Enabled() || !Running())
+ return;
+ stop_time_ = TimeTicks::Now();
+ Record();
+}
+
+// Returns true if the timer is running.
+bool StatsCounterTimer::Running() {
+ return Enabled() && !start_time_.is_null() && stop_time_.is_null();
+}
+
+// Accept a TimeDelta to increment.
+void StatsCounterTimer::AddTime(TimeDelta time) {
+ Add(static_cast<int>(time.InMilliseconds()));
+}
+
+void StatsCounterTimer::Record() {
+ AddTime(stop_time_ - start_time_);
+}
+
+
+StatsRate::StatsRate(const std::string& name)
+    : StatsCounterTimer(name),
+      counter_(name),
+      // The leading space makes this row sort ahead of |name| in listings;
+      // the MAX suffix marks it as tracking the largest single Add().
+      largest_add_(std::string(" ").append(name).append("MAX")) {
+}
+
+StatsRate::~StatsRate() {
+}
+
+// Records one interval: bumps the interval count, accumulates the total,
+// and tracks the largest single value seen so far.
+// NOTE(review): the read-compare-write on |largest_add_| is not atomic, so
+// concurrent Add() calls could lose a maximum — confirm callers tolerate this.
+void StatsRate::Add(int value) {
+  counter_.Increment();
+  StatsCounterTimer::Add(value);
+  if (value > largest_add_.value())
+    largest_add_.Set(value);
+}
+
+} // namespace base
diff --git a/base/metrics/stats_counters.h b/base/metrics/stats_counters.h
new file mode 100644
index 0000000..2de2b73
--- /dev/null
+++ b/base/metrics/stats_counters.h
@@ -0,0 +1,196 @@
+// Copyright (c) 2010 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_METRICS_STATS_COUNTERS_H_
+#define BASE_METRICS_STATS_COUNTERS_H_
+#pragma once
+
+#include <string>
+
+#include "base/metrics/stats_table.h"
+#include "base/time.h"
+
+namespace base {
+
+// StatsCounters are dynamically created values which can be tracked in
+// the StatsTable. They are designed to be lightweight to create and
+// easy to use.
+//
+// Since StatsCounters can be created dynamically by name, there is
+// a hash table lookup to find the counter in the table. A StatsCounter
+// object can be created once and used across multiple threads safely.
+//
+// Example usage:
+// {
+// StatsCounter request_count("RequestCount");
+// request_count.Increment();
+// }
+//
+// Note that creating counters on the stack does work, however creating
+// the counter object requires a hash table lookup. For inner loops, it
+// may be better to create the counter either as a member of another object
+// (or otherwise outside of the loop) for maximum performance.
+//
+// Internally, a counter represents a value in a row of a StatsTable.
+// The row has a 32bit value for each process/thread in the table and also
+// a name (stored in the table metadata).
+//
+// NOTE: In order to make stats_counters usable in lots of different code,
+// avoid any dependencies inside this header file.
+//
+
+//------------------------------------------------------------------------------
+// Define macros for ease of use. They also allow us to change definitions
+// as the implementation varies, or depending on compile options.
+//------------------------------------------------------------------------------
+// First provide generic macros, which exist in production as well as debug.
+// The do { } while (0) wrapper makes each expansion a single statement that
+// is safe inside an un-braced if/else; the function-local static means the
+// counter object is constructed only on first execution of the statement.
+#define STATS_COUNTER(name, delta) do { \
+  static base::StatsCounter counter(name); \
+  counter.Add(delta); \
+} while (0)
+
+#define SIMPLE_STATS_COUNTER(name) STATS_COUNTER(name, 1)
+
+#define RATE_COUNTER(name, duration) do { \
+  static base::StatsRate hit_count(name); \
+  hit_count.AddTime(duration); \
+} while (0)
+
+// Define Debug vs non-debug flavors of macros.
+#ifndef NDEBUG
+
+#define DSTATS_COUNTER(name, delta) STATS_COUNTER(name, delta)
+#define DSIMPLE_STATS_COUNTER(name) SIMPLE_STATS_COUNTER(name)
+#define DRATE_COUNTER(name, duration) RATE_COUNTER(name, duration)
+
+#else  // NDEBUG
+
+#define DSTATS_COUNTER(name, delta) do {} while (0)
+#define DSIMPLE_STATS_COUNTER(name) do {} while (0)
+#define DRATE_COUNTER(name, duration) do {} while (0)
+
+#endif  // NDEBUG
+
+//------------------------------------------------------------------------------
+// StatsCounter represents a counter in the StatsTable class.
+class StatsCounter {
+ public:
+  // Create a StatsCounter object.
+  explicit StatsCounter(const std::string& name);
+  virtual ~StatsCounter();
+
+  // Sets the counter to a specific value.
+  void Set(int value);
+
+  // Increments the counter.
+  void Increment() {
+    Add(1);
+  }
+
+  // Adds |value| to this thread's slot; no-op when no StatsTable is
+  // active or the table is full.
+  virtual void Add(int value);
+
+  // Decrements the counter.
+  void Decrement() {
+    Add(-1);
+  }
+
+  // Subtracts |value| from the counter.
+  void Subtract(int value) {
+    Add(-value);
+  }
+
+  // Is this counter enabled?
+  // Returns false if table is full.
+  bool Enabled() {
+    return GetPtr() != NULL;
+  }
+
+  // Returns the value in this thread's slot, or 0 when unavailable.
+  int value() {
+    int* loc = GetPtr();
+    if (loc) return *loc;
+    return 0;
+  }
+
+ protected:
+  // Default constructor for subclasses that set name_ themselves.
+  StatsCounter();
+
+  // Returns the cached address of this counter location.
+  int* GetPtr();
+
+  std::string name_;
+  // The counter id in the table. We initialize to -1 (an invalid value)
+  // and then cache it once it has been looked up. The counter_id is
+  // valid across all threads and processes.
+  int32 counter_id_;
+};
+
+
+// A StatsCounterTimer is a StatsCounter which keeps a timer during
+// the scope of the StatsCounterTimer. Stop() records the elapsed time
+// (the destructor itself records nothing — see stats_counters.cc); wrap
+// one in a StatsScope<> to have Stop() called automatically at scope exit.
+class StatsCounterTimer : protected StatsCounter {
+ public:
+  // Constructs the timer; timing does not begin until Start() is called.
+  explicit StatsCounterTimer(const std::string& name);
+  virtual ~StatsCounterTimer();
+
+  // Start the timer.
+  void Start();
+
+  // Stop the timer and record the results.
+  void Stop();
+
+  // Returns true if the timer is running.
+  bool Running();
+
+  // Accept a TimeDelta to increment.
+  virtual void AddTime(TimeDelta time);
+
+ protected:
+  // Compute the delta between start and stop, in milliseconds.
+  void Record();
+
+  // Null (is_null()) when the timer is not running.
+  TimeTicks start_time_;
+  TimeTicks stop_time_;
+};
+
+// A StatsRate is a timer that keeps a count of the number of intervals added so
+// that several statistics can be produced:
+// min, max, avg, count, total
+class StatsRate : public StatsCounterTimer {
+ public:
+  // Constructs the rate tracker; use Start()/Stop() (or AddTime) to
+  // record intervals.
+  explicit StatsRate(const std::string& name);
+  virtual ~StatsRate();
+
+  // Records one interval: bumps the interval count, accumulates the
+  // total, and tracks the largest single value added.
+  virtual void Add(int value);
+
+ private:
+  // Number of intervals recorded.
+  StatsCounter counter_;
+  // Largest single Add() value seen (stored in the row " <name>MAX").
+  StatsCounter largest_add_;
+};
+
+
// Helper class for scoping a timer or rate (RAII): Start() is called on
// construction and Stop() is guaranteed at scope exit. T must provide
// Start() and Stop(); calling Stop() twice must be harmless, since an
// explicit Stop() here is still followed by one from the destructor.
template<class T> class StatsScope {
 public:
  // Starts |timer| immediately. Note the declarator is the plain
  // injected-class-name: writing "StatsScope<T>(T&)" for a constructor is
  // ill-formed and rejected by C++20-conforming compilers.
  explicit StatsScope(T& timer)
      : timer_(timer) {
    timer_.Start();
  }

  ~StatsScope() {
    timer_.Stop();
  }

  // Stops the timer early, before the scope ends.
  void Stop() {
    timer_.Stop();
  }

 private:
  T& timer_;
};
+
+} // namespace base
+
+#endif // BASE_METRICS_STATS_COUNTERS_H_
diff --git a/base/metrics/stats_table.cc b/base/metrics/stats_table.cc
new file mode 100644
index 0000000..98b2f57
--- /dev/null
+++ b/base/metrics/stats_table.cc
@@ -0,0 +1,553 @@
+// Copyright (c) 2010 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/metrics/stats_table.h"
+
+#include "base/logging.h"
+#include "base/platform_thread.h"
+#include "base/process_util.h"
+#include "base/scoped_ptr.h"
+#include "base/shared_memory.h"
+#include "base/string_piece.h"
+#include "base/string_util.h"
+#include "base/thread_local_storage.h"
+#include "base/utf_string_conversions.h"
+
+#if defined(OS_POSIX)
+#include "errno.h"
+#endif
+
+namespace base {
+
+// The StatsTable uses a shared memory segment that is laid out as follows
+//
+// +-------------------------------------------+
+// | Version | Size | MaxCounters | MaxThreads |
+// +-------------------------------------------+
+// | Thread names table |
+// +-------------------------------------------+
+// | Thread TID table |
+// +-------------------------------------------+
+// | Thread PID table |
+// +-------------------------------------------+
+// | Counter names table |
+// +-------------------------------------------+
+// | Data |
+// +-------------------------------------------+
+//
+// The data layout is a grid, where the columns are the thread_ids and the
+// rows are the counter_ids.
+//
+// If the first character of the thread_name is '\0', then that column is
+// empty.
+// If the first character of the counter_name is '\0', then that row is
+// empty.
+//
+// About Locking:
+// This class is designed to be both multi-thread and multi-process safe.
+// Aside from initialization, this is done by partitioning the data which
+// each thread uses so that no locking is required. However, to allocate
+// the rows and columns of the table to particular threads, locking is
+// required.
+//
+// At the shared-memory level, we have a lock. This lock protects the
+// shared-memory table only, and is used when we create new counters (e.g.
+// use rows) or when we register new threads (e.g. use columns). Reading
+// data from the table does not require any locking at the shared memory
+// level.
+//
+// Each process which accesses the table will create a StatsTable object.
+// The StatsTable maintains a hash table of the existing counters in the
+// table for faster lookup. Since the hash table is process specific,
+// each process maintains its own cache. We avoid complexity here by never
+// de-allocating from the hash table. (Counters are dynamically added,
+// but not dynamically removed).
+
+// In order for external viewers to be able to read our shared memory,
+// we all need to use the same size ints.
+COMPILE_ASSERT(sizeof(int)==4, expect_4_byte_ints);
+
namespace {

// Version stamp for the shared segment; lets us recognize (and, if it ever
// changes, re-initialize) a table in our own format.
const int kTableVersion = 0x13131313;

// The name used for un-named counters and threads in the table.
const char kUnknownName[] = "<unknown>";

// Returns how many bytes must be added to |offset| to reach the next
// int-aligned boundary (zero when already aligned).
inline int AlignOffset(int offset) {
  size_t misalignment = offset % sizeof(int);
  return (sizeof(int) - misalignment) % sizeof(int);
}

// Rounds |size| up to an int-aligned boundary.
inline int AlignedSize(int size) {
  return size + AlignOffset(size);
}

}  // namespace
+
+// The StatsTable::Private maintains convenience pointers into the
+// shared memory segment. Use this class to keep the data structure
+// clean and accessible.
+class StatsTable::Private {
+ public:
+  // Various header information contained in the memory mapped segment.
+  struct TableHeader {
+    int version;
+    int size;
+    int max_counters;
+    int max_threads;
+  };
+
+  // Construct a new Private based on expected size parameters, or
+  // return NULL on failure.
+  static Private* New(const std::string& name, int size,
+                      int max_threads, int max_counters);
+
+  SharedMemory* shared_memory() { return &shared_memory_; }
+
+  // Accessors for our header pointers
+  TableHeader* table_header() const { return table_header_; }
+  int version() const { return table_header_->version; }
+  int size() const { return table_header_->size; }
+  int max_counters() const { return table_header_->max_counters; }
+  int max_threads() const { return table_header_->max_threads; }
+
+  // Accessors for our tables. Ids are 1-based externally (0 means
+  // "none"), hence the -1 when indexing the 0-based arrays below.
+  char* thread_name(int slot_id) const {
+    return &thread_names_table_[
+        (slot_id-1) * (StatsTable::kMaxThreadNameLength)];
+  }
+  PlatformThreadId* thread_tid(int slot_id) const {
+    return &(thread_tid_table_[slot_id-1]);
+  }
+  int* thread_pid(int slot_id) const {
+    return &(thread_pid_table_[slot_id-1]);
+  }
+  char* counter_name(int counter_id) const {
+    return &counter_names_table_[
+        (counter_id-1) * (StatsTable::kMaxCounterNameLength)];
+  }
+  int* row(int counter_id) const {
+    return &data_table_[(counter_id-1) * max_threads()];
+  }
+
+ private:
+  // Constructor is private because you should use New() instead.
+  Private() {}
+
+  // Initializes the table on first access. Sets header values
+  // appropriately and zeroes all counters.
+  void InitializeTable(void* memory, int size, int max_counters,
+                       int max_threads);
+
+  // Initializes our in-memory pointers into a pre-created StatsTable.
+  void ComputeMappedPointers(void* memory);
+
+  SharedMemory shared_memory_;
+  // The pointers below alias the segment mapped by shared_memory_; they
+  // are not separately owned.
+  TableHeader* table_header_;
+  char* thread_names_table_;
+  PlatformThreadId* thread_tid_table_;
+  int* thread_pid_table_;
+  char* counter_names_table_;
+  int* data_table_;
+};
+
+// static
+StatsTable::Private* StatsTable::Private::New(const std::string& name,
+                                              int size,
+                                              int max_threads,
+                                              int max_counters) {
+  // scoped_ptr frees the partially-built object on the early error returns.
+  scoped_ptr<Private> priv(new Private());
+  if (!priv->shared_memory_.Create(name, false, true, size))
+    return NULL;
+  if (!priv->shared_memory_.Map(size))
+    return NULL;
+  void* memory = priv->shared_memory_.memory();
+
+  TableHeader* header = static_cast<TableHeader*>(memory);
+
+  // If the version does not match, then assume the table needs
+  // to be initialized.
+  if (header->version != kTableVersion)
+    priv->InitializeTable(memory, size, max_counters, max_threads);
+
+  // We have a valid table, so compute our pointers.
+  priv->ComputeMappedPointers(memory);
+
+  return priv.release();
+}
+
+void StatsTable::Private::InitializeTable(void* memory, int size,
+ int max_counters,
+ int max_threads) {
+ // Zero everything.
+ memset(memory, 0, size);
+
+ // Initialize the header.
+ TableHeader* header = static_cast<TableHeader*>(memory);
+ header->version = kTableVersion;
+ header->size = size;
+ header->max_counters = max_counters;
+ header->max_threads = max_threads;
+}
+
+// Lays the convenience pointers over the mapped segment, advancing an
+// offset through the sections in layout order (header, thread names,
+// tids, pids, counter names, data) with int alignment between sections.
+void StatsTable::Private::ComputeMappedPointers(void* memory) {
+  char* data = static_cast<char*>(memory);
+  int offset = 0;
+
+  table_header_ = reinterpret_cast<TableHeader*>(data);
+  offset += sizeof(*table_header_);
+  offset += AlignOffset(offset);
+
+  // Verify we're looking at a valid StatsTable.
+  DCHECK_EQ(table_header_->version, kTableVersion);
+
+  thread_names_table_ = reinterpret_cast<char*>(data + offset);
+  offset += sizeof(char) *
+      max_threads() * StatsTable::kMaxThreadNameLength;
+  offset += AlignOffset(offset);
+
+  // NOTE(review): the offset advances by sizeof(int) while the table is
+  // typed PlatformThreadId* — this assumes sizeof(PlatformThreadId) ==
+  // sizeof(int); confirm on platforms with 64-bit thread ids.
+  thread_tid_table_ = reinterpret_cast<PlatformThreadId*>(data + offset);
+  offset += sizeof(int) * max_threads();
+  offset += AlignOffset(offset);
+
+  thread_pid_table_ = reinterpret_cast<int*>(data + offset);
+  offset += sizeof(int) * max_threads();
+  offset += AlignOffset(offset);
+
+  counter_names_table_ = reinterpret_cast<char*>(data + offset);
+  offset += sizeof(char) *
+      max_counters() * StatsTable::kMaxCounterNameLength;
+  offset += AlignOffset(offset);
+
+  data_table_ = reinterpret_cast<int*>(data + offset);
+  offset += sizeof(int) * max_threads() * max_counters();
+
+  // The walked offset must exactly consume the size recorded in the header.
+  DCHECK_EQ(offset, size());
+}
+
+// TLSData carries the data stored in the TLS slots for the
+// StatsTable. This is used so that we can properly cleanup when the
+// thread exits and return the table slot.
+//
+// Each thread that calls RegisterThread in the StatsTable will have
+// a TLSData stored in its TLS.
+struct StatsTable::TLSData {
+  StatsTable* table;  // The table this thread registered with (not owned).
+  int slot;           // 1-based column assigned to this thread.
+};
+
+// We keep a singleton table which can be easily accessed.
+StatsTable* StatsTable::global_table_ = NULL;
+
+// Creates (or opens) the named shared-memory table, sized to hold
+// |max_threads| columns and |max_counters| rows.
+StatsTable::StatsTable(const std::string& name, int max_threads,
+                       int max_counters)
+    : impl_(NULL),
+      tls_index_(SlotReturnFunction) {
+  // Sum of the aligned sections laid out by ComputeMappedPointers (the
+  // terms are summed in a different order than the layout, but the
+  // total is the same).
+  int table_size =
+      AlignedSize(sizeof(Private::TableHeader)) +
+      AlignedSize((max_counters * sizeof(char) * kMaxCounterNameLength)) +
+      AlignedSize((max_threads * sizeof(char) * kMaxThreadNameLength)) +
+      AlignedSize(max_threads * sizeof(int)) +
+      AlignedSize(max_threads * sizeof(int)) +
+      AlignedSize((sizeof(int) * (max_counters * max_threads)));
+
+  impl_ = Private::New(name, table_size, max_threads, max_counters);
+
+  if (!impl_)
+    PLOG(ERROR) << "StatsTable did not initialize";
+}
+
+StatsTable::~StatsTable() {
+  // Before we tear down our copy of the table, be sure to
+  // unregister our thread.
+  UnregisterThread();
+
+  // Return ThreadLocalStorage. At this point, if any registered threads
+  // still exist, they cannot Unregister.
+  tls_index_.Free();
+
+  // Cleanup our shared memory.
+  delete impl_;
+
+  // If we are the global table, unregister ourselves.
+  if (global_table_ == this)
+    global_table_ = NULL;
+}
+
+// Claims a column for the calling thread and stashes it in TLS.
+// Returns the 1-based slot, or 0 when the table is absent or full.
+int StatsTable::RegisterThread(const std::string& name) {
+  int slot = 0;
+  if (!impl_)
+    return 0;
+
+  // Registering a thread requires that we lock the shared memory
+  // so that two threads don't grab the same slot. Fortunately,
+  // thread creation shouldn't happen in inner loops.
+  {
+    SharedMemoryAutoLock lock(impl_->shared_memory());
+    slot = FindEmptyThread();
+    if (!slot) {
+      return 0;
+    }
+
+    // We have space, so consume a column in the table.
+    std::string thread_name = name;
+    if (name.empty())
+      thread_name = kUnknownName;
+    strlcpy(impl_->thread_name(slot), thread_name.c_str(),
+            kMaxThreadNameLength);
+    *(impl_->thread_tid(slot)) = PlatformThread::CurrentId();
+    *(impl_->thread_pid(slot)) = GetCurrentProcId();
+  }
+
+  // Set our thread local storage so SlotReturnFunction can release the
+  // slot when this thread exits.
+  TLSData* data = new TLSData;
+  data->table = this;
+  data->slot = slot;
+  tls_index_.Set(data);
+  return slot;
+}
+
+// Returns the calling thread's TLSData, or NULL if this thread never
+// registered with the table.
+StatsTable::TLSData* StatsTable::GetTLSData() const {
+  TLSData* data =
+      static_cast<TLSData*>(tls_index_.Get());
+  if (!data)
+    return NULL;
+
+  DCHECK(data->slot);
+  DCHECK_EQ(data->table, this);
+  return data;
+}
+
+// Releases the calling thread's column, if it has one.
+void StatsTable::UnregisterThread() {
+  UnregisterThread(GetTLSData());
+}
+
+void StatsTable::UnregisterThread(TLSData* data) {
+  if (!data)
+    return;
+  DCHECK(impl_);
+
+  // Mark the slot free by zeroing out the thread name.
+  char* name = impl_->thread_name(data->slot);
+  *name = '\0';
+
+  // Remove the calling thread's TLS so that it cannot use the slot.
+  tls_index_.Set(NULL);
+  delete data;
+}
+
+// static
+void StatsTable::SlotReturnFunction(void* data) {
+  // This is called by the TLS destructor, which on some platforms has
+  // already cleared the TLS info, so use the tls_data argument
+  // rather than trying to fetch it ourselves.
+  TLSData* tls_data = static_cast<TLSData*>(data);
+  if (tls_data) {
+    DCHECK(tls_data->table);
+    tls_data->table->UnregisterThread(tls_data);
+  }
+}
+
+int StatsTable::CountThreadsRegistered() const {
+ if (!impl_)
+ return 0;
+
+ // Loop through the shared memory and count the threads that are active.
+ // We intentionally do not lock the table during the operation.
+ int count = 0;
+ for (int index = 1; index <= impl_->max_threads(); index++) {
+ char* name = impl_->thread_name(index);
+ if (*name != '\0')
+ count++;
+ }
+ return count;
+}
+
+int StatsTable::GetSlot() const {
+ TLSData* data = GetTLSData();
+ if (!data)
+ return 0;
+ return data->slot;
+}
+
+int StatsTable::FindEmptyThread() const {
+  // Scans for the first slot whose name is empty. The caller must hold
+  // the shared_memory lock (see declaration in stats_table.h).
+  //
+  // Note: the API returns slots numbered from 1..N, although
+  // internally, the array is 0..N-1. This is so that we can return
+  // zero as "not found".
+  //
+  // The reason for doing this is because the thread 'slot' is stored
+  // in TLS, which is always initialized to zero, not -1. If 0 were
+  // returned as a valid slot number, it would be confused with the
+  // uninitialized state.
+  if (!impl_)
+    return 0;
+
+  int index = 1;
+  for (; index <= impl_->max_threads(); index++) {
+    char* name = impl_->thread_name(index);
+    if (!*name)
+      break;  // An empty name marks a free slot.
+  }
+  if (index > impl_->max_threads())
+    return 0;  // The table is full.
+  return index;
+}
+
+int StatsTable::FindCounterOrEmptyRow(const std::string& name) const {
+  // Single pass over the counter column: returns the row already named
+  // |name| if present, otherwise the first free row found (or 0 if the
+  // table is full). Caller must hold the shared_memory lock.
+  //
+  // Note: the API returns slots numbered from 1..N, although
+  // internally, the array is 0..N-1. This is so that we can return
+  // zero as "not found".
+  //
+  // There isn't much reason for this other than to be consistent
+  // with the way we track columns for thread slots. (See comments
+  // in FindEmptyThread for why it is done this way).
+  if (!impl_)
+    return 0;
+
+  int free_slot = 0;
+  for (int index = 1; index <= impl_->max_counters(); index++) {
+    char* row_name = impl_->counter_name(index);
+    if (!*row_name && !free_slot)
+      free_slot = index;  // save that we found a free slot
+    else if (!strncmp(row_name, name.c_str(), kMaxCounterNameLength))
+      return index;  // An existing row takes precedence over a free one.
+  }
+  return free_slot;
+}
+
+int StatsTable::FindCounter(const std::string& name) {
+  // Looks |name| up in the per-process cache first; on a miss, falls
+  // through to AddCounter(), which consults (and possibly extends) the
+  // shared-memory table.
+  //
+  // Note: the API returns counters numbered from 1..N, although
+  // internally, the array is 0..N-1. This is so that we can return
+  // zero as "not found".
+  if (!impl_)
+    return 0;
+
+  // Create a scope for our auto-lock. The lock is released before
+  // AddCounter() runs, since AddCounter re-acquires it for its cache
+  // insert.
+  {
+    AutoLock scoped_lock(counters_lock_);
+
+    // Attempt to find the counter.
+    CountersMap::const_iterator iter;
+    iter = counters_.find(name);
+    if (iter != counters_.end())
+      return iter->second;
+  }
+
+  // Counter does not exist, so add it.
+  return AddCounter(name);
+}
+
+int StatsTable::AddCounter(const std::string& name) {
+  // Adds |name| to the shared-memory table (or adopts the row another
+  // process already created for it -- FindCounterOrEmptyRow returns an
+  // existing match), then caches the id locally. Returns 0 when the
+  // table is absent or full.
+  if (!impl_)
+    return 0;
+
+  int counter_id = 0;
+  {
+    // To add a counter to the shared memory, we need the
+    // shared memory lock.
+    SharedMemoryAutoLock lock(impl_->shared_memory());
+
+    // We have space, so create a new counter.
+    counter_id = FindCounterOrEmptyRow(name);
+    if (!counter_id)
+      return 0;
+
+    // Empty names are stored under a placeholder so the row is not
+    // mistaken for a free slot.
+    std::string counter_name = name;
+    if (name.empty())
+      counter_name = kUnknownName;
+    strlcpy(impl_->counter_name(counter_id), counter_name.c_str(),
+            kMaxCounterNameLength);
+  }
+
+  // now add to our in-memory cache
+  {
+    AutoLock lock(counters_lock_);
+    counters_[name] = counter_id;
+  }
+  return counter_id;
+}
+
+int* StatsTable::GetLocation(int counter_id, int slot_id) const {
+  // Returns the address of the cell for (counter_id, slot_id), or NULL
+  // if the table is absent or either id is out of range. Both ids are
+  // 1-based, as returned by FindCounter()/RegisterThread().
+  if (!impl_)
+    return NULL;
+  // Reject out-of-range ids. In particular FindLocation() passes the
+  // raw result of FindCounter(), which is 0 when the table is full;
+  // without this check that would index row 0 / row[-1] out of range.
+  if (counter_id < 1 || counter_id > impl_->max_counters())
+    return NULL;
+  if (slot_id < 1 || slot_id > impl_->max_threads())
+    return NULL;
+
+  int* row = impl_->row(counter_id);
+  return &(row[slot_id-1]);
+}
+
+const char* StatsTable::GetRowName(int index) const {
+  // Name of the counter stored at row |index|, or NULL when no table
+  // is attached.
+  return impl_ ? impl_->counter_name(index) : NULL;
+}
+
+int StatsTable::GetRowValue(int index, int pid) const {
+  // Sums the row for counter |index| across thread columns. When |pid|
+  // is 0 every column is included; otherwise only columns whose
+  // recorded pid matches |pid|.
+  if (!impl_)
+    return 0;
+
+  int rv = 0;
+  int* row = impl_->row(index);
+  for (int slot_id = 0; slot_id < impl_->max_threads(); slot_id++) {
+    // NOTE(review): thread_pid() is called with a 0-based slot_id here,
+    // while RegisterThread()/UnregisterThread() use 1-based slots --
+    // confirm Private::thread_pid() expects 0-based indices.
+    if (pid == 0 || *impl_->thread_pid(slot_id) == pid)
+      rv += row[slot_id];
+  }
+  return rv;
+}
+
+int StatsTable::GetRowValue(int index) const {
+  // Sum across every thread column; pid 0 disables the pid filter.
+  const int kAnyPid = 0;
+  return GetRowValue(index, kAnyPid);
+}
+
+int StatsTable::GetCounterValue(const std::string& name, int pid) {
+  // Resolves |name| to a row (creating the counter if absent, per
+  // FindCounter) and sums that row's values for |pid| (0 = all pids).
+  if (!impl_)
+    return 0;
+
+  int row = FindCounter(name);
+  if (!row)
+    return 0;  // Table full; the counter could not be created.
+  return GetRowValue(row, pid);
+}
+
+int StatsTable::GetCounterValue(const std::string& name) {
+  // Sum across every pid; pid 0 disables the pid filter.
+  const int kAnyPid = 0;
+  return GetCounterValue(name, kAnyPid);
+}
+
+int StatsTable::GetMaxCounters() const {
+  // Row capacity of the table; 0 when no shared memory is attached.
+  return impl_ ? impl_->max_counters() : 0;
+}
+
+int StatsTable::GetMaxThreads() const {
+  // Column (thread) capacity of the table; 0 when no shared memory is
+  // attached.
+  return impl_ ? impl_->max_threads() : 0;
+}
+
+int* StatsTable::FindLocation(const char* name) {
+  // Convenience entry point used by the StatsCounter family: resolves
+  // |name| to the calling thread's cell in the process-global table.
+  // Returns NULL if no global table is set or the thread table is full.
+  //
+  // Get the static StatsTable
+  StatsTable *table = StatsTable::current();
+  if (!table)
+    return NULL;
+
+  // Get the slot for this thread. Try to register
+  // it if none exists.
+  int slot = table->GetSlot();
+  if (!slot && !(slot = table->RegisterThread("")))
+    return NULL;
+
+  // Find the counter id for the counter.
+  std::string str_name(name);
+  int counter = table->FindCounter(str_name);
+
+  // Now we can find the location in the table.
+  return table->GetLocation(counter, slot);
+}
+
+} // namespace base
diff --git a/base/metrics/stats_table.h b/base/metrics/stats_table.h
new file mode 100644
index 0000000..e83039c
--- /dev/null
+++ b/base/metrics/stats_table.h
@@ -0,0 +1,196 @@
+// Copyright (c) 2010 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// A StatsTable is a table of statistics. It can be used across multiple
+// processes and threads, maintaining cheap statistics counters without
+// locking.
+//
+// The goal is to make it very cheap and easy for developers to add
+// counters to code, without having to build one-off utilities or mechanisms
+// to track the counters, and also to allow a single "view" to display
+// the contents of all counters.
+//
+// To achieve this, StatsTable creates a shared memory segment to store
+// the data for the counters. Upon creation, it has a specific size
+// which governs the maximum number of counters and concurrent
+// threads/processes which can use it.
+//
+
+#ifndef BASE_METRICS_STATS_TABLE_H_
+#define BASE_METRICS_STATS_TABLE_H_
+#pragma once
+
+#include <string>
+
+#include "base/basictypes.h"
+#include "base/hash_tables.h"
+#include "base/lock.h"
+#include "base/thread_local_storage.h"
+
+namespace base {
+
+class StatsTable {
+ public:
+  // Create a new StatsTable.
+  // If a StatsTable already exists with the specified name, this StatsTable
+  // will use the same shared memory segment as the original. Otherwise,
+  // a new StatsTable is created and all counters are zeroed.
+  //
+  // name is the name of the StatsTable to use.
+  //
+  // max_threads is the maximum number of threads the table will support.
+  // If the StatsTable already exists, this number is ignored.
+  //
+  // max_counters is the maximum number of counters the table will support.
+  // If the StatsTable already exists, this number is ignored.
+  StatsTable(const std::string& name, int max_threads, int max_counters);
+
+  // Destroys the StatsTable. When the last StatsTable is destroyed
+  // (across all processes), the StatsTable is removed from disk.
+  ~StatsTable();
+
+  // For convenience, we create a static table. This is generally
+  // used automatically by the counters.
+  static StatsTable* current() { return global_table_; }
+
+  // Set the global table for use in this process.
+  static void set_current(StatsTable* value) { global_table_ = value; }
+
+  // Get the slot id for the calling thread. Returns 0 if no
+  // slot is assigned.
+  int GetSlot() const;
+
+  // All threads that contribute data to the table must register with the
+  // table first. This function will set thread local storage for the
+  // thread containing the location in the table where this thread will
+  // write its counter data.
+  //
+  // name is just a debugging tag to label the thread, and it does not
+  // need to be unique. It will be truncated to kMaxThreadNameLength-1
+  // characters.
+  //
+  // On success, returns the slot id for this thread. On failure,
+  // returns 0.
+  int RegisterThread(const std::string& name);
+
+  // Returns the number of threads currently registered. This is really not
+  // useful except for diagnostics and debugging.
+  int CountThreadsRegistered() const;
+
+  // Find a counter in the StatsTable.
+  //
+  // Returns an id for the counter which can be used to call GetLocation().
+  // If the counter does not exist, attempts to create a row for the new
+  // counter. If there is no space in the table for the new counter,
+  // returns 0.
+  int FindCounter(const std::string& name);
+
+  // TODO(mbelshe): implement RemoveCounter.
+
+  // Gets the location of a particular value in the table based on
+  // the counter id and slot id.
+  int* GetLocation(int counter_id, int slot_id) const;
+
+  // Gets the counter name at a particular row. If the row is empty,
+  // returns NULL.
+  const char* GetRowName(int index) const;
+
+  // Gets the sum of the values for a particular row.
+  int GetRowValue(int index) const;
+
+  // Gets the sum of the values for a particular row for a given pid.
+  int GetRowValue(int index, int pid) const;
+
+  // Gets the sum of the values for a particular counter. If the counter
+  // does not exist, creates the counter.
+  int GetCounterValue(const std::string& name);
+
+  // Gets the sum of the values for a particular counter for a given pid.
+  // If the counter does not exist, creates the counter.
+  int GetCounterValue(const std::string& name, int pid);
+
+  // The maximum number of counters/rows in the table.
+  int GetMaxCounters() const;
+
+  // The maximum number of threads/columns in the table.
+  int GetMaxThreads() const;
+
+  // The maximum length (in characters) of a Thread's name including
+  // null terminator, as stored in the shared memory.
+  static const int kMaxThreadNameLength = 32;
+
+  // The maximum length (in characters) of a Counter's name including
+  // null terminator, as stored in the shared memory.
+  static const int kMaxCounterNameLength = 32;
+
+  // Convenience function to lookup a counter location for a
+  // counter by name for the calling thread. Will register
+  // the thread if it is not already registered.
+  static int* FindLocation(const char *name);
+
+ private:
+  class Private;
+  struct TLSData;
+
+  // Returns the space occupied by a thread in the table. Generally used
+  // if a thread terminates but the process continues. This function
+  // does not zero out the thread's counters.
+  // Cannot be used inside a posix tls destructor.
+  void UnregisterThread();
+
+  // This variant expects the tls data to be passed in, so it is safe to
+  // call from inside a posix tls destructor (see doc for pthread_key_create).
+  void UnregisterThread(TLSData* tls_data);
+
+  // The SlotReturnFunction is called at thread exit for each thread
+  // which used the StatsTable.
+  static void SlotReturnFunction(void* data);
+
+  // Locates a free slot in the table. Returns a number > 0 on success,
+  // or 0 on failure. The caller must hold the shared_memory lock when
+  // calling this function.
+  int FindEmptyThread() const;
+
+  // Locates a counter in the table or finds an empty row. Returns a
+  // number > 0 on success, or 0 on failure. The caller must hold the
+  // shared_memory_lock when calling this function.
+  int FindCounterOrEmptyRow(const std::string& name) const;
+
+  // Internal function to add a counter to the StatsTable. Assumes that
+  // the counter does not already exist in the table.
+  //
+  // name is a unique identifier for this counter, and will be truncated
+  // to kMaxCounterNameLength-1 characters.
+  //
+  // On success, returns the counter_id for the newly added counter.
+  // On failure, returns 0.
+  int AddCounter(const std::string& name);
+
+  // Get the TLS data for the calling thread. Returns NULL if none is
+  // initialized.
+  TLSData* GetTLSData() const;
+
+  typedef hash_map<std::string, int> CountersMap;
+
+  Private* impl_;
+
+  // The counters_lock_ protects the counters_ hash table.
+  Lock counters_lock_;
+
+  // The counters_ hash map is an in-memory hash of the counters.
+  // It is used for quick lookup of counters, but it cannot be used
+  // as a substitute for what is in the shared memory. Even though
+  // we don't have a counter in our hash table, another process may
+  // have created it.
+  CountersMap counters_;
+  TLSSlot tls_index_;
+
+  static StatsTable* global_table_;
+
+  DISALLOW_COPY_AND_ASSIGN(StatsTable);
+};
+
+} // namespace base
+
+#endif // BASE_METRICS_STATS_TABLE_H_
diff --git a/base/metrics/stats_table_unittest.cc b/base/metrics/stats_table_unittest.cc
new file mode 100644
index 0000000..c9eb9a2
--- /dev/null
+++ b/base/metrics/stats_table_unittest.cc
@@ -0,0 +1,411 @@
+// Copyright (c) 2010 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/platform_thread.h"
+#include "base/simple_thread.h"
+#include "base/shared_memory.h"
+#include "base/metrics/stats_table.h"
+#include "base/metrics/stats_counters.h"
+#include "base/string_piece.h"
+#include "base/string_util.h"
+#include "base/test/multiprocess_test.h"
+#include "base/utf_string_conversions.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "testing/multiprocess_func_list.h"
+
+namespace base {
+
+class StatsTableTest : public MultiProcessTest {
+ public:
+  // Removes any shared-memory segment left over from a previous run so
+  // each test starts with a freshly zeroed table.
+  void DeleteShmem(const std::string& name) {
+    SharedMemory mem;
+    mem.Delete(name);
+  }
+};
+
+// Open a StatsTable and verify that we can write to each of the
+// locations in the table.
+TEST_F(StatsTableTest, VerifySlots) {
+  const std::string kTableName = "VerifySlotsStatTable";
+  const int kMaxThreads = 1;
+  const int kMaxCounter = 5;
+  DeleteShmem(kTableName);
+  StatsTable table(kTableName, kMaxThreads, kMaxCounter);
+
+  // Register a single thread.
+  std::string thread_name = "mainThread";
+  int slot_id = table.RegisterThread(thread_name);
+  EXPECT_NE(slot_id, 0);
+
+  // Fill up the table with counters. Each name here ends up as
+  // "countercounter.ctrN" (base name plus the appended literal), which
+  // is fine -- the names only need to be distinct.
+  std::string counter_base_name = "counter";
+  for (int index = 0; index < kMaxCounter; index++) {
+    std::string counter_name = counter_base_name;
+    StringAppendF(&counter_name, "counter.ctr%d", index);
+    int counter_id = table.FindCounter(counter_name);
+    EXPECT_GT(counter_id, 0);
+  }
+
+  // Try to allocate an additional thread. Verify it fails.
+  slot_id = table.RegisterThread("too many threads");
+  EXPECT_EQ(slot_id, 0);
+
+  // Try to allocate an additional counter. Verify it fails.
+  int counter_id = table.FindCounter(counter_base_name);
+  EXPECT_EQ(counter_id, 0);
+
+  DeleteShmem(kTableName);
+}
+
+// Counter names and loop count shared by the multi-thread and
+// multi-process tests below.
+//
+// CounterZero will continually be set to 0.
+const std::string kCounterZero = "CounterZero";
+// Counter1313 will continually be set to 1313.
+const std::string kCounter1313 = "Counter1313";
+// CounterIncrement will be incremented each time.
+const std::string kCounterIncrement = "CounterIncrement";
+// CounterDecrement will be decremented each time.
+const std::string kCounterDecrement = "CounterDecrement";
+// CounterMixed will be incremented by odd numbered threads and
+// decremented by even threads.
+const std::string kCounterMixed = "CounterMixed";
+// The number of thread loops that we will do.
+const int kThreadLoops = 100;
+
+// Worker thread for FLAKY_MultipleThreads. |id| decides whether this
+// thread increments (even) or decrements (odd) the mixed counter.
+class StatsTableThread : public SimpleThread {
+ public:
+  StatsTableThread(std::string name, int id)
+      : SimpleThread(name),
+        id_(id) {}
+  virtual void Run();
+ private:
+  int id_;
+};
+
+void StatsTableThread::Run() {
+  // Each thread will open the shared memory and set counters
+  // concurrently in a loop. We'll use some pauses to
+  // mixup the thread scheduling.
+
+  StatsCounter zero_counter(kCounterZero);
+  StatsCounter lucky13_counter(kCounter1313);
+  StatsCounter increment_counter(kCounterIncrement);
+  StatsCounter decrement_counter(kCounterDecrement);
+  for (int index = 0; index < kThreadLoops; index++) {
+    StatsCounter mixed_counter(kCounterMixed);  // create this one in the loop
+    zero_counter.Set(0);
+    lucky13_counter.Set(1313);
+    increment_counter.Increment();
+    decrement_counter.Decrement();
+    // Odd-id threads decrement, even-id threads increment, so the net
+    // expected value is checked against kMaxThreads % 2 by the test.
+    if (id_ % 2)
+      mixed_counter.Decrement();
+    else
+      mixed_counter.Increment();
+    PlatformThread::Sleep(index % 10);  // short wait
+  }
+}
+
+// Create a few threads and have them poke on their counters.
+// Flaky, http://crbug.com/10611.
+TEST_F(StatsTableTest, FLAKY_MultipleThreads) {
+  // Create a stats table.
+  const std::string kTableName = "MultipleThreadStatTable";
+  const int kMaxThreads = 20;
+  const int kMaxCounter = 5;
+  DeleteShmem(kTableName);
+  StatsTable table(kTableName, kMaxThreads, kMaxCounter);
+  StatsTable::set_current(&table);
+
+  EXPECT_EQ(0, table.CountThreadsRegistered());
+
+  // Spin up a set of threads to go bang on the various counters.
+  // After we join the threads, we'll make sure the counters
+  // contain the values we expected.
+  StatsTableThread* threads[kMaxThreads];
+
+  // Spawn the threads.
+  for (int index = 0; index < kMaxThreads; index++) {
+    threads[index] = new StatsTableThread("MultipleThreadsTest", index);
+    threads[index]->Start();
+  }
+
+  // Wait for the threads to finish.
+  for (int index = 0; index < kMaxThreads; index++) {
+    threads[index]->Join();
+    delete threads[index];
+  }
+
+  StatsCounter zero_counter(kCounterZero);
+  StatsCounter lucky13_counter(kCounter1313);
+  StatsCounter increment_counter(kCounterIncrement);
+  StatsCounter decrement_counter(kCounterDecrement);
+  StatsCounter mixed_counter(kCounterMixed);
+
+  // Verify the various counters are correct. Counter rows are looked up
+  // by the "c:"-prefixed names the StatsCounter class registers.
+  std::string name;
+  name = "c:" + kCounterZero;
+  EXPECT_EQ(0, table.GetCounterValue(name));
+  name = "c:" + kCounter1313;
+  EXPECT_EQ(1313 * kMaxThreads,
+            table.GetCounterValue(name));
+  name = "c:" + kCounterIncrement;
+  EXPECT_EQ(kMaxThreads * kThreadLoops,
+            table.GetCounterValue(name));
+  name = "c:" + kCounterDecrement;
+  EXPECT_EQ(-kMaxThreads * kThreadLoops,
+            table.GetCounterValue(name));
+  name = "c:" + kCounterMixed;
+  EXPECT_EQ((kMaxThreads % 2) * kThreadLoops,
+            table.GetCounterValue(name));
+  // All worker threads have exited, so their slots should be free again.
+  EXPECT_EQ(0, table.CountThreadsRegistered());
+
+  DeleteShmem(kTableName);
+}
+
+const std::string kMPTableName = "MultipleProcessStatTable";
+
+// Child-process entry point spawned by FLAKY_MultipleProcesses.
+MULTIPROCESS_TEST_MAIN(StatsTableMultipleProcessMain) {
+  // Each process will open the shared memory and set counters
+  // concurrently in a loop. We'll use some pauses to
+  // mixup the scheduling.
+
+  // Sizes of 0 are fine here: the parent already created the table, so
+  // the constructor attaches to the existing segment and ignores them.
+  StatsTable table(kMPTableName, 0, 0);
+  StatsTable::set_current(&table);
+  StatsCounter zero_counter(kCounterZero);
+  StatsCounter lucky13_counter(kCounter1313);
+  StatsCounter increment_counter(kCounterIncrement);
+  StatsCounter decrement_counter(kCounterDecrement);
+  for (int index = 0; index < kThreadLoops; index++) {
+    zero_counter.Set(0);
+    lucky13_counter.Set(1313);
+    increment_counter.Increment();
+    decrement_counter.Decrement();
+    PlatformThread::Sleep(index % 10);  // short wait
+  }
+  return 0;
+}
+
+// Create a few processes and have them poke on their counters.
+// This test is slow and flaky http://crbug.com/10611
+TEST_F(StatsTableTest, FLAKY_MultipleProcesses) {
+  // Create a stats table.
+  const int kMaxProcs = 20;
+  const int kMaxCounter = 5;
+  DeleteShmem(kMPTableName);
+  StatsTable table(kMPTableName, kMaxProcs, kMaxCounter);
+  StatsTable::set_current(&table);
+  EXPECT_EQ(0, table.CountThreadsRegistered());
+
+  // Spin up a set of processes to go bang on the various counters.
+  // After we join the processes, we'll make sure the counters
+  // contain the values we expected.
+  ProcessHandle procs[kMaxProcs];
+
+  // Spawn the processes. (Plain int, consistent with every other loop
+  // in this file; there is no reason for a narrow index type here.)
+  for (int index = 0; index < kMaxProcs; index++) {
+    procs[index] = this->SpawnChild("StatsTableMultipleProcessMain", false);
+    EXPECT_NE(kNullProcessHandle, procs[index]);
+  }
+
+  // Wait for the processes to finish.
+  for (int index = 0; index < kMaxProcs; index++) {
+    EXPECT_TRUE(WaitForSingleProcess(procs[index], 60 * 1000));
+    CloseProcessHandle(procs[index]);
+  }
+
+  StatsCounter zero_counter(kCounterZero);
+  StatsCounter lucky13_counter(kCounter1313);
+  StatsCounter increment_counter(kCounterIncrement);
+  StatsCounter decrement_counter(kCounterDecrement);
+
+  // Verify the various counters are correct.
+  std::string name;
+  name = "c:" + kCounterZero;
+  EXPECT_EQ(0, table.GetCounterValue(name));
+  name = "c:" + kCounter1313;
+  EXPECT_EQ(1313 * kMaxProcs,
+            table.GetCounterValue(name));
+  name = "c:" + kCounterIncrement;
+  EXPECT_EQ(kMaxProcs * kThreadLoops,
+            table.GetCounterValue(name));
+  name = "c:" + kCounterDecrement;
+  EXPECT_EQ(-kMaxProcs * kThreadLoops,
+            table.GetCounterValue(name));
+  EXPECT_EQ(0, table.CountThreadsRegistered());
+
+  DeleteShmem(kMPTableName);
+}
+
+// Exposes StatsCounter's protected GetPtr() so the test can inspect the
+// raw shared-memory cell behind the counter.
+class MockStatsCounter : public StatsCounter {
+ public:
+  explicit MockStatsCounter(const std::string& name)
+      : StatsCounter(name) {}
+  int* Pointer() { return GetPtr(); }
+};
+
+// Test some basic StatsCounter operations
+TEST_F(StatsTableTest, StatsCounter) {
+  // Create a stats table.
+  const std::string kTableName = "StatTable";
+  const int kMaxThreads = 20;
+  const int kMaxCounter = 5;
+  DeleteShmem(kTableName);
+  StatsTable table(kTableName, kMaxThreads, kMaxCounter);
+  StatsTable::set_current(&table);
+
+  MockStatsCounter foo("foo");
+
+  // Test initial state.
+  EXPECT_TRUE(foo.Enabled());
+  ASSERT_NE(foo.Pointer(), static_cast<int*>(0));
+  EXPECT_EQ(0, *(foo.Pointer()));
+  EXPECT_EQ(0, table.GetCounterValue("c:foo"));
+
+  // Test Increment. Loop until the raw cell reads 123, then confirm the
+  // table-level sum agrees with the cell.
+  while (*(foo.Pointer()) < 123) foo.Increment();
+  EXPECT_EQ(123, table.GetCounterValue("c:foo"));
+  foo.Add(0);
+  EXPECT_EQ(123, table.GetCounterValue("c:foo"));
+  foo.Add(-1);
+  EXPECT_EQ(122, table.GetCounterValue("c:foo"));
+
+  // Test Set.
+  foo.Set(0);
+  EXPECT_EQ(0, table.GetCounterValue("c:foo"));
+  foo.Set(100);
+  EXPECT_EQ(100, table.GetCounterValue("c:foo"));
+  foo.Set(-1);
+  EXPECT_EQ(-1, table.GetCounterValue("c:foo"));
+  foo.Set(0);
+  EXPECT_EQ(0, table.GetCounterValue("c:foo"));
+
+  // Test Decrement.
+  foo.Subtract(1);
+  EXPECT_EQ(-1, table.GetCounterValue("c:foo"));
+  foo.Subtract(0);
+  EXPECT_EQ(-1, table.GetCounterValue("c:foo"));
+  foo.Subtract(-1);
+  EXPECT_EQ(0, table.GetCounterValue("c:foo"));
+
+  DeleteShmem(kTableName);
+}
+
+// Exposes StatsCounterTimer's protected start/stop timestamps so the
+// test can verify the timer's internal state.
+class MockStatsCounterTimer : public StatsCounterTimer {
+ public:
+  explicit MockStatsCounterTimer(const std::string& name)
+      : StatsCounterTimer(name) {}
+
+  TimeTicks start_time() { return start_time_; }
+  TimeTicks stop_time() { return stop_time_; }
+};
+
+// Test some basic StatsCounterTimer operations
+TEST_F(StatsTableTest, StatsCounterTimer) {
+  // Create a stats table.
+  const std::string kTableName = "StatTable";
+  const int kMaxThreads = 20;
+  const int kMaxCounter = 5;
+  // Start from a clean segment: other tests in this file reuse the
+  // "StatTable" name, and a leftover segment would carry their counts
+  // into the assertions below.
+  DeleteShmem(kTableName);
+  StatsTable table(kTableName, kMaxThreads, kMaxCounter);
+  StatsTable::set_current(&table);
+
+  MockStatsCounterTimer bar("bar");
+
+  // Test initial state.
+  EXPECT_FALSE(bar.Running());
+  EXPECT_TRUE(bar.start_time().is_null());
+  EXPECT_TRUE(bar.stop_time().is_null());
+
+  const int kRunMs = 100;
+
+  // Do some timing.
+  bar.Start();
+  PlatformThread::Sleep(kRunMs);
+  bar.Stop();
+  EXPECT_GT(table.GetCounterValue("t:bar"), 0);
+  EXPECT_LE(kRunMs, table.GetCounterValue("t:bar"));
+
+  // Verify that timing again is additive.
+  bar.Start();
+  PlatformThread::Sleep(kRunMs);
+  bar.Stop();
+  EXPECT_GT(table.GetCounterValue("t:bar"), 0);
+  EXPECT_LE(kRunMs * 2, table.GetCounterValue("t:bar"));
+
+  DeleteShmem(kTableName);
+}
+
+// Test some basic StatsRate operations
+TEST_F(StatsTableTest, StatsRate) {
+  // Create a stats table.
+  const std::string kTableName = "StatTable";
+  const int kMaxThreads = 20;
+  const int kMaxCounter = 5;
+  // Start from a clean segment: other tests in this file reuse the
+  // "StatTable" name, and a leftover segment would carry their counts
+  // into the assertions below.
+  DeleteShmem(kTableName);
+  StatsTable table(kTableName, kMaxThreads, kMaxCounter);
+  StatsTable::set_current(&table);
+
+  StatsRate baz("baz");
+
+  // Test initial state.
+  EXPECT_FALSE(baz.Running());
+  EXPECT_EQ(0, table.GetCounterValue("c:baz"));
+  EXPECT_EQ(0, table.GetCounterValue("t:baz"));
+
+  const int kRunMs = 100;
+
+  // Do some timing. A rate tracks both a count ("c:") and a total
+  // elapsed time ("t:").
+  baz.Start();
+  PlatformThread::Sleep(kRunMs);
+  baz.Stop();
+  EXPECT_EQ(1, table.GetCounterValue("c:baz"));
+  EXPECT_LE(kRunMs, table.GetCounterValue("t:baz"));
+
+  // Verify that timing again is additive.
+  baz.Start();
+  PlatformThread::Sleep(kRunMs);
+  baz.Stop();
+  EXPECT_EQ(2, table.GetCounterValue("c:baz"));
+  EXPECT_LE(kRunMs * 2, table.GetCounterValue("t:baz"));
+
+  DeleteShmem(kTableName);
+}
+
+// Test some basic StatsScope operations
+TEST_F(StatsTableTest, StatsScope) {
+  // Create a stats table.
+  const std::string kTableName = "StatTable";
+  const int kMaxThreads = 20;
+  const int kMaxCounter = 5;
+  DeleteShmem(kTableName);
+  StatsTable table(kTableName, kMaxThreads, kMaxCounter);
+  StatsTable::set_current(&table);
+
+  StatsCounterTimer foo("foo");
+  StatsRate bar("bar");
+
+  // Test initial state.
+  EXPECT_EQ(0, table.GetCounterValue("t:foo"));
+  EXPECT_EQ(0, table.GetCounterValue("t:bar"));
+  EXPECT_EQ(0, table.GetCounterValue("c:bar"));
+
+  const int kRunMs = 100;
+
+  // Try a scope. The scopes start their timers on construction and stop
+  // them when the block exits.
+  {
+    StatsScope<StatsCounterTimer> timer(foo);
+    StatsScope<StatsRate> timer2(bar);
+    PlatformThread::Sleep(kRunMs);
+  }
+  EXPECT_LE(kRunMs, table.GetCounterValue("t:foo"));
+  EXPECT_LE(kRunMs, table.GetCounterValue("t:bar"));
+  EXPECT_EQ(1, table.GetCounterValue("c:bar"));
+
+  // Try a second scope. Times accumulate; the rate's count advances by
+  // one per scope.
+  {
+    StatsScope<StatsCounterTimer> timer(foo);
+    StatsScope<StatsRate> timer2(bar);
+    PlatformThread::Sleep(kRunMs);
+  }
+  EXPECT_LE(kRunMs * 2, table.GetCounterValue("t:foo"));
+  EXPECT_LE(kRunMs * 2, table.GetCounterValue("t:bar"));
+  EXPECT_EQ(2, table.GetCounterValue("c:bar"));
+
+  DeleteShmem(kTableName);
+}
+
+} // namespace base