authorjar@chromium.org <jar@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>2011-12-04 07:19:10 +0000
committerjar@chromium.org <jar@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>2011-12-04 07:19:10 +0000
commitb6b2b89a89a78a563232ae7a4fdc42b7d1663a07 (patch)
treef3b0110b8dc4246cb668f015f21a348f8d188caf
parentc6562f4b883b37a288205a206bdadf9a37978bf3 (diff)
Support incremental-max and sample in Profiler data
I also did some cleaning and refactoring in tracked_objects. We had a lot of functionality that had migrated to JS and that we no longer needed (a lot of accessors that are supplanted by the ToValue() methods). I'm anticipating that we'll move to asynchronous collection of data from the profiler, so that we can bounce around to various threads and more cleanly collect samples (without risking races during data snapshots). Several of the refactors are heading in that direction. r=rtenneti tbr=jam (for microscopic content change) BUG=106291,106293 Review URL: http://codereview.chromium.org/8775061 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@112928 0039d316-1c4b-4281-b951-d872f2087c98
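The sampling added here is single-element reservoir sampling: on the count_-th death, the incoming duration replaces the stored sample with probability 1/count_, which leaves every duration seen so far equally likely to be the one retained. A minimal standalone sketch of the idea (illustrative names only, not the patched code; it uses rand() where the patch stirs its own random_number_, since base::RandInt() cannot be called on all threads):

#include <cstdlib>

// Keeps one uniformly chosen sample from a stream of durations.
class DurationSampler {
 public:
  DurationSampler() : count_(0), sample_(0) {}

  void Record(int duration) {
    ++count_;
    // Replace the kept sample with probability 1/count_. After N calls,
    // each of the N durations has had probability 1/N of surviving.
    if (std::rand() % count_ == 0)
      sample_ = duration;
  }

  int sample() const { return sample_; }

 private:
  int count_;
  int sample_;
};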
-rw-r--r--  base/tracked_objects.cc                           244
-rw-r--r--  base/tracked_objects.h                            263
-rw-r--r--  base/tracked_objects_unittest.cc                  148
-rw-r--r--  chrome/browser/metrics/tracking_synchronizer.cc     2
-rw-r--r--  content/common/child_thread.cc                      2
5 files changed, 380 insertions(+), 279 deletions(-)
diff --git a/base/tracked_objects.cc b/base/tracked_objects.cc
index b7580be..99f6746 100644
--- a/base/tracked_objects.cc
+++ b/base/tracked_objects.cc
@@ -33,86 +33,134 @@ static const ThreadData::Status kInitialStartupState = ThreadData::ACTIVE;
//------------------------------------------------------------------------------
// DeathData tallies durations when a death takes place.
-void DeathData::RecordDeath(DurationInt queue_duration,
- DurationInt run_duration) {
+DeathData::DeathData() {
+ Clear();
+}
+
+DeathData::DeathData(int count) {
+ Clear();
+ count_ = count;
+}
+
+// TODO(jar): I need to see if this macro to optimize branching is worth it.
+//
+// This macro has no branching, so it is surely fast, and is equivalent to:
+// if (assign_it)
+// target = source;
+// We use a macro rather than a template to force this to inline.
+// Related code for calculating max is discussed on the web.
+#define CONDITIONAL_ASSIGN(assign_it, target, source) \
+ ((target) ^= ((target) ^ (source)) & -static_cast<DurationInt>(assign_it))
+
+void DeathData::RecordDeath(const DurationInt queue_duration,
+ const DurationInt run_duration,
+ int32 random_number) {
+ queue_duration_sum_ += queue_duration;
+ run_duration_sum_ += run_duration;
++count_;
- queue_time_.AddDuration(queue_duration);
- run_time_.AddDuration(run_duration);
+
+ // Take a uniformly distributed sample over all durations ever supplied.
+ // The probability that we (instead) use this new sample is 1/count_. This
+ // results in a completely uniform selection of the sample.
+ // We ignore the fact that the run and queue time samples are correlated
+ // (we always sample both from the same death).
+ bool take_sample = 0 == (random_number % count_);
+ CONDITIONAL_ASSIGN(take_sample, queue_duration_sample_, queue_duration);
+ CONDITIONAL_ASSIGN(take_sample, run_duration_sample_, run_duration);
+
+ CONDITIONAL_ASSIGN(queue_duration_max_ < queue_duration, queue_duration_max_,
+ queue_duration);
+ CONDITIONAL_ASSIGN(run_duration_max_ < run_duration, run_duration_max_,
+ run_duration);
+ // Ensure we got the macros right.
+ DCHECK_GE(queue_duration_max_, queue_duration);
+ DCHECK_GE(run_duration_max_, run_duration);
+ DCHECK(!take_sample || run_duration_sample_ == run_duration);
+ DCHECK(!take_sample || queue_duration_sample_ == queue_duration);
}
-DurationInt DeathData::AverageMsRunDuration() const {
- return run_time_.AverageMsDuration(count_);
+int DeathData::count() const { return count_; }
+
+DurationInt DeathData::run_duration_sum() const { return run_duration_sum_; }
+
+DurationInt DeathData::run_duration_max() const { return run_duration_max_; }
+
+DurationInt DeathData::run_duration_sample() const {
+ return run_duration_sample_;
}
-DurationInt DeathData::AverageMsQueueDuration() const {
- return queue_time_.AverageMsDuration(count_);
+DurationInt DeathData::queue_duration_sum() const {
+ return queue_duration_sum_;
}
-void DeathData::AddDeathData(const DeathData& other) {
- count_ += other.count_;
- queue_time_.AddData(other.queue_time_);
- run_time_.AddData(other.run_time_);
+DurationInt DeathData::queue_duration_max() const {
+ return queue_duration_max_;
}
+DurationInt DeathData::queue_duration_sample() const {
+ return queue_duration_sample_;
+}
+
+
base::DictionaryValue* DeathData::ToValue() const {
base::DictionaryValue* dictionary = new base::DictionaryValue;
dictionary->Set("count", base::Value::CreateIntegerValue(count_));
dictionary->Set("run_ms",
- base::Value::CreateIntegerValue(run_time_.duration()));
- dictionary->Set("queue_ms",
- base::Value::CreateIntegerValue(queue_time_.duration()));
+ base::Value::CreateIntegerValue(run_duration_sum()));
dictionary->Set("run_ms_max",
- base::Value::CreateIntegerValue(run_time_.max()));
+ base::Value::CreateIntegerValue(run_duration_max()));
+ dictionary->Set("run_ms_sample",
+ base::Value::CreateIntegerValue(run_duration_sample()));
+ dictionary->Set("queue_ms",
+ base::Value::CreateIntegerValue(queue_duration_sum()));
dictionary->Set("queue_ms_max",
- base::Value::CreateIntegerValue(queue_time_.max()));
+ base::Value::CreateIntegerValue(queue_duration_max()));
+ dictionary->Set("queue_ms_sample",
+ base::Value::CreateIntegerValue(queue_duration_sample()));
return dictionary;
}
-void DeathData::Clear() {
- count_ = 0;
- run_time_.Clear();
- queue_time_.Clear();
-}
-
-//------------------------------------------------------------------------------
-
-void DeathData::Data::AddData(const Data& other) {
- duration_ += other.duration_;
- if (max_ > other.max_)
- return;
- max_ = other.max_;
+void DeathData::ResetMax() {
+ run_duration_max_ = 0;
+ queue_duration_max_ = 0;
}
-void DeathData::Data::AddDuration(DurationInt duration) {
- duration_ += duration;
- if (max_ > duration)
- return;
- max_ = duration;
-}
-
-DurationInt DeathData::Data::AverageMsDuration(int count) const {
- if (duration_ == 0 || !count)
- return 0;
- return (duration_ + count / 2) / count;
+void DeathData::Clear() {
+ count_ = 0;
+ run_duration_sum_ = 0;
+ run_duration_max_ = 0;
+ run_duration_sample_ = 0;
+ queue_duration_sum_ = 0;
+ queue_duration_max_ = 0;
+ queue_duration_sample_ = 0;
}
-void DeathData::Data::Clear() {
- duration_ = 0;
- max_ = 0;
-}
//------------------------------------------------------------------------------
BirthOnThread::BirthOnThread(const Location& location,
const ThreadData& current)
: location_(location),
- birth_thread_(&current) {}
+ birth_thread_(&current) {
+}
+
+const Location BirthOnThread::location() const { return location_; }
+const ThreadData* BirthOnThread::birth_thread() const { return birth_thread_; }
//------------------------------------------------------------------------------
Births::Births(const Location& location, const ThreadData& current)
: BirthOnThread(location, current),
birth_count_(1) { }
+int Births::birth_count() const { return birth_count_; }
+
+void Births::RecordBirth() { ++birth_count_; }
+
+void Births::ForgetBirth() { --birth_count_; }
+
+void Births::Clear() { birth_count_ = 0; }
+
//------------------------------------------------------------------------------
-// ThreadData maintains the central data for all births and deaths.
+// ThreadData maintains the central data for all births and deaths on a single
+// thread.
// TODO(jar): We should pull all these static vars together, into a struct, and
// optimize layout so that we benefit from locality of reference during accesses
@@ -170,6 +218,10 @@ ThreadData::ThreadData(int thread_number)
ThreadData::~ThreadData() {}
void ThreadData::PushToHeadOfList() {
+ // Toss in a hint of randomness (atop the uninitialized value).
+ random_number_ += static_cast<int32>(this - static_cast<ThreadData*>(0));
+ random_number_ ^= (Now() - TrackedTime()).InMilliseconds();
+
DCHECK(!next_);
base::AutoLock lock(*list_lock_.Pointer());
incarnation_count_for_pool_ = incarnation_counter_;
@@ -178,6 +230,14 @@ void ThreadData::PushToHeadOfList() {
}
// static
+ThreadData* ThreadData::first() {
+ base::AutoLock lock(*list_lock_.Pointer());
+ return all_thread_data_list_head_;
+}
+
+ThreadData* ThreadData::next() const { return next_; }
+
+// static
void ThreadData::InitializeThreadContext(const std::string& suggested_name) {
if (!Initialize()) // Always initialize if needed.
return;
@@ -252,8 +312,10 @@ void ThreadData::OnThreadTerminationCleanup() {
}
// static
-base::DictionaryValue* ThreadData::ToValue() {
+base::DictionaryValue* ThreadData::ToValue(bool reset_max) {
DataCollector collected_data; // Gather data.
+ // Request multiple calls to collected_data.Append() for all threads.
+ SendAllMaps(reset_max, &collected_data);
collected_data.AddListOfLivingObjects(); // Add births that are still alive.
base::ListValue* list = collected_data.ToValue();
base::DictionaryValue* dictionary = new base::DictionaryValue();
@@ -279,6 +341,12 @@ Births* ThreadData::TallyABirth(const Location& location) {
void ThreadData::TallyADeath(const Births& birth,
DurationInt queue_duration,
DurationInt run_duration) {
+ // Stir in some randomness, and add a constant in case the durations are zero.
+ const DurationInt kSomePrimeNumber = 4294967279;
+ random_number_ += queue_duration + run_duration + kSomePrimeNumber;
+ // An address is going to have some randomness to it as well ;-).
+ random_number_ ^= static_cast<int32>(&birth - reinterpret_cast<Births*>(0));
+
DeathMap::iterator it = death_map_.find(&birth);
DeathData* death_data;
if (it != death_map_.end()) {
@@ -287,7 +355,7 @@ void ThreadData::TallyADeath(const Births& birth,
base::AutoLock lock(map_lock_); // Lock as the map may get relocated now.
death_data = &death_map_[&birth];
} // Release lock ASAP.
- death_data->RecordDeath(queue_duration, run_duration);
+ death_data->RecordDeath(queue_duration, run_duration, random_number_);
}
// static
@@ -409,26 +477,46 @@ void ThreadData::TallyRunInAScopedRegionIfTracking(
current_thread_data->TallyADeath(*birth, queue_duration, run_duration);
}
-// static
-ThreadData* ThreadData::first() {
- base::AutoLock lock(*list_lock_.Pointer());
- return all_thread_data_list_head_;
-}
+const std::string ThreadData::thread_name() const { return thread_name_; }
// This may be called from another thread.
-void ThreadData::SnapshotBirthMap(BirthMap *output) const {
+void ThreadData::SnapshotMaps(bool reset_max,
+ BirthMap* birth_map,
+ DeathMap* death_map) {
base::AutoLock lock(map_lock_);
for (BirthMap::const_iterator it = birth_map_.begin();
it != birth_map_.end(); ++it)
- (*output)[it->first] = it->second;
+ (*birth_map)[it->first] = it->second;
+ for (DeathMap::iterator it = death_map_.begin();
+ it != death_map_.end(); ++it) {
+ (*death_map)[it->first] = it->second;
+ if (reset_max)
+ it->second.ResetMax();
+ }
}
-// This may be called from another thread.
-void ThreadData::SnapshotDeathMap(DeathMap *output) const {
- base::AutoLock lock(map_lock_);
- for (DeathMap::const_iterator it = death_map_.begin();
- it != death_map_.end(); ++it)
- (*output)[it->first] = it->second;
+// static
+void ThreadData::SendAllMaps(bool reset_max, class DataCollector* target) {
+ if (!kTrackAllTaskObjects)
+ return; // Not compiled in.
+ // Get an unchanging copy of a ThreadData list.
+ ThreadData* my_list = ThreadData::first();
+
+ // Gather data serially.
+ // This hackish approach *can* get some slightly corrupt tallies, as we are
+ // grabbing values without the protection of a lock, but it has the advantage
+ // of working even with threads that don't have message loops. If a user
+ // sees any strangeness, they can always just run their stats gathering a
+ // second time.
+ for (ThreadData* thread_data = my_list;
+ thread_data;
+ thread_data = thread_data->next()) {
+ // Get copy of data.
+ ThreadData::BirthMap birth_map;
+ ThreadData::DeathMap death_map;
+ thread_data->SnapshotMaps(reset_max, &birth_map, &death_map);
+ target->Append(*thread_data, birth_map, death_map);
+ }
}
// static
@@ -543,7 +631,7 @@ void ThreadData::ShutdownSingleThreadedCleanup(bool leak) {
all_thread_data_list_head_ = NULL;
++incarnation_counter_;
// To be clean, break apart the retired worker list (though we leak them).
- while(first_retired_worker_) {
+ while (first_retired_worker_) {
ThreadData* worker = first_retired_worker_;
CHECK_GT(worker->worker_thread_number_, 0);
first_retired_worker_ = worker->next_retired_worker_;
@@ -617,36 +705,14 @@ base::DictionaryValue* Snapshot::ToValue() const {
//------------------------------------------------------------------------------
// DataCollector
-DataCollector::DataCollector() {
- if (!kTrackAllTaskObjects)
- return; // Not compiled in.
-
- // Get an unchanging copy of a ThreadData list.
- ThreadData* my_list = ThreadData::first();
-
- // Gather data serially.
- // This hackish approach *can* get some slighly corrupt tallies, as we are
- // grabbing values without the protection of a lock, but it has the advantage
- // of working even with threads that don't have message loops. If a user
- // sees any strangeness, they can always just run their stats gathering a
- // second time.
- for (ThreadData* thread_data = my_list;
- thread_data;
- thread_data = thread_data->next()) {
- Append(*thread_data);
- }
-}
+DataCollector::DataCollector() {}
DataCollector::~DataCollector() {
}
-void DataCollector::Append(const ThreadData& thread_data) {
- // Get copy of data.
- ThreadData::BirthMap birth_map;
- thread_data.SnapshotBirthMap(&birth_map);
- ThreadData::DeathMap death_map;
- thread_data.SnapshotDeathMap(&death_map);
-
+void DataCollector::Append(const ThreadData& thread_data,
+ const ThreadData::BirthMap& birth_map,
+ const ThreadData::DeathMap& death_map) {
for (ThreadData::DeathMap::const_iterator it = death_map.begin();
it != death_map.end(); ++it) {
collection_.push_back(Snapshot(*it->first, thread_data, it->second));
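The CONDITIONAL_ASSIGN macro defined earlier in this patch relies on a standard branch-free selection trick: -(condition) is an all-ones mask when the condition is true and zero otherwise, so XORing target with (target ^ source) & mask yields source when the condition holds and leaves target unchanged when it does not. A self-contained sketch (hypothetical helper, not part of the patch):

#include <cassert>
#include <cstddef>
#include <stdint.h>

// Branch-free equivalent of: if (assign_it) *target = source;
// -static_cast<int32_t>(assign_it) is all ones when true, all zeros when false.
inline void ConditionalAssign(bool assign_it, int32_t* target, int32_t source) {
  *target ^= (*target ^ source) & -static_cast<int32_t>(assign_it);
}

int main() {
  int32_t max = 0;
  const int32_t samples[] = { 3, 9, 4, 7 };
  for (size_t i = 0; i < sizeof(samples) / sizeof(samples[0]); ++i)
    ConditionalAssign(max < samples[i], &max, samples[i]);  // Running max.
  assert(max == 9);
  return 0;
}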
diff --git a/base/tracked_objects.h b/base/tracked_objects.h
index 29b2900..1eca68b 100644
--- a/base/tracked_objects.h
+++ b/base/tracked_objects.h
@@ -12,6 +12,7 @@
#include <vector>
#include "base/base_export.h"
+#include "base/gtest_prod_util.h"
#include "base/lazy_instance.h"
#include "base/location.h"
#include "base/profiler/tracked_time.h"
@@ -197,8 +198,8 @@ class BASE_EXPORT BirthOnThread {
public:
BirthOnThread(const Location& location, const ThreadData& current);
- const Location location() const { return location_; }
- const ThreadData* birth_thread() const { return birth_thread_; }
+ const Location location() const;
+ const ThreadData* birth_thread() const;
private:
// File/lineno of birth. This defines the essence of the task, as the context
@@ -220,17 +221,17 @@ class BASE_EXPORT Births: public BirthOnThread {
public:
Births(const Location& location, const ThreadData& current);
- int birth_count() const { return birth_count_; }
+ int birth_count() const;
// When we have a birth we update the count for this BirthPlace.
- void RecordBirth() { ++birth_count_; }
+ void RecordBirth();
// When a birthplace is changed (updated), we need to decrement the counter
// for the old instance.
- void ForgetBirth() { --birth_count_; } // We corrected a birth place.
+ void ForgetBirth();
// Hack to quickly reset all counts to zero.
- void Clear() { birth_count_ = 0; }
+ void Clear();
private:
// The number of births on this thread for our location_.
@@ -247,70 +248,49 @@ class BASE_EXPORT Births: public BirthOnThread {
class BASE_EXPORT DeathData {
public:
// Default initializer.
- DeathData() : count_(0) {}
+ DeathData();
// When deaths have not yet taken place, and we gather data from all the
// threads, we create DeathData stats that tally the number of births without
- // a corrosponding death.
- explicit DeathData(int count)
- : count_(count) {}
+ // a corresponding death.
+ explicit DeathData(int count);
// Update stats for a task destruction (death) that had a Run() time of
// |duration|, and has had a queueing delay of |queue_duration|.
- void RecordDeath(DurationInt queue_duration,
- DurationInt run_duration);
-
- // Metrics accessors.
- int count() const { return count_; }
- DurationInt run_duration() const { return run_time_.duration(); }
- DurationInt AverageMsRunDuration() const;
- DurationInt run_duration_max() const { return run_time_.max(); }
- DurationInt queue_duration() const { return queue_time_.duration(); }
- DurationInt AverageMsQueueDuration() const;
- DurationInt queue_duration_max() const { return queue_time_.max(); }
-
- // Accumulate metrics from other into this. This method is never used on
- // realtime statistics, and only used in snapshots and aggregatinos.
- void AddDeathData(const DeathData& other);
+ void RecordDeath(const DurationInt queue_duration,
+ const DurationInt run_duration,
+ int random_number);
+
+ // Metrics accessors, used only in tests.
+ int count() const;
+ DurationInt run_duration_sum() const;
+ DurationInt run_duration_max() const;
+ DurationInt run_duration_sample() const;
+ DurationInt queue_duration_sum() const;
+ DurationInt queue_duration_max() const;
+ DurationInt queue_duration_sample() const;
// Construct a DictionaryValue instance containing all our stats. The caller
// assumes ownership of the returned instance.
base::DictionaryValue* ToValue() const;
+ // Reset the max values to zero.
+ void ResetMax();
+
// Reset all tallies to zero. This is used as a hack on realtime data.
void Clear();
private:
- // DeathData::Data is a helper class, useful when different metrics need to be
- // aggregated, such as queueing times, or run times.
- class Data {
- public:
- Data() : duration_(0), max_(0) {}
- ~Data() {}
-
- DurationInt duration() const { return duration_; }
- DurationInt max() const { return max_; }
-
- // Agggegate data into our state.
- void AddData(const Data& other);
- void AddDuration(DurationInt duration);
-
- // Central helper function for calculating averages (correctly, in only one
- // place).
- DurationInt AverageMsDuration(int count) const;
-
- // Resets all members to zero.
- void Clear();
-
- private:
- DurationInt duration_; // Sum of all durations seen.
- DurationInt max_; // Largest singular duration seen.
- };
-
-
- int count_; // Number of deaths seen.
- Data run_time_; // Data about run time durations.
- Data queue_time_; // Data about queueing times durations.
+ // Number of runs seen.
+ int count_;
+ // Data about run time durations.
+ DurationInt run_duration_sum_;
+ DurationInt run_duration_max_;
+ DurationInt run_duration_sample_;
+ // Data about queueing times durations.
+ DurationInt queue_duration_sum_;
+ DurationInt queue_duration_max_;
+ DurationInt queue_duration_sample_;
};
//------------------------------------------------------------------------------
@@ -329,29 +309,9 @@ class BASE_EXPORT Snapshot {
// When snapshotting a birth, with no death yet, use this:
Snapshot(const BirthOnThread& birth_on_thread, int count);
- const ThreadData* birth_thread() const { return birth_->birth_thread(); }
- const Location location() const { return birth_->location(); }
- const BirthOnThread& birth() const { return *birth_; }
- const ThreadData* death_thread() const {return death_thread_; }
- const DeathData& death_data() const { return death_data_; }
+ // Accessor that provides a default value when there is no death thread.
const std::string DeathThreadName() const;
- int count() const { return death_data_.count(); }
- DurationInt run_duration() const { return death_data_.run_duration(); }
- DurationInt AverageMsRunDuration() const {
- return death_data_.AverageMsRunDuration();
- }
- DurationInt run_duration_max() const {
- return death_data_.run_duration_max();
- }
- DurationInt queue_duration() const { return death_data_.queue_duration(); }
- DurationInt AverageMsQueueDuration() const {
- return death_data_.AverageMsQueueDuration();
- }
- DurationInt queue_duration_max() const {
- return death_data_.queue_duration_max();
- }
-
// Construct a DictionaryValue instance containing all our data recursively.
// The caller assumes ownership of the memory in the returned instance.
base::DictionaryValue* ToValue() const;
@@ -363,53 +323,6 @@ class BASE_EXPORT Snapshot {
};
//------------------------------------------------------------------------------
-// DataCollector is a container class for Snapshot and BirthOnThread count
-// items.
-
-class BASE_EXPORT DataCollector {
- public:
- typedef std::vector<Snapshot> Collection;
-
- // Construct with a list of how many threads should contribute. This helps us
- // determine (in the async case) when we are done with all contributions.
- DataCollector();
- ~DataCollector();
-
- // Adds all stats from the indicated thread into our arrays. This function
- // uses locks at the lowest level (when accessing the underlying maps which
- // could change when not locked), and can be called from any threads.
- void Append(const ThreadData& thread_data);
-
- // After the accumulation phase, the following accessor is used to process the
- // data (i.e., sort it, filter it, etc.).
- Collection* collection();
-
- // Adds entries for all the remaining living objects (objects that have
- // tallied a birth, but have not yet tallied a matching death, and hence must
- // be either running, queued up, or being held in limbo for future posting).
- // This should be called after all known ThreadData instances have been
- // processed using Append().
- void AddListOfLivingObjects();
-
- // Generates a ListValue representation of the vector of snapshots. The caller
- // assumes ownership of the memory in the returned instance.
- base::ListValue* ToValue() const;
-
- private:
- typedef std::map<const BirthOnThread*, int> BirthCount;
-
- // The array that we collect data into.
- Collection collection_;
-
- // The total number of births recorded at each location for which we have not
- // seen a death count. This map changes as we do Append() calls, and is later
- // used by AddListOfLivingObjects() to gather up unaccounted for births.
- BirthCount global_birth_count_;
-
- DISALLOW_COPY_AND_ASSIGN(DataCollector);
-};
-
-//------------------------------------------------------------------------------
// For each thread, we have a ThreadData that stores all tracking info generated
// on this thread. This prevents the need for locking as data accumulates.
// We use ThreadLocalStorage to quickly identify the current ThreadData context.
@@ -443,8 +356,9 @@ class BASE_EXPORT ThreadData {
// Constructs a DictionaryValue instance containing all recursive results in
// our process. The caller assumes ownership of the memory in the returned
- // instance.
- static base::DictionaryValue* ToValue();
+ // instance. If |reset_max| is true, the max values in each DeathData
+ // instance are reset to zero during the scan.
+ static base::DictionaryValue* ToValue(bool reset_max);
// Finds (or creates) a place to count births from the given location in this
// thread, and increment that tally.
@@ -484,24 +398,13 @@ class BASE_EXPORT ThreadData {
const TrackedTime& start_of_run,
const TrackedTime& end_of_run);
- const std::string thread_name() const { return thread_name_; }
+ const std::string thread_name() const;
- // ---------------------
- // TODO(jar):
- // The following functions should all be private, and are only public because
- // the collection is done externally. We need to relocate that code from the
- // collection class into this class, and then all these methods can be made
- // private.
- // (Thread safe) Get start of list of all ThreadData instances.
- static ThreadData* first();
- // Iterate through the null terminated list of ThreadData instances.
- ThreadData* next() const { return next_; }
- // Using our lock, make a copy of the specified maps. These calls may arrive
- // from non-local threads, and are used to quickly scan data from all threads
- // in order to build JSON for about:profiler.
- void SnapshotBirthMap(BirthMap *output) const;
- void SnapshotDeathMap(DeathMap *output) const;
- // -------- end of should be private methods.
+ // Snapshot (under a lock) the maps in each ThreadData instance, and for each
+ // pair of maps (BirthMap and DeathMap) call the Append() method of the
+ // |target| DataCollector. If |reset_max| is true, the max values in each
+ // DeathData instance are reset during the scan.
+ static void SendAllMaps(bool reset_max, class DataCollector* target);
// Hack: asynchronously clear all birth counts and death tallies data values
// in all ThreadData instances. The numerical (zeroing) part is done without
@@ -540,7 +443,12 @@ class BASE_EXPORT ThreadData {
private:
// Allow only tests to call ShutdownSingleThreadedCleanup. We NEVER call it
// in production code.
+ // TODO(jar): Make this a friend in DEBUG only, so that the optimizer has a
+ // better chance of optimizing (inlining? etc.) private methods (knowing that
+ // there will be no need for an external entry point).
friend class TrackedObjectsTest;
+ FRIEND_TEST_ALL_PREFIXES(TrackedObjectsTest, MinimalStartupShutdown);
+ FRIEND_TEST_ALL_PREFIXES(TrackedObjectsTest, TinyStartupShutdown);
// Worker thread construction creates a name since there is none.
explicit ThreadData(int thread_number);
@@ -555,6 +463,13 @@ class BASE_EXPORT ThreadData {
// the instance permanently on that list.
void PushToHeadOfList();
+ // (Thread safe) Get start of list of all ThreadData instances using the lock.
+ static ThreadData* first();
+
+ // Iterate through the null terminated list of ThreadData instances.
+ ThreadData* next() const;
+
+
// In this thread's data, record a new birth.
Births* TallyABirth(const Location& location);
@@ -563,6 +478,15 @@ class BASE_EXPORT ThreadData {
DurationInt queue_duration,
DurationInt duration);
+ // Using our lock, make a copy of the specified maps. This call may be made
+ // from non-local threads, which necessitates the lock, to prevent the
+ // map(s) from being reallocated while they are copied. If |reset_max| is
+ // true, then, just after we copy the DeathMap, we will set the max values to
+ // zero in the active DeathMap (not the snapshot).
+ void SnapshotMaps(bool reset_max,
+ BirthMap* birth_map,
+ DeathMap* death_map);
+
// Using our lock to protect the iteration, Clear all birth and death data.
void Reset();
@@ -669,10 +593,65 @@ class BASE_EXPORT ThreadData {
// writing is only done from this thread.
mutable base::Lock map_lock_;
+ // A random number that we use to decide which sample to keep as a
+ // representative sample in each DeathData instance. We can't start off with
+ // much randomness (because we can't call RandInt() on all our threads), so
+ // we stir in more and more as we go.
+ int32 random_number_;
+
DISALLOW_COPY_AND_ASSIGN(ThreadData);
};
//------------------------------------------------------------------------------
+// DataCollector is a container class for Snapshot and BirthOnThread count
+// items.
+
+class BASE_EXPORT DataCollector {
+ public:
+ typedef std::vector<Snapshot> Collection;
+
+ // Construct with a list of how many threads should contribute. This helps us
+ // determine (in the async case) when we are done with all contributions.
+ DataCollector();
+ ~DataCollector();
+
+ // Adds all stats from the indicated thread into our arrays. Accepts copies
+ // of the birth_map and death_map, so that the data will not change during the
+ // iterations and processing.
+ void Append(const ThreadData& thread_data,
+ const ThreadData::BirthMap& birth_map,
+ const ThreadData::DeathMap& death_map);
+
+ // After the accumulation phase, the following accessor is used to process the
+ // data (i.e., sort it, filter it, etc.).
+ Collection* collection();
+
+ // Adds entries for all the remaining living objects (objects that have
+ // tallied a birth, but have not yet tallied a matching death, and hence must
+ // be either running, queued up, or being held in limbo for future posting).
+ // This should be called after all known ThreadData instances have been
+ // processed using Append().
+ void AddListOfLivingObjects();
+
+ // Generates a ListValue representation of the vector of snapshots. The caller
+ // assumes ownership of the memory in the returned instance.
+ base::ListValue* ToValue() const;
+
+ private:
+ typedef std::map<const BirthOnThread*, int> BirthCount;
+
+ // The array that we collect data into.
+ Collection collection_;
+
+ // The total number of births recorded at each location for which we have not
+ // seen a death count. This map changes as we do Append() calls, and is later
+ // used by AddListOfLivingObjects() to gather up unaccounted for births.
+ BirthCount global_birth_count_;
+
+ DISALLOW_COPY_AND_ASSIGN(DataCollector);
+};
+
+//------------------------------------------------------------------------------
// Provide a simple way to start global tracking, and to tear down tracking
// when done. The design has evolved to *not* do any teardown (and just leak
// all allocated data structures). As a result, we don't have any code in this
diff --git a/base/tracked_objects_unittest.cc b/base/tracked_objects_unittest.cc
index 2bbdfe6..8282444 100644
--- a/base/tracked_objects_unittest.cc
+++ b/base/tracked_objects_unittest.cc
@@ -45,10 +45,9 @@ TEST_F(TrackedObjectsTest, MinimalStartupShutdown) {
EXPECT_TRUE(!data->next());
EXPECT_EQ(data, ThreadData::Get());
ThreadData::BirthMap birth_map;
- data->SnapshotBirthMap(&birth_map);
- EXPECT_EQ(0u, birth_map.size());
ThreadData::DeathMap death_map;
- data->SnapshotDeathMap(&death_map);
+ data->SnapshotMaps(false, &birth_map, &death_map);
+ EXPECT_EQ(0u, birth_map.size());
EXPECT_EQ(0u, death_map.size());
// Cleanup with no leaking.
ShutdownSingleThreadedCleanup(false);
@@ -62,10 +61,9 @@ TEST_F(TrackedObjectsTest, MinimalStartupShutdown) {
EXPECT_TRUE(!data->next());
EXPECT_EQ(data, ThreadData::Get());
birth_map.clear();
- data->SnapshotBirthMap(&birth_map);
- EXPECT_EQ(0u, birth_map.size());
death_map.clear();
- data->SnapshotDeathMap(&death_map);
+ data->SnapshotMaps(false, &birth_map, &death_map);
+ EXPECT_EQ(0u, birth_map.size());
EXPECT_EQ(0u, death_map.size());
}
@@ -77,16 +75,15 @@ TEST_F(TrackedObjectsTest, TinyStartupShutdown) {
const Location& location = FROM_HERE;
ThreadData::TallyABirthIfActive(location);
- const ThreadData* data = ThreadData::first();
+ ThreadData* data = ThreadData::first();
ASSERT_TRUE(data);
EXPECT_TRUE(!data->next());
EXPECT_EQ(data, ThreadData::Get());
ThreadData::BirthMap birth_map;
- data->SnapshotBirthMap(&birth_map);
+ ThreadData::DeathMap death_map;
+ data->SnapshotMaps(false, &birth_map, &death_map);
EXPECT_EQ(1u, birth_map.size()); // 1 birth location.
EXPECT_EQ(1, birth_map.begin()->second->birth_count()); // 1 birth.
- ThreadData::DeathMap death_map;
- data->SnapshotDeathMap(&death_map);
EXPECT_EQ(0u, death_map.size()); // No deaths.
@@ -100,11 +97,10 @@ TEST_F(TrackedObjectsTest, TinyStartupShutdown) {
kBogusEndRunTime);
birth_map.clear();
- data->SnapshotBirthMap(&birth_map);
+ death_map.clear();
+ data->SnapshotMaps(false, &birth_map, &death_map);
EXPECT_EQ(1u, birth_map.size()); // 1 birth location.
EXPECT_EQ(2, birth_map.begin()->second->birth_count()); // 2 births.
- death_map.clear();
- data->SnapshotDeathMap(&death_map);
EXPECT_EQ(1u, death_map.size()); // 1 location.
EXPECT_EQ(1, death_map.begin()->second.count()); // 1 death.
@@ -118,35 +114,40 @@ TEST_F(TrackedObjectsTest, DeathDataTest) {
scoped_ptr<DeathData> data(new DeathData());
ASSERT_NE(data, reinterpret_cast<DeathData*>(NULL));
- EXPECT_EQ(data->run_duration(), 0);
- EXPECT_EQ(data->queue_duration(), 0);
- EXPECT_EQ(data->AverageMsRunDuration(), 0);
- EXPECT_EQ(data->AverageMsQueueDuration(), 0);
+ EXPECT_EQ(data->run_duration_sum(), 0);
+ EXPECT_EQ(data->run_duration_sample(), 0);
+ EXPECT_EQ(data->queue_duration_sum(), 0);
+ EXPECT_EQ(data->queue_duration_sample(), 0);
EXPECT_EQ(data->count(), 0);
DurationInt run_ms = 42;
DurationInt queue_ms = 8;
- data->RecordDeath(queue_ms, run_ms);
- EXPECT_EQ(data->run_duration(), run_ms);
- EXPECT_EQ(data->queue_duration(), queue_ms);
- EXPECT_EQ(data->AverageMsRunDuration(), run_ms);
- EXPECT_EQ(data->AverageMsQueueDuration(), queue_ms);
+ const int kUnrandomInt = 0; // Fake random int that ensures we sample data.
+ data->RecordDeath(queue_ms, run_ms, kUnrandomInt);
+ EXPECT_EQ(data->run_duration_sum(), run_ms);
+ EXPECT_EQ(data->run_duration_sample(), run_ms);
+ EXPECT_EQ(data->queue_duration_sum(), queue_ms);
+ EXPECT_EQ(data->queue_duration_sample(), queue_ms);
EXPECT_EQ(data->count(), 1);
- data->RecordDeath(queue_ms, run_ms);
- EXPECT_EQ(data->run_duration(), run_ms + run_ms);
- EXPECT_EQ(data->queue_duration(), queue_ms + queue_ms);
- EXPECT_EQ(data->AverageMsRunDuration(), run_ms);
- EXPECT_EQ(data->AverageMsQueueDuration(), queue_ms);
+ data->RecordDeath(queue_ms, run_ms, kUnrandomInt);
+ EXPECT_EQ(data->run_duration_sum(), run_ms + run_ms);
+ EXPECT_EQ(data->run_duration_sample(), run_ms);
+ EXPECT_EQ(data->queue_duration_sum(), queue_ms + queue_ms);
+ EXPECT_EQ(data->queue_duration_sample(), queue_ms);
EXPECT_EQ(data->count(), 2);
scoped_ptr<base::DictionaryValue> dictionary(data->ToValue());
int integer;
EXPECT_TRUE(dictionary->GetInteger("run_ms", &integer));
EXPECT_EQ(integer, 2 * run_ms);
+ EXPECT_TRUE(dictionary->GetInteger("run_ms_sample", &integer));
+ EXPECT_EQ(integer, run_ms);
EXPECT_TRUE(dictionary->GetInteger("queue_ms", &integer));
EXPECT_EQ(integer, 2 * queue_ms);
+ EXPECT_TRUE(dictionary->GetInteger("queue_ms_sample", &integer));
+ EXPECT_EQ(integer, queue_ms);
EXPECT_TRUE(dictionary->GetInteger("count", &integer));
EXPECT_EQ(integer, 2);
@@ -157,8 +158,10 @@ TEST_F(TrackedObjectsTest, DeathDataTest) {
"\"count\":2,"
"\"queue_ms\":16,"
"\"queue_ms_max\":8,"
+ "\"queue_ms_sample\":8,"
"\"run_ms\":84,"
- "\"run_ms_max\":42"
+ "\"run_ms_max\":42,"
+ "\"run_ms_sample\":42"
"}";
EXPECT_EQ(birth_only_result, json);
}
@@ -177,7 +180,7 @@ TEST_F(TrackedObjectsTest, DeactivatedBirthOnlyToValueWorkerThread) {
// We should now see a NULL birth record.
EXPECT_EQ(birth, reinterpret_cast<Births*>(NULL));
- scoped_ptr<base::Value> value(ThreadData::ToValue());
+ scoped_ptr<base::Value> value(ThreadData::ToValue(false));
std::string json;
base::JSONWriter::Write(value.get(), false, &json);
std::string birth_only_result = "{"
@@ -203,7 +206,7 @@ TEST_F(TrackedObjectsTest, DeactivatedBirthOnlyToValueMainThread) {
// We expect to not get a birth record.
EXPECT_EQ(birth, reinterpret_cast<Births*>(NULL));
- scoped_ptr<base::Value> value(ThreadData::ToValue());
+ scoped_ptr<base::Value> value(ThreadData::ToValue(false));
std::string json;
base::JSONWriter::Write(value.get(), false, &json);
std::string birth_only_result = "{"
@@ -225,7 +228,7 @@ TEST_F(TrackedObjectsTest, BirthOnlyToValueWorkerThread) {
Births* birth = ThreadData::TallyABirthIfActive(location);
EXPECT_NE(birth, reinterpret_cast<Births*>(NULL));
- scoped_ptr<base::Value> value(ThreadData::ToValue());
+ scoped_ptr<base::Value> value(ThreadData::ToValue(false));
std::string json;
base::JSONWriter::Write(value.get(), false, &json);
std::string birth_only_result = "{"
@@ -236,8 +239,10 @@ TEST_F(TrackedObjectsTest, BirthOnlyToValueWorkerThread) {
"\"count\":1,"
"\"queue_ms\":0,"
"\"queue_ms_max\":0,"
+ "\"queue_ms_sample\":0,"
"\"run_ms\":0,"
- "\"run_ms_max\":0"
+ "\"run_ms_max\":0,"
+ "\"run_ms_sample\":0"
"},"
"\"death_thread\":\"Still_Alive\","
"\"location\":{"
@@ -265,7 +270,7 @@ TEST_F(TrackedObjectsTest, BirthOnlyToValueMainThread) {
Births* birth = ThreadData::TallyABirthIfActive(location);
EXPECT_NE(birth, reinterpret_cast<Births*>(NULL));
- scoped_ptr<base::Value> value(ThreadData::ToValue());
+ scoped_ptr<base::Value> value(ThreadData::ToValue(false));
std::string json;
base::JSONWriter::Write(value.get(), false, &json);
std::string birth_only_result = "{"
@@ -276,8 +281,10 @@ TEST_F(TrackedObjectsTest, BirthOnlyToValueMainThread) {
"\"count\":1,"
"\"queue_ms\":0,"
"\"queue_ms_max\":0,"
+ "\"queue_ms_sample\":0,"
"\"run_ms\":0,"
- "\"run_ms_max\":0"
+ "\"run_ms_max\":0,"
+ "\"run_ms_sample\":0"
"},"
"\"death_thread\":\"Still_Alive\","
"\"location\":{"
@@ -318,7 +325,7 @@ TEST_F(TrackedObjectsTest, LifeCycleToValueMainThread) {
ThreadData::TallyRunOnNamedThreadIfTracking(pending_task,
kStartOfRun, kEndOfRun);
- scoped_ptr<base::Value> value(ThreadData::ToValue());
+ scoped_ptr<base::Value> value(ThreadData::ToValue(false));
std::string json;
base::JSONWriter::Write(value.get(), false, &json);
std::string one_line_result = "{"
@@ -329,8 +336,10 @@ TEST_F(TrackedObjectsTest, LifeCycleToValueMainThread) {
"\"count\":1,"
"\"queue_ms\":4,"
"\"queue_ms_max\":4,"
+ "\"queue_ms_sample\":4,"
"\"run_ms\":2,"
- "\"run_ms_max\":2"
+ "\"run_ms_max\":2,"
+ "\"run_ms_sample\":2"
"},"
"\"death_thread\":\"SomeMainThreadName\","
"\"location\":{"
@@ -378,7 +387,7 @@ TEST_F(TrackedObjectsTest, LifeCycleMidDeactivatedToValueMainThread) {
ThreadData::TallyRunOnNamedThreadIfTracking(pending_task,
kStartOfRun, kEndOfRun);
- scoped_ptr<base::Value> value(ThreadData::ToValue());
+ scoped_ptr<base::Value> value(ThreadData::ToValue(false));
std::string json;
base::JSONWriter::Write(value.get(), false, &json);
std::string one_line_result = "{"
@@ -389,8 +398,10 @@ TEST_F(TrackedObjectsTest, LifeCycleMidDeactivatedToValueMainThread) {
"\"count\":1,"
"\"queue_ms\":4,"
"\"queue_ms_max\":4,"
+ "\"queue_ms_sample\":4,"
"\"run_ms\":2,"
- "\"run_ms_max\":2"
+ "\"run_ms_max\":2,"
+ "\"run_ms_sample\":2"
"},"
"\"death_thread\":\"SomeMainThreadName\","
"\"location\":{"
@@ -433,7 +444,7 @@ TEST_F(TrackedObjectsTest, LifeCyclePreDeactivatedToValueMainThread) {
ThreadData::TallyRunOnNamedThreadIfTracking(pending_task,
kStartOfRun, kEndOfRun);
- scoped_ptr<base::Value> value(ThreadData::ToValue());
+ scoped_ptr<base::Value> value(ThreadData::ToValue(false));
std::string json;
base::JSONWriter::Write(value.get(), false, &json);
std::string one_line_result = "{"
@@ -465,7 +476,8 @@ TEST_F(TrackedObjectsTest, LifeCycleToValueWorkerThread) {
ThreadData::TallyRunOnWorkerThreadIfTracking(birth, kTimePosted,
kStartOfRun, kEndOfRun);
- scoped_ptr<base::Value> value(ThreadData::ToValue());
+ // Call ToValue, but tell it not to reset the maxes after scanning.
+ scoped_ptr<base::Value> value(ThreadData::ToValue(false));
std::string json;
base::JSONWriter::Write(value.get(), false, &json);
std::string one_line_result = "{"
@@ -476,8 +488,10 @@ TEST_F(TrackedObjectsTest, LifeCycleToValueWorkerThread) {
"\"count\":1,"
"\"queue_ms\":4,"
"\"queue_ms_max\":4,"
+ "\"queue_ms_sample\":4,"
"\"run_ms\":2,"
- "\"run_ms_max\":2"
+ "\"run_ms_max\":2,"
+ "\"run_ms_sample\":2"
"},"
"\"death_thread\":\"WorkerThread-1\","
"\"location\":{"
@@ -489,6 +503,42 @@ TEST_F(TrackedObjectsTest, LifeCycleToValueWorkerThread) {
"]"
"}";
EXPECT_EQ(one_line_result, json);
+
+ // Call ToValue again, but tell it to reset the maxes after scanning.
+ // We'll still get the same values, but the data will be reset (which we'll
+ // see in a moment).
+ value.reset(ThreadData::ToValue(true));
+ base::JSONWriter::Write(value.get(), false, &json);
+ // Result should be unchanged.
+ EXPECT_EQ(one_line_result, json);
+
+ // Call ToValue once more, and now we'll see the result of the last call, as
+ // the maxes will have been pushed back to zero.
+ value.reset(ThreadData::ToValue(false));
+ base::JSONWriter::Write(value.get(), false, &json);
+ std::string one_line_result_with_zeros = "{"
+ "\"list\":["
+ "{"
+ "\"birth_thread\":\"WorkerThread-1\","
+ "\"death_data\":{"
+ "\"count\":1,"
+ "\"queue_ms\":4,"
+ "\"queue_ms_max\":0," // Note zero here.
+ "\"queue_ms_sample\":4,"
+ "\"run_ms\":2,"
+ "\"run_ms_max\":0," // Note zero here.
+ "\"run_ms_sample\":2"
+ "},"
+ "\"death_thread\":\"WorkerThread-1\","
+ "\"location\":{"
+ "\"file_name\":\"FixedFileName\","
+ "\"function_name\":\"LifeCycleToValueWorkerThread\","
+ "\"line_number\":236"
+ "}"
+ "}"
+ "]"
+ "}";
+ EXPECT_EQ(one_line_result_with_zeros, json);
}
TEST_F(TrackedObjectsTest, TwoLives) {
@@ -526,7 +576,7 @@ TEST_F(TrackedObjectsTest, TwoLives) {
ThreadData::TallyRunOnNamedThreadIfTracking(pending_task2,
kStartOfRun, kEndOfRun);
- scoped_ptr<base::Value> value(ThreadData::ToValue());
+ scoped_ptr<base::Value> value(ThreadData::ToValue(false));
std::string json;
base::JSONWriter::Write(value.get(), false, &json);
std::string one_line_result = "{"
@@ -537,8 +587,10 @@ TEST_F(TrackedObjectsTest, TwoLives) {
"\"count\":2,"
"\"queue_ms\":8,"
"\"queue_ms_max\":4,"
+ "\"queue_ms_sample\":4,"
"\"run_ms\":4,"
- "\"run_ms_max\":2"
+ "\"run_ms_max\":2,"
+ "\"run_ms_sample\":2"
"},"
"\"death_thread\":\"SomeFileThreadName\","
"\"location\":{"
@@ -583,7 +635,7 @@ TEST_F(TrackedObjectsTest, DifferentLives) {
base::TrackingInfo pending_task2(second_location, kDelayedStartTime);
pending_task2.time_posted = kTimePosted; // Overwrite implied Now().
- scoped_ptr<base::Value> value(ThreadData::ToValue());
+ scoped_ptr<base::Value> value(ThreadData::ToValue(false));
std::string json;
base::JSONWriter::Write(value.get(), false, &json);
std::string one_line_result = "{"
@@ -594,8 +646,10 @@ TEST_F(TrackedObjectsTest, DifferentLives) {
"\"count\":1,"
"\"queue_ms\":4,"
"\"queue_ms_max\":4,"
+ "\"queue_ms_sample\":4,"
"\"run_ms\":2,"
- "\"run_ms_max\":2"
+ "\"run_ms_max\":2,"
+ "\"run_ms_sample\":2"
"},"
"\"death_thread\":\"SomeFileThreadName\","
"\"location\":{"
@@ -610,8 +664,10 @@ TEST_F(TrackedObjectsTest, DifferentLives) {
"\"count\":1,"
"\"queue_ms\":0,"
"\"queue_ms_max\":0,"
+ "\"queue_ms_sample\":0,"
"\"run_ms\":0,"
- "\"run_ms_max\":0"
+ "\"run_ms_max\":0,"
+ "\"run_ms_sample\":0"
"},"
"\"death_thread\":\"Still_Alive\","
"\"location\":{"
diff --git a/chrome/browser/metrics/tracking_synchronizer.cc b/chrome/browser/metrics/tracking_synchronizer.cc
index 3844e87..84f670c 100644
--- a/chrome/browser/metrics/tracking_synchronizer.cc
+++ b/chrome/browser/metrics/tracking_synchronizer.cc
@@ -245,7 +245,7 @@ int TrackingSynchronizer::RegisterAndNotifyAllProcesses(
content::ProfilerController::GetInstance()->GetProfilerData(sequence_number);
// Send profiler_data from browser process.
- base::DictionaryValue* value = tracked_objects::ThreadData::ToValue();
+ base::DictionaryValue* value = tracked_objects::ThreadData::ToValue(false);
const std::string process_type =
content::GetProcessTypeNameInEnglish(content::PROCESS_TYPE_BROWSER);
value->SetString("process_type", process_type);
diff --git a/content/common/child_thread.cc b/content/common/child_thread.cc
index 9988e27..fedd88e 100644
--- a/content/common/child_thread.cc
+++ b/content/common/child_thread.cc
@@ -232,7 +232,7 @@ void ChildThread::OnGetChildProfilerData(
int sequence_number,
const std::string& process_type) {
scoped_ptr<base::DictionaryValue> value(
- tracked_objects::ThreadData::ToValue());
+ tracked_objects::ThreadData::ToValue(false));
value->SetString("process_type", process_type);
value->SetInteger("process_id", base::GetCurrentProcId());
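Taken together, the |reset_max| plumbing gives callers an incremental-max protocol: passing true harvests the maxima accumulated since the previous harvest and zeroes them, so each snapshot reports per-interval rather than all-time maxima. A hedged usage sketch against the API as patched above (the helper name is illustrative):

#include "base/memory/scoped_ptr.h"
#include "base/tracked_objects.h"
#include "base/values.h"

// Harvest profiler data so that run_ms_max/queue_ms_max cover only the
// interval since the previous harvest; ToValue(true) resets the maxima.
scoped_ptr<base::DictionaryValue> HarvestProfilerInterval() {
  return scoped_ptr<base::DictionaryValue>(
      tracked_objects::ThreadData::ToValue(true));
}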