author    jar@chromium.org <jar@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>  2009-12-30 07:31:45 +0000
committer jar@chromium.org <jar@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>  2009-12-30 07:31:45 +0000
commit    75b7920402c6f834376a18aa1927b223c6e3eadb (patch)
tree      c0a770a9b85fcb09fb988ba2ff0c2eef8b994b98
parent    d3d98bc21e6fe0ea6aa0186194347a1f5e4d7be8 (diff)
Provide a quick and dirty way to reset about:objects data
To make it easier to use the about:objects profiling facility, I put in a quick/dirty way to reset all profile stats to 0 (as if there were no births, deaths, etc.). This code is only activated under debug builds (or if a developer insists in a private build). These stats don't impact the semantics of the browser, so the hackish approach to clearing the data counts can't instigate a crash, and it makes it much easier to look at changes in the stats.

While changing the code, I also added a lot of comments and did a few minor cleanup items. I also officially added about:tasks as a replacement for about:objects, as this is really how the service is used in Chrome.

r=mbelshe

Review URL: http://codereview.chromium.org/100297

git-svn-id: svn://svn.chromium.org/chrome/trunk/src@35372 0039d316-1c4b-4281-b951-d872f2087c98
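The about:tasks query grammar described above (keyphrases separated by slashes, an optional "=value" match, and the new immediate "reset" keyword) is implemented by Comparator::ParseQuery()/ParseKeyphrase() in the diff below. As a rough, self-contained sketch of that dispatch shape (not the committed code; HandleKeyword() and ParseQuerySketch() are hypothetical stand-ins for SetTiebreaker() and ThreadData::ResetAllThreadData()):

#include <iostream>
#include <map>
#include <string>

enum Selector { NIL, BIRTH_THREAD, BIRTH_FILE, RESET_ALL_DATA };

// Stand-in for what the real code does: either record a sort/filter key, or
// act immediately on the "reset" command.
static void HandleKeyword(Selector selector, const std::string& required) {
  if (selector == RESET_ALL_DATA)
    std::cout << "reset all thread data\n";
  else
    std::cout << "sort key " << selector << " match '" << required << "'\n";
}

static void ParseQuerySketch(const std::string& query) {
  std::map<std::string, Selector> key_map;
  key_map["birth"] = BIRTH_THREAD;
  key_map["file"] = BIRTH_FILE;
  key_map["reset"] = RESET_ALL_DATA;
  for (size_t i = 0; i < query.size();) {
    size_t slash = query.find('/', i);
    std::string key_phrase = query.substr(i, slash - i);
    // "keyword=value" supplies a required match for that field.
    size_t equal = key_phrase.find('=');
    std::string keyword = key_phrase.substr(0, equal);
    std::map<std::string, Selector>::iterator it = key_map.find(keyword);
    if (it != key_map.end()) {
      std::string required;
      if (equal != std::string::npos)
        required = key_phrase.substr(equal + 1);
      HandleKeyword(it->second, required);
    }
    if (slash == std::string::npos)
      break;
    i = slash + 1;
  }
}

int main() {
  ParseQuerySketch("file=Dns/birth/reset");  // e.g. about:tasks/file=Dns/birth/reset
  return 0;
}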
-rw-r--r--  base/tracked.cc                            3
-rw-r--r--  base/tracked_objects.cc                  104
-rw-r--r--  base/tracked_objects.h                   209
-rw-r--r--  chrome/browser/browser_about_handler.cc   18
4 files changed, 289 insertions, 45 deletions
diff --git a/base/tracked.cc b/base/tracked.cc
index 1ba894b..34694bb 100644
--- a/base/tracked.cc
+++ b/base/tracked.cc
@@ -77,8 +77,7 @@ void Tracked::SetBirthPlace(const Location& from_here) {
ThreadData* current_thread_data = ThreadData::current();
if (!current_thread_data)
return; // Shutdown started, and this thread wasn't registered.
- tracked_births_ = current_thread_data->FindLifetime(from_here);
- tracked_births_->RecordBirth();
+ tracked_births_ = current_thread_data->TallyABirth(from_here);
}
void Tracked::ResetBirthTime() {
diff --git a/base/tracked_objects.cc b/base/tracked_objects.cc
index 0d68703..a51c0da 100644
--- a/base/tracked_objects.cc
+++ b/base/tracked_objects.cc
@@ -62,7 +62,6 @@ void DeathData::Clear() {
}
//------------------------------------------------------------------------------
-
BirthOnThread::BirthOnThread(const Location& location)
: location_(location),
birth_thread_(ThreadData::current()) { }
@@ -70,7 +69,7 @@ BirthOnThread::BirthOnThread(const Location& location)
//------------------------------------------------------------------------------
Births::Births(const Location& location)
: BirthOnThread(location),
- birth_count_(0) { }
+ birth_count_(1) { }
//------------------------------------------------------------------------------
// ThreadData maintains the central data for all births and death.
@@ -178,7 +177,36 @@ void ThreadData::WriteHTML(const std::string& query, std::string* output) {
comparator.Clear(); // Delete tiebreaker_ instances.
- output->append("</pre></body></html>");
+ output->append("</pre>");
+
+ const char* help_string = "The following are the keywords that can be used to"
+ "sort and aggregate the data, or to select data.<br><ul>"
+ "<li><b>count</b> Number of instances seen."
+ "<li><b>duration</b> Duration in ms from construction to descrution."
+ "<li><b>birth</b> Thread on which the task was constructed."
+ "<li><b>death</b> Thread on which the task was run and deleted."
+ "<li><b>file</b> File in which the task was contructed."
+ "<li><b>function</b> Function in which the task was constructed."
+ "<li><b>line</b> Line number of the file in which the task was constructed."
+ "</ul><br>"
+ "As examples:<ul>"
+ "<li><b>about:tasks/file</b> would sort the above data by file, and"
+ " aggregate data on a per-file basis."
+ "<li><b>about:tasks/file=Dns</b> would only list data for tasks constructed"
+ " in a file containing the text |Dns|."
+ "<li><b>about:tasks/birth/death</b> would sort the above list by birth"
+ " thread, and then by death thread, and would aggregate data for each pair"
+ " of lifetime events."
+ "</ul>"
+ " The data can be reset to zero (discarding all births, deaths, etc.) using"
+ " <b>about:tasks/reset</b>. The existing stats will be displayed, but the"
+ " internal stats will be set to zero, and start accumulating afresh. This"
+ " option is very helpful if you only wish to consider tasks created after"
+ " some point in time.<br><br>"
+ "If you wish to monitor Renderer events, be sure to run in --single-process"
+ " mode.";
+ output->append(help_string);
+ output->append("</body></html>");
}
// static
@@ -222,15 +250,17 @@ void ThreadData::WriteHTMLTotalAndSubtotals(
}
}
-Births* ThreadData::FindLifetime(const Location& location) {
+Births* ThreadData::TallyABirth(const Location& location) {
if (!message_loop_) // In case message loop wasn't yet around...
message_loop_ = MessageLoop::current(); // Find it now.
BirthMap::iterator it = birth_map_.find(location);
- if (it != birth_map_.end())
+ if (it != birth_map_.end()) {
+ it->second->RecordBirth();
return it->second;
- Births* tracker = new Births(location);
+ }
+ Births* tracker = new Births(location);
// Lock since the map may get relocated now, and other threads sometimes
// snapshot it (but they lock before copying it).
AutoLock lock(lock_);
@@ -267,7 +297,7 @@ const std::string ThreadData::ThreadName() const {
// This may be called from another thread.
void ThreadData::SnapshotBirthMap(BirthMap *output) const {
- AutoLock lock(*const_cast<Lock*>(&lock_));
+ AutoLock lock(lock_);
for (BirthMap::const_iterator it = birth_map_.begin();
it != birth_map_.end(); ++it)
(*output)[it->first] = it->second;
@@ -275,13 +305,34 @@ void ThreadData::SnapshotBirthMap(BirthMap *output) const {
// This may be called from another thread.
void ThreadData::SnapshotDeathMap(DeathMap *output) const {
- AutoLock lock(*const_cast<Lock*>(&lock_));
+ AutoLock lock(lock_);
for (DeathMap::const_iterator it = death_map_.begin();
it != death_map_.end(); ++it)
(*output)[it->first] = it->second;
}
+// static
+void ThreadData::ResetAllThreadData() {
+ ThreadData* my_list = ThreadData::current()->first();
+
+ for (ThreadData* thread_data = my_list;
+ thread_data;
+ thread_data = thread_data->next())
+ thread_data->Reset();
+}
+
+void ThreadData::Reset() {
+ AutoLock lock(lock_);
+ for (DeathMap::iterator it = death_map_.begin();
+ it != death_map_.end(); ++it)
+ it->second.Clear();
+ for (BirthMap::iterator it = birth_map_.begin();
+ it != birth_map_.end(); ++it)
+ it->second->Clear();
+}
+
#ifdef OS_WIN
+// TODO(jar): This should use condition variables, and be cross platform.
void ThreadData::RunOnAllThreads(void (*function)()) {
ThreadData* list = first(); // Get existing list.
@@ -395,7 +446,7 @@ void ThreadData::ShutdownDisablingFurtherTracking() {
ThreadData::ThreadSafeDownCounter::ThreadSafeDownCounter(size_t count)
: remaining_count_(count) {
- DCHECK(remaining_count_ > 0);
+ DCHECK_GT(remaining_count_, 0u);
}
bool ThreadData::ThreadSafeDownCounter::LastCaller() {
@@ -467,6 +518,7 @@ void Snapshot::Add(const Snapshot& other) {
DataCollector::DataCollector() {
DCHECK(ThreadData::IsActive());
+ // Get an unchanging copy of a ThreadData list.
ThreadData* my_list = ThreadData::current()->first();
count_of_contributing_threads_ = 0;
@@ -478,6 +530,14 @@ DataCollector::DataCollector() {
// Gather data serially. A different constructor could be used to do in
// parallel, and then invoke an OnCompletion task.
+ // This hackish approach *can* get some slightly corrupt tallies, as we are
+ // grabbing values without the protection of a lock, but it has the advantage
+ // of working even with threads that don't have message loops. If a user
+ // sees any strangeness, they can always just run their stats gathering a
+ // second time.
+ // TODO(jar): Provide version that gathers stats safely via PostTask in all
+ // cases where thread_data supplies a message_loop to post to. Be careful to
+ // handle message_loops that are destroyed!?!
for (ThreadData* thread_data = my_list;
thread_data;
thread_data = thread_data->next()) {
@@ -670,6 +730,8 @@ bool Comparator::operator()(const Snapshot& left,
break;
case AVERAGE_DURATION:
+ if (!left.count() || !right.count())
+ break;
if (left.AverageMsDuration() != right.AverageMsDuration())
return left.AverageMsDuration() > right.AverageMsDuration();
break;
@@ -807,10 +869,13 @@ void Comparator::SetSubgroupTiebreaker(Selector selector) {
}
void Comparator::ParseKeyphrase(const std::string& key_phrase) {
- static std::map<const std::string, Selector> key_map;
+ typedef std::map<const std::string, Selector> KeyMap;
+ static KeyMap key_map;
static bool initialized = false;
if (!initialized) {
initialized = true;
+ // Sorting and aggregation keywords, which specify how to sort the data, or
+ // can specify a required match from the specified field in the record.
key_map["count"] = COUNT;
key_map["duration"] = AVERAGE_DURATION;
key_map["birth"] = BIRTH_THREAD;
@@ -818,20 +883,31 @@ void Comparator::ParseKeyphrase(const std::string& key_phrase) {
key_map["file"] = BIRTH_FILE;
key_map["function"] = BIRTH_FUNCTION;
key_map["line"] = BIRTH_LINE;
+
+ // Immediate commands that do not involve setting sort order.
+ key_map["reset"] = RESET_ALL_DATA;
}
std::string required;
+ // Watch for: "sort_key=value" as we parse.
size_t equal_offset = key_phrase.find('=', 0);
- if (key_phrase.npos != equal_offset)
+ if (key_phrase.npos != equal_offset) {
+ // There is a value that must be matched for the data to display.
required = key_phrase.substr(equal_offset + 1, key_phrase.npos);
+ }
std::string keyword(key_phrase.substr(0, equal_offset));
keyword = StringToLowerASCII(keyword);
- if (key_map.end() == key_map.find(keyword))
- return;
- SetTiebreaker(key_map[keyword], required);
+ KeyMap::iterator it = key_map.find(keyword);
+ if (key_map.end() == it)
+ return; // Unknown keyword.
+ if (it->second == RESET_ALL_DATA)
+ ThreadData::ResetAllThreadData();
+ else
+ SetTiebreaker(key_map[keyword], required);
}
bool Comparator::ParseQuery(const std::string& query) {
+ // Parse each keyphrase between consecutive slashes.
for (size_t i = 0; i < query.size();) {
size_t slash_offset = query.find('/', i);
ParseKeyphrase(query.substr(i, slash_offset - i));
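The TallyABirth() change above folds the old FindLifetime()/RecordBirth() pair into one call: look up the Births record for a Location, bump its count if it already exists, otherwise create it (now starting at a count of 1, matching the Births constructor change) and insert it under the lock. A minimal, self-contained sketch of that find-or-tally pattern, using a plain string in place of Location and a bare int in place of Births (hypothetical names, not the committed code):

#include <cassert>
#include <map>
#include <string>
#include <utility>

class BirthTallySketch {
 public:
  // Returns the tally slot for this "location", creating it on first use.
  int* TallyABirth(const std::string& location) {
    std::map<std::string, int>::iterator it = births_.find(location);
    if (it != births_.end()) {
      ++it->second;  // Existing record: just bump the count.
      return &it->second;
    }
    // New location: create the record with an initial count of 1. In the real
    // code this insertion happens under a lock because other threads snapshot
    // the map while it may be growing.
    return &births_.insert(std::make_pair(location, 1)).first->second;
  }

 private:
  std::map<std::string, int> births_;
};

int main() {
  BirthTallySketch tally;
  tally.TallyABirth("file.cc:42");
  assert(*tally.TallyABirth("file.cc:42") == 2);  // Second birth at same spot.
  return 0;
}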
diff --git a/base/tracked_objects.h b/base/tracked_objects.h
index 1934afb..4d63997 100644
--- a/base/tracked_objects.h
+++ b/base/tracked_objects.h
@@ -14,8 +14,143 @@
#include "base/thread_local_storage.h"
#include "base/tracked.h"
+// TrackedObjects provides a database of stats about objects (generally Tasks)
+// that are tracked. Tracking means their birth, death, duration, birth thread,
+// death thread, and birth place are recorded. This data is carefully spread
+// across a series of objects so that the counts and times can be rapidly
+// updated without (usually) having to lock the data, and hence there is usually
+// very little contention caused by the tracking. The data can be viewed via
+// the about:objects URL, with a variety of sorting and filtering choices.
+//
+// These classes serve as the basis of a profiler of sorts for the Tasks
+// system. As a result, design decisions were made to maximize speed, by
+// minimizing recurring allocation/deallocation, lock contention and data
+// copying. In the "stable" state, which is reached relatively quickly, there
+// is no separate marginal allocation cost associated with construction or
+// destruction of tracked objects, no locks are generally employed, and probably
+// the largest computational cost is associated with obtaining start and stop
+// times for instances as they are created and destroyed. The introduction of
+// worker threads had a slight impact on this approach, and required use of some
+// locks when accessing data from the worker threads.
+//
+// The following describes the lifecycle of tracking an instance.
+//
+// First off, when the instance is created, the FROM_HERE macro is expanded
+// to specify the birth place (file, line, function) where the instance was
+// created. That data is used to create a transient Location instance
+// encapsulating the above triple of information. The strings (like __FILE__)
+// are passed around by reference, with the assumption that they are static, and
+// will never go away. This ensures that the strings can be dealt with as atoms
+// with great efficiency (i.e., copying of strings is never needed, and
+// comparisons for equality can be based on pointer comparisons).
+//
+// Next, a Births instance is created for use ONLY on the thread where this
+// instance was created. That Births instance records (in a base class
+// BirthOnThread) references to the static data provided in a Location instance,
+// as well as a pointer specifying the thread on which the birth takes place.
+// Hence there is at most one Births instance for each Location on each thread.
+// The derived Births class contains slots for recording statistics about all
+// instances born at the same location. Statistics currently include only the
+// count of instances constructed.
+// Since the base class BirthOnThread contains only constant data, it can be
+// freely accessed by any thread at any time (i.e., only the statistic needs to
+// be handled carefully, and it is ONLY read or written by the birth thread).
+//
+// Having now either constructed or found the Births instance described above, a
+// pointer to the Births instance is then embedded in a base class of the
+// instance we're tracking (usually a Task). This fact alone is very useful in
+// debugging, when there is a question of where an instance came from. In
+// addition, the birth time is also embedded in the base class Tracked (see
+// tracked.h), and used to later evaluate the lifetime duration.
+// As a result of the above embedding, we can (for any tracked instance) find
+// out its location of birth, and thread of birth, without using any locks, as
+// all that data is constant across the life of the process.
+//
+// The amount of memory used in the above data structures depends on how many
+// threads there are, and how many Locations of construction there are.
+// Fortunately, we don't use memory that is the product of those two counts, but
+// rather we only need one Births instance for each thread that constructs an
+// instance at a Location. In many cases, instances (such as Tasks) are only
+// created on one thread, so the memory utilization is actually fairly
+// restrained.
+//
+// Lastly, when an instance is deleted, the final tallies of statistics are
+// carefully accumulated. That tallying writes into slots (members) in a
+// collection of DeathData instances. For each birth place Location that is
+// destroyed on a thread, there is a DeathData instance to record the additional
+// death count, as well as accumulate the lifetime duration of the instance as
+// it is destroyed (dies). By maintaining a single place to aggregate this
+// addition *only* for the given thread, we avoid the need to lock such
+// DeathData instances.
+//
+// With the above lifecycle description complete, the major remaining detail is
+// explaining how each thread maintains a list of DeathData instances, and of
+// Births instances, and is able to avoid additional (redundant/unnecessary)
+// allocations.
+//
+// Each thread maintains a list of data items specific to that thread in a
+// ThreadData instance (for that specific thread only). The two critical items
+// are lists of DeathData and Births instances. These lists are maintained in
+// STL maps, which are indexed by Location. As noted earlier, we can compare
+// locations very efficiently as we consider the underlying data (file,
+// function, line) to be atoms, and hence pointer comparison is used rather than
+// (slow) string comparisons.
+//
+// To provide a mechanism for iterating over all "known threads," which means
+// threads that have recorded a birth or a death, we create a singly linked list
+// of ThreadData instances. Each such instance maintains a pointer to the next
+// one. A static member of ThreadData provides a pointer to the first_ item on
+// this global list, and access to that first_ item requires the use of a lock_.
+// When a new ThreadData instance is added to the global list, it is prepended,
+// which ensures that any prior acquisition of the list is valid (i.e., the
+// holder can iterate over it without fear of it changing, or the necessity of
+// using an additional lock). Iterations are actually pretty rare (used
+// primarily for cleanup, or snapshotting data for display), so this lock has
+// very little global performance impact.
+//
+// The above description tries to define the high performance (run time)
+// portions of these classes. After gathering statistics, calls instigated
+// by visiting about:objects will assemble and aggregate data for display. The
+// following data structures are used for producing such displays. They are
+// not performance critical, and their only major constraint is that they should
+// be able to run concurrently with ongoing augmentation of the birth and death
+// data.
+//
+// For a given birth location, information about births is spread across data
+// structures that are asynchronously changing on various threads. For display
+// purposes, we need to construct Snapshot instances for each combination of
+// birth thread, death thread, and location, along with the count of such
+// lifetimes. We gather such data into Snapshot instances, so that such
+// instances can be sorted and aggregated (and remain frozen during our
+// processing). Snapshot instances use pointers to constant portions of the
+// birth and death data structures, but have local (frozen) copies of the actual
+// statistics (birth count, durations, etc. etc.).
+//
+// A DataCollector is a container object that holds a set of Snapshots. A
+// DataCollector can be passed from thread to thread, and each thread
+// contributes to it by adding or updating Snapshot instances. DataCollector
+// instances are thread safe containers which are passed to various threads to
+// accumulate all Snapshot instances.
+//
+// After the Snapshot instances are collected into a DataCollector, they
+// need to be sorted, and possibly aggregated (example: how many threads are in
+// a specific consecutive set of Snapshots? What was the total birth count for
+// that set? etc.). Aggregation instances collect running sums of any set of
+// snapshot instances, and are used to print sub-totals in an about:objects
+// page.
+//
+// TODO(jar): I need to store DataCollections, and provide facilities for taking
+// the difference between two gathered DataCollections. For now, I'm just
+// adding a hack that Reset()'s to zero all counts and stats. This is also
+// done in a slightly thread-unsafe fashion, as the resetting is done
+// asynchronously relative to ongoing updates, and worse yet, some data fields
+// are 64-bit quantities, and are not atomically accessed (reset or incremented
+// etc.). For basic profiling, this will work "most of the time," and should be
+// sufficient... but storing away DataCollections is the "right way" to do this.
+//
class MessageLoop;
+
namespace tracked_objects {
//------------------------------------------------------------------------------
@@ -58,6 +193,9 @@ class Births: public BirthOnThread {
// for the old instance.
void ForgetBirth() { --birth_count_; } // We corrected a birth place.
+ // Hack to quickly reset all counts to zero.
+ void Clear() { birth_count_ = 0; }
+
private:
// The number of births on this thread for our location_.
int birth_count_;
@@ -95,6 +233,7 @@ class DeathData {
// Simple print of internal state.
void Write(std::string* output) const;
+ // Reset all tallies to zero.
void Clear();
private:
@@ -144,7 +283,7 @@ class Snapshot {
//------------------------------------------------------------------------------
// DataCollector is a container class for Snapshot and BirthOnThread count
// items. It protects the gathering under locks, so that it could be called via
-// Posttask on any threads, such as all the target threads in parallel.
+// PostTask on any thread, or passed to all the target threads in parallel.
class DataCollector {
public:
@@ -159,7 +298,8 @@ class DataCollector {
// implementation serialized calls to Append).
void Append(const ThreadData& thread_data);
- // After the accumulation phase, the following access is to process data.
+ // After the accumulation phase, the following accessor is used to process the
+ // data.
Collection* collection();
// After collection of death data is complete, we can add entries for all the
@@ -213,14 +353,22 @@ class Aggregation: public DeathData {
};
//------------------------------------------------------------------------------
-// Comparator does the comparison of Snapshot instances. It is
-// used to order the instances in a vector. It orders them into groups (for
-// aggregation), and can also order instances within the groups (for detailed
-// rendering of the instances).
+// Comparator is a class that supports the comparison of Snapshot instances.
+// An instance is actually a list of chained Comparators, which can provide for
+// arbitrary ordering. The path portion of an about:objects URL is translated
+// into such a chain, which is then used to order Snapshot instances in a
+// vector. It orders them into groups (for aggregation), and can also order
+// instances within the groups (for detailed rendering of the instances in an
+// aggregation).
class Comparator {
public:
+ // Selector enum is the token identifier for each parsed keyword, most of
+ // which specify a sort order.
+ // Since it is not meaningful to sort more than once on a specific key, we
+ // use bitfields to accumulate what we have sorted on so far.
enum Selector {
+ // Sort orders.
NIL = 0,
BIRTH_THREAD = 1,
DEATH_THREAD = 2,
@@ -230,11 +378,14 @@ class Comparator {
COUNT = 32,
AVERAGE_DURATION = 64,
TOTAL_DURATION = 128,
+
+ // Immediate action keywords.
+ RESET_ALL_DATA = -1,
};
explicit Comparator();
- // Reset the comparator to a NIL selector. Reset() and recursively delete any
+ // Reset the comparator to a NIL selector. Clear() and recursively delete any
// tiebreaker_ entries. NOTE: We can't use a standard destructor, because
// the sort algorithm makes copies of this object, and then deletes them,
// which would cause problems (either we'd make expensive deep copies, or we'd
@@ -335,8 +486,8 @@ class ThreadData {
const DataCollector::Collection& match_array,
const Comparator& comparator, std::string* output);
- // In this thread's data, find a place to record a new birth.
- Births* FindLifetime(const Location& location);
+ // In this thread's data, record a new birth.
+ Births* TallyABirth(const Location& location);
// Find a place to record a death on this thread.
void TallyADeath(const Births& lifetimes, const base::TimeDelta& duration);
@@ -350,10 +501,24 @@ class ThreadData {
const std::string ThreadName() const;
// Using our lock, make a copy of the specified maps. These calls may arrive
- // from non-local threads.
+ // from non-local threads, and are used to quickly scan data from all threads
+ // in order to build an HTML page for about:objects.
void SnapshotBirthMap(BirthMap *output) const;
void SnapshotDeathMap(DeathMap *output) const;
+ // Hack: asynchronously clear all birth counts and death tally data values
+ // in all ThreadData instances. The numerical (zeroing) part is done without
+ // use of locks or atomic exchanges, and may (for int64 values) produce
+ // bogus counts VERY rarely.
+ static void ResetAllThreadData();
+
+ // Using our lock to protect the iteration, Clear all birth and death data.
+ void Reset();
+
+ // Using the "known list of threads" gathered during births and deaths, the
+ // following attempts to run the given function once on all such threads.
+ // Note that the function can only be run on threads which have a message
+ // loop!
static void RunOnAllThreads(void (*Func)());
// Set internal status_ to either become ACTIVE, or later, to be SHUTDOWN,
@@ -439,18 +604,17 @@ class ThreadData {
static void ShutdownDisablingFurtherTracking();
// We use thread local store to identify which ThreadData to interact with.
- static TLSSlot tls_index_ ;
+ static TLSSlot tls_index_;
// Link to the most recently created instance (starts a null terminated list).
static ThreadData* first_;
// Protection for access to first_.
static Lock list_lock_;
-
// We set status_ to SHUTDOWN when we shut down the tracking service. This
- // setting is redundantly established by all participating
- // threads so that we are *guaranteed* (without locking) that all threads
- // can "see" the status and avoid additional calls into the service.
+ // setting is redundantly established by all participating threads so that we
+ // are *guaranteed* (without locking) that all threads can "see" the status
+ // and avoid additional calls into the service.
static Status status_;
// Link to next instance (null terminated list). Used to globally track all
@@ -471,14 +635,17 @@ class ThreadData {
// Similar to birth_map_, this records information about the death of tracked
// instances (i.e., when a tracked instance was destroyed on this thread).
+ // It is locked before changing, and hence other threads may access it by
+ // locking before reading it.
DeathMap death_map_;
- // Lock to protect *some* access to BirthMap and DeathMap. We only use
- // locking protection when we are growing the maps, or using an iterator. We
- // only do writes to members from this thread, so the updates of values are
- // atomic. Folks can read from other threads, and get (via races) new or old
- // data, but that is considered acceptable errors (mis-information).
- Lock lock_;
+ // Lock to protect *some* access to BirthMap and DeathMap. The maps are
+ // regularly read and written on this thread, but may only be read from other
+ // threads. To support this, we acquire this lock if we are writing from this
+ // thread, or reading from another thread. For reading from this thread we
+ // don't need a lock, as there is no potential for a conflict since the
+ // writing is only done from this thread.
+ mutable Lock lock_;
DISALLOW_COPY_AND_ASSIGN(ThreadData);
};
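The locking comments in the header above describe the discipline ThreadData relies on: the owning thread writes its maps freely and only takes the (now mutable) lock when a map may grow, while any other thread takes the lock before copying a snapshot. A minimal sketch of that arrangement, assuming a simplified count map and std::mutex/std::lock_guard in place of base::Lock/AutoLock (hypothetical class, not Chromium code):

#include <map>
#include <mutex>
#include <string>

// Simplified stand-in for ThreadData's death_map_ plus its mutable lock_.
class PerThreadCounts {
 public:
  // Called only on the owning thread.
  void Tally(const std::string& location) {
    std::map<std::string, int>::iterator it = counts_.find(location);
    if (it != counts_.end()) {
      ++it->second;  // Owner-only update, no lock, as in the described design.
      return;
    }
    // Inserting may relocate map nodes while another thread is copying a
    // snapshot, so growth happens under the lock.
    std::lock_guard<std::mutex> lock(lock_);
    counts_[location] = 1;
  }

  // May be called from any thread; copies under the lock, like
  // SnapshotDeathMap() in the patch.
  void Snapshot(std::map<std::string, int>* output) const {
    std::lock_guard<std::mutex> lock(lock_);
    *output = counts_;
  }

 private:
  std::map<std::string, int> counts_;
  mutable std::mutex lock_;  // mutable so const snapshot calls can lock.
};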
diff --git a/chrome/browser/browser_about_handler.cc b/chrome/browser/browser_about_handler.cc
index 320af0c..1b8a556 100644
--- a/chrome/browser/browser_about_handler.cc
+++ b/chrome/browser/browser_about_handler.cc
@@ -77,19 +77,19 @@ void AboutTcmallocRendererCallback(base::ProcessId pid, std::string output) {
namespace {
-// The paths used for the about pages.
+// The (alphabetized) paths used for the about pages.
+const char kCreditsPath[] = "credits";
const char kDnsPath[] = "dns";
const char kHistogramsPath[] = "histograms";
-const char kObjectsPath[] = "objects";
const char kMemoryRedirectPath[] = "memory-redirect";
const char kMemoryPath[] = "memory";
-const char kTcmallocPath[] = "tcmalloc";
const char kPluginsPath[] = "plugins";
const char kStatsPath[] = "stats";
-const char kVersionPath[] = "version";
-const char kCreditsPath[] = "credits";
-const char kTermsPath[] = "terms";
const char kSyncPath[] = "sync";
+const char kTasksPath[] = "tasks";
+const char kTcmallocPath[] = "tcmalloc";
+const char kTermsPath[] = "terms";
+const char kVersionPath[] = "version";
#if defined(OS_CHROMEOS)
const char kOSCreditsPath[] = "os-credits";
@@ -325,7 +325,7 @@ void AboutMemory(AboutSource* source, int request_id) {
handler->StartFetch();
}
-std::string AboutObjects(const std::string& query) {
+static std::string AboutObjects(const std::string& query) {
std::string data;
tracked_objects::ThreadData::WriteHTML(query, &data);
return data;
@@ -675,8 +675,10 @@ void AboutSource::StartDataRequest(const std::string& path_raw,
return;
} else if (path == kMemoryRedirectPath) {
response = GetAboutMemoryRedirectResponse();
- } else if (path == kObjectsPath) {
+#ifdef TRACK_ALL_TASK_OBJECTS
+ } else if (path == kTasksPath) {
response = AboutObjects(info);
+#endif
} else if (path == kPluginsPath) {
response = AboutPlugins();
} else if (path == kStatsPath) {
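For reference on the reset path wired up above: ResetAllThreadData() walks the process-wide singly linked list of ThreadData instances and calls Reset() on each. The list is only ever prepended to (under list_lock_), which is why a previously obtained head pointer can be walked without holding the lock. A minimal sketch of that prepend-only registry, with hypothetical names and std::mutex standing in for base::Lock (not the committed code, and deliberately mirroring the acknowledged races of the hack):

#include <mutex>

class ThreadRecord {
 public:
  ThreadRecord() : next_(nullptr), tally_(0) {}

  // Called once per thread; prepends under the lock so an already-obtained
  // head pointer remains valid for iteration.
  void Register() {
    std::lock_guard<std::mutex> lock(list_lock_);
    next_ = first_;
    first_ = this;
  }

  void Tally() { ++tally_; }  // Owner-thread-only update.

  // Analogue of ResetAllThreadData(): walk the list and zero every tally.
  // Like the patch, this is "hackish": the zeroing races with owner threads.
  static void ResetAll() {
    ThreadRecord* head;
    {
      std::lock_guard<std::mutex> lock(list_lock_);
      head = first_;
    }
    for (ThreadRecord* record = head; record; record = record->next_)
      record->tally_ = 0;
  }

 private:
  ThreadRecord* next_;
  int tally_;
  static ThreadRecord* first_;
  static std::mutex list_lock_;
};

ThreadRecord* ThreadRecord::first_ = nullptr;
std::mutex ThreadRecord::list_lock_;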