author:    ssid <ssid@chromium.org>  2015-01-27 05:14:07 -0800
committer: Commit bot <commit-bot@chromium.org>  2015-01-27 13:16:19 +0000
commit:    a59d4277a6a82b3f379cf742f7320d9a6f3801ce (patch)
tree:      bd724eff37826b58a56614236e996a368cb56f1c /base/trace_event
parent:    454b4e532e491f2582029994ae263623ae58317d (diff)
Move base/debug/trace_event* to base/trace_event/ [part 1]
Trace Event used to be a single file, base/debug/trace_event.h. Then we added
more functionality, so it grew into base/debug/trace_event*.*

For many years, we have had per-file OWNERS for Trace Event based on the
trace_event* pattern. This let us iterate on trace_event improvements out of
band with the much slower base/OWNERS process. But it also encouraged poor
filename choices for tracing-related features, picked specifically so they
would fall under trace event reviewers instead of base/ reviewers. That's
clearly wrong.

BUG=451032

Review URL: https://codereview.chromium.org/837303004

Cr-Commit-Position: refs/heads/master@{#313266}
Diffstat (limited to 'base/trace_event')
-rw-r--r--  base/trace_event/OWNERS                                        |    3
-rw-r--r--  base/trace_event/trace_event.h                                 | 1589
-rw-r--r--  base/trace_event/trace_event_android.cc                        |  199
-rw-r--r--  base/trace_event/trace_event_argument.cc                       |  117
-rw-r--r--  base/trace_event/trace_event_argument.h                        |   59
-rw-r--r--  base/trace_event/trace_event_argument_unittest.cc              |   53
-rw-r--r--  base/trace_event/trace_event_impl.cc                           | 2594
-rw-r--r--  base/trace_event/trace_event_impl.h                            |  813
-rw-r--r--  base/trace_event/trace_event_impl_constants.cc                 |   28
-rw-r--r--  base/trace_event/trace_event_memory.cc                         |  440
-rw-r--r--  base/trace_event/trace_event_memory.h                          |  172
-rw-r--r--  base/trace_event/trace_event_memory_unittest.cc                |  240
-rw-r--r--  base/trace_event/trace_event_synthetic_delay.cc                |  233
-rw-r--r--  base/trace_event/trace_event_synthetic_delay.h                 |  166
-rw-r--r--  base/trace_event/trace_event_synthetic_delay_unittest.cc       |  154
-rw-r--r--  base/trace_event/trace_event_system_stats_monitor.cc           |  133
-rw-r--r--  base/trace_event/trace_event_system_stats_monitor.h            |   75
-rw-r--r--  base/trace_event/trace_event_system_stats_monitor_unittest.cc  |   66
-rw-r--r--  base/trace_event/trace_event_unittest.cc                       | 3084
-rw-r--r--  base/trace_event/trace_event_win.cc                            |  124
-rw-r--r--  base/trace_event/trace_event_win.h                             |  125
-rw-r--r--  base/trace_event/trace_event_win_unittest.cc                   |  319
22 files changed, 10786 insertions, 0 deletions
diff --git a/base/trace_event/OWNERS b/base/trace_event/OWNERS
new file mode 100644
index 0000000..3932776
--- /dev/null
+++ b/base/trace_event/OWNERS
@@ -0,0 +1,3 @@
+nduca@chromium.org
+dsinclair@chromium.org
+per-file trace_event_android.cc=wangxianzhu@chromium.org
diff --git a/base/trace_event/trace_event.h b/base/trace_event/trace_event.h
new file mode 100644
index 0000000..7f73071
--- /dev/null
+++ b/base/trace_event/trace_event.h
@@ -0,0 +1,1589 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This header file defines the set of trace_event macros without specifying
+// how the events actually get collected and stored. If you need to expose trace
+// events to some other universe, you can copy-and-paste this file as well as
+// trace_event.h, modifying the macros contained there as necessary for the
+// target platform. The end result is that multiple libraries can funnel events
+// through to a shared trace event collector.
+
+// Trace events are for tracking application performance and resource usage.
+// Macros are provided to track:
+// Begin and end of function calls
+// Counters
+//
+// Events are issued against categories. Whereas LOG's
+// categories are statically defined, TRACE categories are created
+// implicitly with a string. For example:
+// TRACE_EVENT_INSTANT0("MY_SUBSYSTEM", "SomeImportantEvent",
+// TRACE_EVENT_SCOPE_THREAD)
+//
+// It is often the case that one trace may belong in multiple categories at the
+// same time. The first argument to the trace can be a comma-separated list of
+// categories, forming a category group, like:
+//
+// TRACE_EVENT_INSTANT0("input,views", "OnMouseOver", TRACE_EVENT_SCOPE_THREAD)
+//
+// We can enable/disable tracing of OnMouseOver by enabling/disabling either
+// category.
+//
+// Events can be INSTANT, or can be pairs of BEGIN and END in the same scope:
+// TRACE_EVENT_BEGIN0("MY_SUBSYSTEM", "SomethingCostly")
+// doSomethingCostly()
+// TRACE_EVENT_END0("MY_SUBSYSTEM", "SomethingCostly")
+// Note: our tools can't always determine the correct BEGIN/END pairs unless
+// these are used in the same scope. Use ASYNC_BEGIN/ASYNC_END macros if you
+// need them to be in separate scopes.
+//
+// A common use case is to trace entire function scopes. This
+// issues a trace BEGIN and END automatically:
+// void doSomethingCostly() {
+// TRACE_EVENT0("MY_SUBSYSTEM", "doSomethingCostly");
+// ...
+// }
+//
+// Additional parameters can be associated with an event:
+// void doSomethingCostly2(int howMuch) {
+// TRACE_EVENT1("MY_SUBSYSTEM", "doSomethingCostly",
+// "howMuch", howMuch);
+// ...
+// }
+//
+// The trace system will automatically add to this information the
+// current process id, thread id, and a timestamp in microseconds.
+//
+// To trace an asynchronous procedure such as an IPC send/receive, use
+// ASYNC_BEGIN and ASYNC_END:
+// [single threaded sender code]
+// static int send_count = 0;
+// ++send_count;
+// TRACE_EVENT_ASYNC_BEGIN0("ipc", "message", send_count);
+// Send(new MyMessage(send_count));
+// [receive code]
+//   void OnMyMessage(int send_count) {
+// TRACE_EVENT_ASYNC_END0("ipc", "message", send_count);
+// }
+// The third parameter is a unique ID to match ASYNC_BEGIN/ASYNC_END pairs.
+// ASYNC_BEGIN and ASYNC_END can occur on any thread of any traced process.
+// Pointers can be used for the ID parameter, and they will be mangled
+// internally so that the same pointer on two different processes will not
+// match. For example:
+// class MyTracedClass {
+// public:
+// MyTracedClass() {
+// TRACE_EVENT_ASYNC_BEGIN0("category", "MyTracedClass", this);
+// }
+// ~MyTracedClass() {
+// TRACE_EVENT_ASYNC_END0("category", "MyTracedClass", this);
+// }
+//   };
+//
+// Trace event also supports counters, which is a way to track a quantity
+// as it varies over time. Counters are created with the following macro:
+// TRACE_COUNTER1("MY_SUBSYSTEM", "myCounter", g_myCounterValue);
+//
+// Counters are process-specific. The macro itself can be issued from any
+// thread, however.
+//
+// Sometimes, you want to track two counters at once. You can do this with two
+// counter macros:
+// TRACE_COUNTER1("MY_SUBSYSTEM", "myCounter0", g_myCounterValue[0]);
+// TRACE_COUNTER1("MY_SUBSYSTEM", "myCounter1", g_myCounterValue[1]);
+// Or you can do it with a combined macro:
+// TRACE_COUNTER2("MY_SUBSYSTEM", "myCounter",
+// "bytesPinned", g_myCounterValue[0],
+// "bytesAllocated", g_myCounterValue[1]);
+// This indicates to the tracing UI that these counters should be displayed
+// in a single graph, as a summed area chart.
+//
+// Since counters are in a global namespace, you may want to disambiguate with a
+// unique ID, by using the TRACE_COUNTER_ID* variations.
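+// For example (a hedged sketch; the class and member here are illustrative):
+//   void MyQueue::Push() {
+//     // Using |this| as the ID keeps counters from different MyQueue
+//     // instances distinct, even though they share the name "queueSize".
+//     TRACE_COUNTER_ID1("MY_SUBSYSTEM", "queueSize", this, size_);
+//   }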
+//
+// By default, trace collection is compiled in, but turned off at runtime.
+// Collecting trace data is the responsibility of the embedding
+// application. In Chrome's case, navigating to about:tracing will turn on
+// tracing and display data collected across all active processes.
+//
+//
+// Memory scoping note:
+// Tracing copies the pointers, not the string content, of the strings passed
+// in for category_group, name, and arg_names. Thus, the following code will
+// cause problems:
+// char* str = strdup("importantName");
+// TRACE_EVENT_INSTANT0("SUBSYSTEM", str); // BAD!
+// free(str); // Trace system now has dangling pointer
+//
+// To avoid this issue with the |name| and |arg_name| parameters, use the
+// TRACE_EVENT_COPY_XXX overloads of the macros at additional runtime overhead.
+// Notes: The category must always be in a long-lived char* (i.e. static const).
+// The |arg_values|, when used, are always deep copied with the _COPY
+// macros.
+//
+// When are string argument values copied:
+// const char* arg_values are only referenced by default:
+// TRACE_EVENT1("category", "name",
+// "arg1", "literal string is only referenced");
+// Use TRACE_STR_COPY to force copying of a const char*:
+// TRACE_EVENT1("category", "name",
+// "arg1", TRACE_STR_COPY("string will be copied"));
+// std::string arg_values are always copied:
+// TRACE_EVENT1("category", "name",
+// "arg1", std::string("string will be copied"));
+//
+//
+// Convertable notes:
+// Converting a large data type to a string can be costly. To help with this,
+// the trace framework provides an interface ConvertableToTraceFormat. If you
+// inherit from it and implement the AppendAsTraceFormat method, the trace
+// framework will call back to your object at trace output time. This
+// means that if the category for the event is disabled, the conversion will
+// not happen.
+//
+// class MyData : public base::debug::ConvertableToTraceFormat {
+// public:
+// MyData() {}
+// virtual void AppendAsTraceFormat(std::string* out) const override {
+// out->append("{\"foo\":1}");
+// }
+// private:
+// virtual ~MyData() {}
+// DISALLOW_COPY_AND_ASSIGN(MyData);
+// };
+//
+// TRACE_EVENT1("foo", "bar", "data",
+// scoped_refptr<ConvertableToTraceFormat>(new MyData()));
+//
+// The trace framework will take ownership of the passed pointer, and it will
+// be freed when the trace buffer is flushed.
+//
+// Note, we only do the conversion when the buffer is flushed, so the provided
+// data object should not be modified after it's passed to the trace framework.
+//
+//
+// Thread Safety:
+// A thread safe singleton and mutex are used for thread safety. Category
+// enabled flags are used to limit the performance impact when the system
+// is not enabled.
+//
+// TRACE_EVENT macros first cache a pointer to a category. The categories are
+// statically allocated and safe at all times, even after exit. Fetching a
+// category is protected by the TraceLog::lock_. Multiple threads initializing
+// the static variable is safe, as they will be serialized by the lock and
+// multiple calls will return the same pointer to the category.
+//
+// Then the category_group_enabled flag is checked. This is an unsigned char,
+// not intended to be multithread safe. It optimizes access to AddTraceEvent
+// which is threadsafe internally via TraceLog::lock_. The enabled flag may
+// cause some threads to incorrectly call or skip calling AddTraceEvent near
+// the time of the system being enabled or disabled. This is acceptable as
+// we tolerate some data loss while the system is being enabled/disabled and
+// because AddTraceEvent is threadsafe internally and checks the enabled state
+// again under lock.
+//
+// Without the use of these static category pointers and enabled flags all
+// trace points would carry a significant performance cost of acquiring a lock
+// and resolving the category.
+
+#ifndef BASE_TRACE_EVENT_TRACE_EVENT_H_
+#define BASE_TRACE_EVENT_TRACE_EVENT_H_
+
+#include <string>
+
+#include "base/atomicops.h"
+#include "base/time/time.h"
+#include "base/trace_event/trace_event_impl.h"
+#include "base/trace_event/trace_event_memory.h"
+#include "base/trace_event/trace_event_system_stats_monitor.h"
+#include "build/build_config.h"
+
+// By default, const char* argument values are assumed to have long-lived scope
+// and will not be copied. Use this macro to force a const char* to be copied.
+#define TRACE_STR_COPY(str) \
+ trace_event_internal::TraceStringWithCopy(str)
+
+// This will mark the trace event as disabled by default. The user will need
+// to explicitly enable the event.
+#define TRACE_DISABLED_BY_DEFAULT(name) "disabled-by-default-" name
+
+// By default, uint64 ID argument values are not mangled with the Process ID in
+// TRACE_EVENT_ASYNC macros. Use this macro to force Process ID mangling.
+#define TRACE_ID_MANGLE(id) \
+ trace_event_internal::TraceID::ForceMangle(id)
+
+// By default, pointers are mangled with the Process ID in TRACE_EVENT_ASYNC
+// macros. Use this macro to prevent Process ID mangling.
+#define TRACE_ID_DONT_MANGLE(id) \
+ trace_event_internal::TraceID::DontMangle(id)
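+
+// For example (an illustrative sketch; the variable names are hypothetical):
+//   // A process-local integer: force mangling so it cannot collide with the
+//   // same integer coming from another process.
+//   TRACE_EVENT_ASYNC_BEGIN0("cat", "op", TRACE_ID_MANGLE(sequence_number));
+//   // A pointer-sized ID that is already globally unique: skip mangling.
+//   TRACE_EVENT_ASYNC_BEGIN0("cat", "op", TRACE_ID_DONT_MANGLE(global_id));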
+
+// Records a pair of begin and end events called "name" for the current
+// scope, with 0, 1 or 2 associated arguments. If the category is not
+// enabled, then this does nothing.
+// - category and name strings must have application lifetime (statics or
+// literals). They may not include " chars.
+#define TRACE_EVENT0(category_group, name) \
+ INTERNAL_TRACE_MEMORY(category_group, name) \
+ INTERNAL_TRACE_EVENT_ADD_SCOPED(category_group, name)
+#define TRACE_EVENT1(category_group, name, arg1_name, arg1_val) \
+ INTERNAL_TRACE_MEMORY(category_group, name) \
+ INTERNAL_TRACE_EVENT_ADD_SCOPED(category_group, name, arg1_name, arg1_val)
+#define TRACE_EVENT2( \
+ category_group, name, arg1_name, arg1_val, arg2_name, arg2_val) \
+ INTERNAL_TRACE_MEMORY(category_group, name) \
+ INTERNAL_TRACE_EVENT_ADD_SCOPED( \
+ category_group, name, arg1_name, arg1_val, arg2_name, arg2_val)
+
+// Records events like TRACE_EVENT2 but uses |memory_tag| for memory tracing.
+// Use this where |name| is too generic to accurately aggregate allocations.
+#define TRACE_EVENT_WITH_MEMORY_TAG2( \
+ category, name, memory_tag, arg1_name, arg1_val, arg2_name, arg2_val) \
+ INTERNAL_TRACE_MEMORY(category, memory_tag) \
+ INTERNAL_TRACE_EVENT_ADD_SCOPED( \
+ category, name, arg1_name, arg1_val, arg2_name, arg2_val)
+
+// UNSHIPPED_TRACE_EVENT* are like TRACE_EVENT* except that they are not
+// included in official builds.
+
+#if OFFICIAL_BUILD
+#undef TRACING_IS_OFFICIAL_BUILD
+#define TRACING_IS_OFFICIAL_BUILD 1
+#elif !defined(TRACING_IS_OFFICIAL_BUILD)
+#define TRACING_IS_OFFICIAL_BUILD 0
+#endif
+
+#if TRACING_IS_OFFICIAL_BUILD
+#define UNSHIPPED_TRACE_EVENT0(category_group, name) (void)0
+#define UNSHIPPED_TRACE_EVENT1(category_group, name, arg1_name, arg1_val) \
+ (void)0
+#define UNSHIPPED_TRACE_EVENT2(category_group, name, arg1_name, arg1_val, \
+ arg2_name, arg2_val) (void)0
+#define UNSHIPPED_TRACE_EVENT_INSTANT0(category_group, name, scope) (void)0
+#define UNSHIPPED_TRACE_EVENT_INSTANT1(category_group, name, scope, \
+ arg1_name, arg1_val) (void)0
+#define UNSHIPPED_TRACE_EVENT_INSTANT2(category_group, name, scope, \
+ arg1_name, arg1_val, \
+ arg2_name, arg2_val) (void)0
+#else
+#define UNSHIPPED_TRACE_EVENT0(category_group, name) \
+ TRACE_EVENT0(category_group, name)
+#define UNSHIPPED_TRACE_EVENT1(category_group, name, arg1_name, arg1_val) \
+ TRACE_EVENT1(category_group, name, arg1_name, arg1_val)
+#define UNSHIPPED_TRACE_EVENT2(category_group, name, arg1_name, arg1_val, \
+ arg2_name, arg2_val) \
+ TRACE_EVENT2(category_group, name, arg1_name, arg1_val, arg2_name, arg2_val)
+#define UNSHIPPED_TRACE_EVENT_INSTANT0(category_group, name, scope) \
+ TRACE_EVENT_INSTANT0(category_group, name, scope)
+#define UNSHIPPED_TRACE_EVENT_INSTANT1(category_group, name, scope, \
+ arg1_name, arg1_val) \
+ TRACE_EVENT_INSTANT1(category_group, name, scope, arg1_name, arg1_val)
+#define UNSHIPPED_TRACE_EVENT_INSTANT2(category_group, name, scope, \
+ arg1_name, arg1_val, \
+ arg2_name, arg2_val) \
+ TRACE_EVENT_INSTANT2(category_group, name, scope, arg1_name, arg1_val, \
+ arg2_name, arg2_val)
+#endif
+
+// Records a single event called "name" immediately, with 0, 1 or 2
+// associated arguments. If the category is not enabled, then this
+// does nothing.
+// - category and name strings must have application lifetime (statics or
+// literals). They may not include " chars.
+#define TRACE_EVENT_INSTANT0(category_group, name, scope) \
+ INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_INSTANT, \
+ category_group, name, TRACE_EVENT_FLAG_NONE | scope)
+#define TRACE_EVENT_INSTANT1(category_group, name, scope, arg1_name, arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_INSTANT, \
+ category_group, name, TRACE_EVENT_FLAG_NONE | scope, \
+ arg1_name, arg1_val)
+#define TRACE_EVENT_INSTANT2(category_group, name, scope, arg1_name, arg1_val, \
+ arg2_name, arg2_val) \
+ INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_INSTANT, \
+ category_group, name, TRACE_EVENT_FLAG_NONE | scope, \
+ arg1_name, arg1_val, arg2_name, arg2_val)
+#define TRACE_EVENT_COPY_INSTANT0(category_group, name, scope) \
+ INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_INSTANT, \
+ category_group, name, TRACE_EVENT_FLAG_COPY | scope)
+#define TRACE_EVENT_COPY_INSTANT1(category_group, name, scope, \
+ arg1_name, arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_INSTANT, \
+ category_group, name, TRACE_EVENT_FLAG_COPY | scope, arg1_name, \
+ arg1_val)
+#define TRACE_EVENT_COPY_INSTANT2(category_group, name, scope, \
+ arg1_name, arg1_val, \
+ arg2_name, arg2_val) \
+ INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_INSTANT, \
+ category_group, name, TRACE_EVENT_FLAG_COPY | scope, \
+ arg1_name, arg1_val, arg2_name, arg2_val)
+
+// Sets the current sample state to the given category and name (both must be
+// constant strings). These states are intended for a sampling profiler.
+// Implementation note: we store category and name together because we don't
+// want the inconsistency/expense of storing two pointers.
+// |thread_bucket| is [0..2] and is used to statically isolate samples in one
+// thread from others.
+#define TRACE_EVENT_SET_SAMPLING_STATE_FOR_BUCKET( \
+ bucket_number, category, name) \
+ trace_event_internal:: \
+ TraceEventSamplingStateScope<bucket_number>::Set(category "\0" name)
+
+// Returns a current sampling state of the given bucket.
+#define TRACE_EVENT_GET_SAMPLING_STATE_FOR_BUCKET(bucket_number) \
+ trace_event_internal::TraceEventSamplingStateScope<bucket_number>::Current()
+
+// Creates a scope of a sampling state of the given bucket.
+//
+// { // The sampling state is set within this scope.
+// TRACE_EVENT_SCOPED_SAMPLING_STATE_FOR_BUCKET(0, "category", "name");
+// ...;
+// }
+#define TRACE_EVENT_SCOPED_SAMPLING_STATE_FOR_BUCKET( \
+ bucket_number, category, name) \
+ trace_event_internal::TraceEventSamplingStateScope<bucket_number> \
+ traceEventSamplingScope(category "\0" name);
+
+// Syntactic sugar for sampling tracing in the main thread.
+#define TRACE_EVENT_SCOPED_SAMPLING_STATE(category, name) \
+ TRACE_EVENT_SCOPED_SAMPLING_STATE_FOR_BUCKET(0, category, name)
+#define TRACE_EVENT_GET_SAMPLING_STATE() \
+ TRACE_EVENT_GET_SAMPLING_STATE_FOR_BUCKET(0)
+#define TRACE_EVENT_SET_SAMPLING_STATE(category, name) \
+ TRACE_EVENT_SET_SAMPLING_STATE_FOR_BUCKET(0, category, name)
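+
+// For example (a hedged sketch; Compositor is an illustrative class, not part
+// of this API):
+//   void Compositor::Draw() {
+//     // While this scope is live, a sampling profiler polling bucket 0 will
+//     // attribute samples to "Compositor\0Draw".
+//     TRACE_EVENT_SCOPED_SAMPLING_STATE("Compositor", "Draw");
+//     ...
+//   }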
+
+
+// Records a single BEGIN event called "name" immediately, with 0, 1 or 2
+// associated arguments. If the category is not enabled, then this
+// does nothing.
+// - category and name strings must have application lifetime (statics or
+// literals). They may not include " chars.
+#define TRACE_EVENT_BEGIN0(category_group, name) \
+ INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_BEGIN, \
+ category_group, name, TRACE_EVENT_FLAG_NONE)
+#define TRACE_EVENT_BEGIN1(category_group, name, arg1_name, arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_BEGIN, \
+ category_group, name, TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val)
+#define TRACE_EVENT_BEGIN2(category_group, name, arg1_name, arg1_val, \
+ arg2_name, arg2_val) \
+ INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_BEGIN, \
+ category_group, name, TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val, \
+ arg2_name, arg2_val)
+#define TRACE_EVENT_COPY_BEGIN0(category_group, name) \
+ INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_BEGIN, \
+ category_group, name, TRACE_EVENT_FLAG_COPY)
+#define TRACE_EVENT_COPY_BEGIN1(category_group, name, arg1_name, arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_BEGIN, \
+ category_group, name, TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val)
+#define TRACE_EVENT_COPY_BEGIN2(category_group, name, arg1_name, arg1_val, \
+ arg2_name, arg2_val) \
+ INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_BEGIN, \
+ category_group, name, TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val, \
+ arg2_name, arg2_val)
+
+// Similar to TRACE_EVENT_BEGINx but with a custom |timestamp| provided.
+// - |id| is used to match the _BEGIN event with the _END event.
+// Events are considered to match if their category_group, name and id values
+// all match. |id| must either be a pointer or an integer value up to 64 bits.
+// If it's a pointer, the bits will be xored with a hash of the process ID so
+// that the same pointer on two different processes will not collide.
+#define TRACE_EVENT_BEGIN_WITH_ID_TID_AND_TIMESTAMP0(category_group, \
+ name, id, thread_id, timestamp) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
+ TRACE_EVENT_PHASE_ASYNC_BEGIN, category_group, name, id, thread_id, \
+ timestamp, TRACE_EVENT_FLAG_NONE)
+#define TRACE_EVENT_COPY_BEGIN_WITH_ID_TID_AND_TIMESTAMP0( \
+ category_group, name, id, thread_id, timestamp) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
+ TRACE_EVENT_PHASE_ASYNC_BEGIN, category_group, name, id, thread_id, \
+ timestamp, TRACE_EVENT_FLAG_COPY)
+#define TRACE_EVENT_COPY_BEGIN_WITH_ID_TID_AND_TIMESTAMP1( \
+ category_group, name, id, thread_id, timestamp, arg1_name, arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
+ TRACE_EVENT_PHASE_ASYNC_BEGIN, category_group, name, id, thread_id, \
+ timestamp, TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val)
+
+// Records a single END event for "name" immediately. If the category
+// is not enabled, then this does nothing.
+// - category and name strings must have application lifetime (statics or
+// literals). They may not include " chars.
+#define TRACE_EVENT_END0(category_group, name) \
+ INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_END, \
+ category_group, name, TRACE_EVENT_FLAG_NONE)
+#define TRACE_EVENT_END1(category_group, name, arg1_name, arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_END, \
+ category_group, name, TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val)
+#define TRACE_EVENT_END2(category_group, name, arg1_name, arg1_val, \
+ arg2_name, arg2_val) \
+ INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_END, \
+ category_group, name, TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val, \
+ arg2_name, arg2_val)
+#define TRACE_EVENT_COPY_END0(category_group, name) \
+ INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_END, \
+ category_group, name, TRACE_EVENT_FLAG_COPY)
+#define TRACE_EVENT_COPY_END1(category_group, name, arg1_name, arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_END, \
+ category_group, name, TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val)
+#define TRACE_EVENT_COPY_END2(category_group, name, arg1_name, arg1_val, \
+ arg2_name, arg2_val) \
+ INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_END, \
+ category_group, name, TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val, \
+ arg2_name, arg2_val)
+
+// Similar to TRACE_EVENT_ENDx but with a custom |timestamp| provided.
+// - |id| is used to match the _BEGIN event with the _END event.
+// Events are considered to match if their category_group, name and id values
+// all match. |id| must either be a pointer or an integer value up to 64 bits.
+// If it's a pointer, the bits will be xored with a hash of the process ID so
+// that the same pointer on two different processes will not collide.
+#define TRACE_EVENT_END_WITH_ID_TID_AND_TIMESTAMP0(category_group, \
+ name, id, thread_id, timestamp) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
+ TRACE_EVENT_PHASE_ASYNC_END, category_group, name, id, thread_id, \
+ timestamp, TRACE_EVENT_FLAG_NONE)
+#define TRACE_EVENT_COPY_END_WITH_ID_TID_AND_TIMESTAMP0( \
+ category_group, name, id, thread_id, timestamp) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
+ TRACE_EVENT_PHASE_ASYNC_END, category_group, name, id, thread_id, \
+ timestamp, TRACE_EVENT_FLAG_COPY)
+#define TRACE_EVENT_COPY_END_WITH_ID_TID_AND_TIMESTAMP1( \
+ category_group, name, id, thread_id, timestamp, arg1_name, arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
+ TRACE_EVENT_PHASE_ASYNC_END, category_group, name, id, thread_id, \
+ timestamp, TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val)
+
+// Records the value of a counter called "name" immediately. Value
+// must be representable as a 32 bit integer.
+// - category and name strings must have application lifetime (statics or
+// literals). They may not include " chars.
+#define TRACE_COUNTER1(category_group, name, value) \
+ INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_COUNTER, \
+ category_group, name, TRACE_EVENT_FLAG_NONE, \
+ "value", static_cast<int>(value))
+#define TRACE_COPY_COUNTER1(category_group, name, value) \
+ INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_COUNTER, \
+ category_group, name, TRACE_EVENT_FLAG_COPY, \
+ "value", static_cast<int>(value))
+
+// Records the values of a multi-parted counter called "name" immediately.
+// The UI will treat value1 and value2 as parts of a whole, displaying their
+// values as a stacked-bar chart.
+// - category and name strings must have application lifetime (statics or
+// literals). They may not include " chars.
+#define TRACE_COUNTER2(category_group, name, value1_name, value1_val, \
+ value2_name, value2_val) \
+ INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_COUNTER, \
+ category_group, name, TRACE_EVENT_FLAG_NONE, \
+ value1_name, static_cast<int>(value1_val), \
+ value2_name, static_cast<int>(value2_val))
+#define TRACE_COPY_COUNTER2(category_group, name, value1_name, value1_val, \
+ value2_name, value2_val) \
+ INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_COUNTER, \
+ category_group, name, TRACE_EVENT_FLAG_COPY, \
+ value1_name, static_cast<int>(value1_val), \
+ value2_name, static_cast<int>(value2_val))
+
+// Records the value of a counter called "name" immediately. Value
+// must be representable as a 32 bit integer.
+// - category and name strings must have application lifetime (statics or
+// literals). They may not include " chars.
+// - |id| is used to disambiguate counters with the same name. It must either
+// be a pointer or an integer value up to 64 bits. If it's a pointer, the bits
+// will be xored with a hash of the process ID so that the same pointer on
+// two different processes will not collide.
+#define TRACE_COUNTER_ID1(category_group, name, id, value) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_COUNTER, \
+ category_group, name, id, TRACE_EVENT_FLAG_NONE, \
+ "value", static_cast<int>(value))
+#define TRACE_COPY_COUNTER_ID1(category_group, name, id, value) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_COUNTER, \
+ category_group, name, id, TRACE_EVENT_FLAG_COPY, \
+ "value", static_cast<int>(value))
+
+// Records the values of a multi-parted counter called "name" immediately.
+// The UI will treat value1 and value2 as parts of a whole, displaying their
+// values as a stacked-bar chart.
+// - category and name strings must have application lifetime (statics or
+// literals). They may not include " chars.
+// - |id| is used to disambiguate counters with the same name. It must either
+// be a pointer or an integer value up to 64 bits. If it's a pointer, the bits
+// will be xored with a hash of the process ID so that the same pointer on
+// two different processes will not collide.
+#define TRACE_COUNTER_ID2(category_group, name, id, value1_name, value1_val, \
+ value2_name, value2_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_COUNTER, \
+ category_group, name, id, TRACE_EVENT_FLAG_NONE, \
+ value1_name, static_cast<int>(value1_val), \
+ value2_name, static_cast<int>(value2_val))
+#define TRACE_COPY_COUNTER_ID2(category_group, name, id, value1_name, \
+ value1_val, value2_name, value2_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_COUNTER, \
+ category_group, name, id, TRACE_EVENT_FLAG_COPY, \
+ value1_name, static_cast<int>(value1_val), \
+ value2_name, static_cast<int>(value2_val))
+
+// ASYNC_STEP_* APIs should only be used by legacy code. New code should
+// consider using NESTABLE_ASYNC_* APIs to describe substeps within an async
+// event.
+// Records a single ASYNC_BEGIN event called "name" immediately, with 0, 1 or 2
+// associated arguments. If the category is not enabled, then this
+// does nothing.
+// - category and name strings must have application lifetime (statics or
+// literals). They may not include " chars.
+// - |id| is used to match the ASYNC_BEGIN event with the ASYNC_END event. ASYNC
+// events are considered to match if their category_group, name and id values
+// all match. |id| must either be a pointer or an integer value up to 64 bits.
+// If it's a pointer, the bits will be xored with a hash of the process ID so
+// that the same pointer on two different processes will not collide.
+//
+// An asynchronous operation can consist of multiple phases. The first phase is
+// defined by the ASYNC_BEGIN calls. Additional phases can be defined using the
+// ASYNC_STEP_INTO or ASYNC_STEP_PAST macros. The ASYNC_STEP_INTO macro will
+// annotate the block following the call. The ASYNC_STEP_PAST macro will
+// annotate the block prior to the call. Note that any particular event must use
+// only STEP_INTO or STEP_PAST macros; they can not mix and match. When the
+// operation completes, call ASYNC_END.
+//
+// An ASYNC trace typically occurs on a single thread (if not, events will
+// only be drawn on the thread defined in the ASYNC_BEGIN event), but all
+// events in that operation must use the same |name| and |id|. Each step can
+// have its own args.
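+//
+// For example (an illustrative sketch; |request| is a hypothetical pointer
+// used as the |id|):
+//   TRACE_EVENT_ASYNC_BEGIN0("net", "Request", request);              // phase 1
+//   TRACE_EVENT_ASYNC_STEP_INTO0("net", "Request", request, "Cache"); // phase 2
+//   TRACE_EVENT_ASYNC_END0("net", "Request", request);                // done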
+#define TRACE_EVENT_ASYNC_BEGIN0(category_group, name, id) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_BEGIN, \
+ category_group, name, id, TRACE_EVENT_FLAG_NONE)
+#define TRACE_EVENT_ASYNC_BEGIN1(category_group, name, id, arg1_name, \
+ arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_BEGIN, \
+ category_group, name, id, TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val)
+#define TRACE_EVENT_ASYNC_BEGIN2(category_group, name, id, arg1_name, \
+ arg1_val, arg2_name, arg2_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_BEGIN, \
+ category_group, name, id, TRACE_EVENT_FLAG_NONE, \
+ arg1_name, arg1_val, arg2_name, arg2_val)
+#define TRACE_EVENT_COPY_ASYNC_BEGIN0(category_group, name, id) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_BEGIN, \
+ category_group, name, id, TRACE_EVENT_FLAG_COPY)
+#define TRACE_EVENT_COPY_ASYNC_BEGIN1(category_group, name, id, arg1_name, \
+ arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_BEGIN, \
+ category_group, name, id, TRACE_EVENT_FLAG_COPY, \
+ arg1_name, arg1_val)
+#define TRACE_EVENT_COPY_ASYNC_BEGIN2(category_group, name, id, arg1_name, \
+ arg1_val, arg2_name, arg2_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_BEGIN, \
+ category_group, name, id, TRACE_EVENT_FLAG_COPY, \
+ arg1_name, arg1_val, arg2_name, arg2_val)
+
+// Similar to TRACE_EVENT_ASYNC_BEGINx but with a custom |timestamp|
+// provided.
+#define TRACE_EVENT_ASYNC_BEGIN_WITH_TIMESTAMP0(category_group, \
+ name, id, timestamp) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
+ TRACE_EVENT_PHASE_ASYNC_BEGIN, category_group, name, id, \
+ static_cast<int>(base::PlatformThread::CurrentId()), \
+ timestamp, TRACE_EVENT_FLAG_NONE)
+
+// Records a single ASYNC_STEP_INTO event for |step| immediately. If the
+// category is not enabled, then this does nothing. The |name| and |id| must
+// match the ASYNC_BEGIN event above. The |step| param identifies this step
+// within the async event. This should be called at the beginning of the next
+// phase of an asynchronous operation. The ASYNC_BEGIN event must not have any
+// ASYNC_STEP_PAST events.
+#define TRACE_EVENT_ASYNC_STEP_INTO0(category_group, name, id, step) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_STEP_INTO, \
+ category_group, name, id, TRACE_EVENT_FLAG_NONE, "step", step)
+#define TRACE_EVENT_ASYNC_STEP_INTO1(category_group, name, id, step, \
+ arg1_name, arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_STEP_INTO, \
+ category_group, name, id, TRACE_EVENT_FLAG_NONE, "step", step, \
+ arg1_name, arg1_val)
+
+// Records a single ASYNC_STEP_PAST event for |step| immediately. If the
+// category is not enabled, then this does nothing. The |name| and |id| must
+// match the ASYNC_BEGIN event above. The |step| param identifies this step
+// within the async event. This should be called at the beginning of the next
+// phase of an asynchronous operation. The ASYNC_BEGIN event must not have any
+// ASYNC_STEP_INTO events.
+#define TRACE_EVENT_ASYNC_STEP_PAST0(category_group, name, id, step) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_STEP_PAST, \
+ category_group, name, id, TRACE_EVENT_FLAG_NONE, "step", step)
+#define TRACE_EVENT_ASYNC_STEP_PAST1(category_group, name, id, step, \
+ arg1_name, arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_STEP_PAST, \
+ category_group, name, id, TRACE_EVENT_FLAG_NONE, "step", step, \
+ arg1_name, arg1_val)
+
+// Records a single ASYNC_END event for "name" immediately. If the category
+// is not enabled, then this does nothing.
+#define TRACE_EVENT_ASYNC_END0(category_group, name, id) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_END, \
+ category_group, name, id, TRACE_EVENT_FLAG_NONE)
+#define TRACE_EVENT_ASYNC_END1(category_group, name, id, arg1_name, arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_END, \
+ category_group, name, id, TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val)
+#define TRACE_EVENT_ASYNC_END2(category_group, name, id, arg1_name, arg1_val, \
+ arg2_name, arg2_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_END, \
+ category_group, name, id, TRACE_EVENT_FLAG_NONE, \
+ arg1_name, arg1_val, arg2_name, arg2_val)
+#define TRACE_EVENT_COPY_ASYNC_END0(category_group, name, id) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_END, \
+ category_group, name, id, TRACE_EVENT_FLAG_COPY)
+#define TRACE_EVENT_COPY_ASYNC_END1(category_group, name, id, arg1_name, \
+ arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_END, \
+ category_group, name, id, TRACE_EVENT_FLAG_COPY, \
+ arg1_name, arg1_val)
+#define TRACE_EVENT_COPY_ASYNC_END2(category_group, name, id, arg1_name, \
+ arg1_val, arg2_name, arg2_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_END, \
+ category_group, name, id, TRACE_EVENT_FLAG_COPY, \
+ arg1_name, arg1_val, arg2_name, arg2_val)
+
+// Similar to TRACE_EVENT_ASYNC_ENDx but with a custom |timestamp| provided.
+#define TRACE_EVENT_ASYNC_END_WITH_TIMESTAMP0(category_group, \
+ name, id, timestamp) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
+ TRACE_EVENT_PHASE_ASYNC_END, category_group, name, id, \
+ static_cast<int>(base::PlatformThread::CurrentId()), \
+ timestamp, TRACE_EVENT_FLAG_NONE)
+
+// NESTABLE_ASYNC_* APIs are used to describe an async operation, which can
+// be nested within a NESTABLE_ASYNC event and/or have inner NESTABLE_ASYNC
+// events.
+// - category and name strings must have application lifetime (statics or
+// literals). They may not include " chars.
+// - |id| is used to match the NESTABLE_ASYNC_BEGIN event with the
+// NESTABLE_ASYNC_END event. Events are considered to match if their
+// category_group, name and id values all match. |id| must either be a
+// pointer or an integer value up to 64 bits. If it's a pointer, the bits
+// will be xored with a hash of the process ID so that the same pointer on two
+// different processes will not collide.
+//
+// An unmatched NESTABLE_ASYNC_END event will be parsed as an instant event,
+// and an unmatched NESTABLE_ASYNC_BEGIN event will be parsed as an event that
+// ends at the last NESTABLE_ASYNC_END event of that |id|.
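+//
+// For example (a hedged sketch; names are illustrative and the arg pairs are
+// elided):
+//   TRACE_EVENT_NESTABLE_ASYNC_BEGIN2("net", "LoadPage", this, ...);
+//   TRACE_EVENT_NESTABLE_ASYNC_BEGIN2("net", "LoadImage", this, ...); // nested
+//   TRACE_EVENT_NESTABLE_ASYNC_END2("net", "LoadImage", this, ...);
+//   TRACE_EVENT_NESTABLE_ASYNC_END2("net", "LoadPage", this, ...);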
+
+// Records a single NESTABLE_ASYNC_BEGIN event called "name" immediately, with 2
+// associated arguments. If the category is not enabled, then this does nothing.
+#define TRACE_EVENT_NESTABLE_ASYNC_BEGIN2(category_group, name, id, arg1_name, \
+ arg1_val, arg2_name, arg2_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_NESTABLE_ASYNC_BEGIN, \
+ category_group, name, id, TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val, \
+ arg2_name, arg2_val)
+// Records a single NESTABLE_ASYNC_END event called "name" immediately, with 2
+// associated arguments. If the category is not enabled, then this does nothing.
+#define TRACE_EVENT_NESTABLE_ASYNC_END2(category_group, name, id, arg1_name, \
+ arg1_val, arg2_name, arg2_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_NESTABLE_ASYNC_END, \
+ category_group, name, id, TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val, \
+ arg2_name, arg2_val)
+// Records a single NESTABLE_ASYNC_INSTANT event called "name" immediately,
+// with 2 associated arguments. If the category is not enabled, then this
+// does nothing.
+#define TRACE_EVENT_NESTABLE_ASYNC_INSTANT2(category_group, name, id, \
+ arg1_name, arg1_val, arg2_name, arg2_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_NESTABLE_ASYNC_INSTANT, \
+ category_group, name, id, TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val, \
+ arg2_name, arg2_val)
+
+// Records a single FLOW_BEGIN event called "name" immediately, with 0, 1 or 2
+// associated arguments. If the category is not enabled, then this
+// does nothing.
+// - category and name strings must have application lifetime (statics or
+// literals). They may not include " chars.
+// - |id| is used to match the FLOW_BEGIN event with the FLOW_END event. FLOW
+// events are considered to match if their category_group, name and id values
+// all match. |id| must either be a pointer or an integer value up to 64 bits.
+// If it's a pointer, the bits will be xored with a hash of the process ID so
+// that the same pointer on two different processes will not collide.
+// FLOW events are different from ASYNC events in how they are drawn by the
+// tracing UI. A FLOW defines asynchronous data flow, such as posting a task
+// (FLOW_BEGIN) and later executing that task (FLOW_END). Expect FLOWs to be
+// drawn as lines or arrows from FLOW_BEGIN scopes to FLOW_END scopes. Similar
+// to ASYNC, a FLOW can consist of multiple phases. The first phase is defined
+// by the FLOW_BEGIN calls. Additional phases can be defined using the FLOW_STEP
+// macros. When the operation completes, call FLOW_END. A FLOW can span
+// threads and processes, but all events in that flow must use the same
+// |name| and |id|. Each event can have its own args.
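+//
+// For example (an illustrative sketch of the post-task case; |task_id| is
+// hypothetical):
+//   // On the posting thread:
+//   TRACE_EVENT_FLOW_BEGIN0("toplevel", "PostTask", task_id);
+//   // Later, on whichever thread runs the task:
+//   TRACE_EVENT_FLOW_END0("toplevel", "PostTask", task_id);
+// The tracing UI can then draw an arrow from the post site to the run site.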
+#define TRACE_EVENT_FLOW_BEGIN0(category_group, name, id) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_BEGIN, \
+ category_group, name, id, TRACE_EVENT_FLAG_NONE)
+#define TRACE_EVENT_FLOW_BEGIN1(category_group, name, id, arg1_name, arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_BEGIN, \
+ category_group, name, id, TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val)
+#define TRACE_EVENT_FLOW_BEGIN2(category_group, name, id, arg1_name, arg1_val, \
+ arg2_name, arg2_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_BEGIN, \
+ category_group, name, id, TRACE_EVENT_FLAG_NONE, \
+ arg1_name, arg1_val, arg2_name, arg2_val)
+#define TRACE_EVENT_COPY_FLOW_BEGIN0(category_group, name, id) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_BEGIN, \
+ category_group, name, id, TRACE_EVENT_FLAG_COPY)
+#define TRACE_EVENT_COPY_FLOW_BEGIN1(category_group, name, id, arg1_name, \
+ arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_BEGIN, \
+ category_group, name, id, TRACE_EVENT_FLAG_COPY, \
+ arg1_name, arg1_val)
+#define TRACE_EVENT_COPY_FLOW_BEGIN2(category_group, name, id, arg1_name, \
+ arg1_val, arg2_name, arg2_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_BEGIN, \
+ category_group, name, id, TRACE_EVENT_FLAG_COPY, \
+ arg1_name, arg1_val, arg2_name, arg2_val)
+
+// Records a single FLOW_STEP event for |step| immediately. If the category
+// is not enabled, then this does nothing. The |name| and |id| must match the
+// FLOW_BEGIN event above. The |step| param identifies this step within the
+// async event. This should be called at the beginning of the next phase of an
+// asynchronous operation.
+#define TRACE_EVENT_FLOW_STEP0(category_group, name, id, step) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_STEP, \
+ category_group, name, id, TRACE_EVENT_FLAG_NONE, "step", step)
+#define TRACE_EVENT_FLOW_STEP1(category_group, name, id, step, \
+ arg1_name, arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_STEP, \
+ category_group, name, id, TRACE_EVENT_FLAG_NONE, "step", step, \
+ arg1_name, arg1_val)
+#define TRACE_EVENT_COPY_FLOW_STEP0(category_group, name, id, step) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_STEP, \
+ category_group, name, id, TRACE_EVENT_FLAG_COPY, "step", step)
+#define TRACE_EVENT_COPY_FLOW_STEP1(category_group, name, id, step, \
+ arg1_name, arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_STEP, \
+ category_group, name, id, TRACE_EVENT_FLAG_COPY, "step", step, \
+ arg1_name, arg1_val)
+
+// Records a single FLOW_END event for "name" immediately. If the category
+// is not enabled, then this does nothing.
+#define TRACE_EVENT_FLOW_END0(category_group, name, id) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_END, \
+ category_group, name, id, TRACE_EVENT_FLAG_NONE)
+#define TRACE_EVENT_FLOW_END1(category_group, name, id, arg1_name, arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_END, \
+ category_group, name, id, TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val)
+#define TRACE_EVENT_FLOW_END2(category_group, name, id, arg1_name, arg1_val, \
+ arg2_name, arg2_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_END, \
+ category_group, name, id, TRACE_EVENT_FLAG_NONE, \
+ arg1_name, arg1_val, arg2_name, arg2_val)
+#define TRACE_EVENT_COPY_FLOW_END0(category_group, name, id) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_END, \
+ category_group, name, id, TRACE_EVENT_FLAG_COPY)
+#define TRACE_EVENT_COPY_FLOW_END1(category_group, name, id, arg1_name, \
+ arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_END, \
+ category_group, name, id, TRACE_EVENT_FLAG_COPY, \
+ arg1_name, arg1_val)
+#define TRACE_EVENT_COPY_FLOW_END2(category_group, name, id, arg1_name, \
+ arg1_val, arg2_name, arg2_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_END, \
+ category_group, name, id, TRACE_EVENT_FLAG_COPY, \
+ arg1_name, arg1_val, arg2_name, arg2_val)
+
+// Macros to track the life time and value of arbitrary client objects.
+// See also TraceTrackableObject.
+#define TRACE_EVENT_OBJECT_CREATED_WITH_ID(category_group, name, id) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_CREATE_OBJECT, \
+ category_group, name, TRACE_ID_DONT_MANGLE(id), TRACE_EVENT_FLAG_NONE)
+
+#define TRACE_EVENT_OBJECT_SNAPSHOT_WITH_ID(category_group, name, id, snapshot) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_SNAPSHOT_OBJECT, \
+ category_group, name, TRACE_ID_DONT_MANGLE(id), TRACE_EVENT_FLAG_NONE,\
+ "snapshot", snapshot)
+
+#define TRACE_EVENT_OBJECT_DELETED_WITH_ID(category_group, name, id) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_DELETE_OBJECT, \
+ category_group, name, TRACE_ID_DONT_MANGLE(id), TRACE_EVENT_FLAG_NONE)
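+
+// For example (a hedged sketch; MyObject is illustrative, and |snapshot|
+// would typically be a scoped_refptr<ConvertableToTraceFormat> as described
+// above):
+//   MyObject::MyObject() {
+//     TRACE_EVENT_OBJECT_CREATED_WITH_ID("category", "MyObject", this);
+//   }
+//   void MyObject::DidChange() {
+//     TRACE_EVENT_OBJECT_SNAPSHOT_WITH_ID("category", "MyObject", this,
+//                                         snapshot);
+//   }
+//   MyObject::~MyObject() {
+//     TRACE_EVENT_OBJECT_DELETED_WITH_ID("category", "MyObject", this);
+//   }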
+
+#define INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED_FOR_RECORDING_MODE() \
+ UNLIKELY(*INTERNAL_TRACE_EVENT_UID(category_group_enabled) & \
+ (base::debug::TraceLog::ENABLED_FOR_RECORDING | \
+ base::debug::TraceLog::ENABLED_FOR_EVENT_CALLBACK))
+
+// Macro to efficiently determine if a given category group is enabled.
+#define TRACE_EVENT_CATEGORY_GROUP_ENABLED(category_group, ret) \
+ do { \
+ INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group); \
+ if (INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED_FOR_RECORDING_MODE()) { \
+ *ret = true; \
+ } else { \
+ *ret = false; \
+ } \
+ } while (0)
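+
+// For example (an illustrative sketch): skip building an expensive argument
+// unless the category is actually on; BuildExpensiveDump is a hypothetical
+// helper.
+//   bool enabled;
+//   TRACE_EVENT_CATEGORY_GROUP_ENABLED("cc", &enabled);
+//   if (enabled) {
+//     std::string dump = BuildExpensiveDump();
+//     TRACE_EVENT_INSTANT1("cc", "Dump", TRACE_EVENT_SCOPE_THREAD,
+//                          "dump", dump);
+//   }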
+
+// Macro to efficiently determine, through polling, if a new trace has begun.
+#define TRACE_EVENT_IS_NEW_TRACE(ret) \
+ do { \
+ static int INTERNAL_TRACE_EVENT_UID(lastRecordingNumber) = 0; \
+ int num_traces_recorded = TRACE_EVENT_API_GET_NUM_TRACES_RECORDED(); \
+ if (num_traces_recorded != -1 && \
+ num_traces_recorded != \
+ INTERNAL_TRACE_EVENT_UID(lastRecordingNumber)) { \
+ INTERNAL_TRACE_EVENT_UID(lastRecordingNumber) = \
+ num_traces_recorded; \
+ *ret = true; \
+ } else { \
+ *ret = false; \
+ } \
+ } while (0)
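+
+// For example (a hedged sketch): re-emit one-time metadata whenever a new
+// trace session starts; EmitProcessMetadata is a hypothetical helper.
+//   bool is_new_trace;
+//   TRACE_EVENT_IS_NEW_TRACE(&is_new_trace);
+//   if (is_new_trace)
+//     EmitProcessMetadata();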
+
+////////////////////////////////////////////////////////////////////////////////
+// Implementation specific tracing API definitions.
+
+// Get a pointer to the enabled state of the given trace category. Only
+// long-lived literal strings should be given as the category group. The
+// returned pointer can be held permanently in a local static for example. If
+// the unsigned char is non-zero, tracing is enabled. If tracing is enabled,
+// TRACE_EVENT_API_ADD_TRACE_EVENT can be called. It's OK if tracing is disabled
+// between the load of the tracing state and the call to
+// TRACE_EVENT_API_ADD_TRACE_EVENT, because this flag only provides an early out
+// for best performance when tracing is disabled.
+// const unsigned char*
+// TRACE_EVENT_API_GET_CATEGORY_GROUP_ENABLED(const char* category_group)
+#define TRACE_EVENT_API_GET_CATEGORY_GROUP_ENABLED \
+ base::debug::TraceLog::GetCategoryGroupEnabled
+
+// Get the number of times traces have been recorded. This is used to implement
+// the TRACE_EVENT_IS_NEW_TRACE facility.
+// int TRACE_EVENT_API_GET_NUM_TRACES_RECORDED()
+#define TRACE_EVENT_API_GET_NUM_TRACES_RECORDED \
+ base::debug::TraceLog::GetInstance()->GetNumTracesRecorded
+
+// Add a trace event to the platform tracing system.
+// base::debug::TraceEventHandle TRACE_EVENT_API_ADD_TRACE_EVENT(
+// char phase,
+// const unsigned char* category_group_enabled,
+// const char* name,
+// unsigned long long id,
+// int num_args,
+// const char** arg_names,
+// const unsigned char* arg_types,
+// const unsigned long long* arg_values,
+// unsigned char flags)
+#define TRACE_EVENT_API_ADD_TRACE_EVENT \
+ base::debug::TraceLog::GetInstance()->AddTraceEvent
+
+// Add a trace event to the platform tracing system.
+// base::debug::TraceEventHandle TRACE_EVENT_API_ADD_TRACE_EVENT_WITH_TIMESTAMP(
+// char phase,
+// const unsigned char* category_group_enabled,
+// const char* name,
+// unsigned long long id,
+// int thread_id,
+// const TimeTicks& timestamp,
+// int num_args,
+// const char** arg_names,
+// const unsigned char* arg_types,
+// const unsigned long long* arg_values,
+// unsigned char flags)
+#define TRACE_EVENT_API_ADD_TRACE_EVENT_WITH_THREAD_ID_AND_TIMESTAMP \
+ base::debug::TraceLog::GetInstance()->AddTraceEventWithThreadIdAndTimestamp
+
+// Set the duration field of a COMPLETE trace event.
+// void TRACE_EVENT_API_UPDATE_TRACE_EVENT_DURATION(
+// const unsigned char* category_group_enabled,
+// const char* name,
+// base::debug::TraceEventHandle id)
+#define TRACE_EVENT_API_UPDATE_TRACE_EVENT_DURATION \
+ base::debug::TraceLog::GetInstance()->UpdateTraceEventDuration
+
+// Defines atomic operations used internally by the tracing system.
+#define TRACE_EVENT_API_ATOMIC_WORD base::subtle::AtomicWord
+#define TRACE_EVENT_API_ATOMIC_LOAD(var) base::subtle::NoBarrier_Load(&(var))
+#define TRACE_EVENT_API_ATOMIC_STORE(var, value) \
+ base::subtle::NoBarrier_Store(&(var), (value))
+
+// Defines visibility for classes in trace_event.h
+#define TRACE_EVENT_API_CLASS_EXPORT BASE_EXPORT
+
+// The thread buckets for the sampling profiler.
+TRACE_EVENT_API_CLASS_EXPORT extern \
+ TRACE_EVENT_API_ATOMIC_WORD g_trace_state[3];
+
+#define TRACE_EVENT_API_THREAD_BUCKET(thread_bucket) \
+ g_trace_state[thread_bucket]
+
+////////////////////////////////////////////////////////////////////////////////
+
+// Implementation detail: trace event macros create temporary variables
+// to keep instrumentation overhead low. These macros give each temporary
+// variable a unique name based on the line number to prevent name collisions.
+#define INTERNAL_TRACE_EVENT_UID3(a,b) \
+ trace_event_unique_##a##b
+#define INTERNAL_TRACE_EVENT_UID2(a,b) \
+ INTERNAL_TRACE_EVENT_UID3(a,b)
+#define INTERNAL_TRACE_EVENT_UID(name_prefix) \
+ INTERNAL_TRACE_EVENT_UID2(name_prefix, __LINE__)
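+
+// For example, INTERNAL_TRACE_EVENT_UID(atomic) on line 42 expands to
+// trace_event_unique_atomic42; the extra UID2/UID3 level of indirection
+// forces __LINE__ to expand before token pasting.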
+
+// Implementation detail: internal macro to create static category.
+// No barriers are needed, because this code is designed to operate safely
+// even when the unsigned char* points to garbage data (which may be the case
+// on processors without cache coherency).
+#define INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO_CUSTOM_VARIABLES( \
+ category_group, atomic, category_group_enabled) \
+ category_group_enabled = \
+ reinterpret_cast<const unsigned char*>(TRACE_EVENT_API_ATOMIC_LOAD( \
+ atomic)); \
+ if (UNLIKELY(!category_group_enabled)) { \
+ category_group_enabled = \
+ TRACE_EVENT_API_GET_CATEGORY_GROUP_ENABLED(category_group); \
+ TRACE_EVENT_API_ATOMIC_STORE(atomic, \
+ reinterpret_cast<TRACE_EVENT_API_ATOMIC_WORD>( \
+ category_group_enabled)); \
+ }
+
+#define INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group) \
+ static TRACE_EVENT_API_ATOMIC_WORD INTERNAL_TRACE_EVENT_UID(atomic) = 0; \
+ const unsigned char* INTERNAL_TRACE_EVENT_UID(category_group_enabled); \
+ INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO_CUSTOM_VARIABLES(category_group, \
+ INTERNAL_TRACE_EVENT_UID(atomic), \
+ INTERNAL_TRACE_EVENT_UID(category_group_enabled));
+
+// Implementation detail: internal macro to create static category and add
+// event if the category is enabled.
+#define INTERNAL_TRACE_EVENT_ADD(phase, category_group, name, flags, ...) \
+ do { \
+ INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group); \
+ if (INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED_FOR_RECORDING_MODE()) { \
+ trace_event_internal::AddTraceEvent( \
+ phase, INTERNAL_TRACE_EVENT_UID(category_group_enabled), name, \
+ trace_event_internal::kNoEventId, flags, ##__VA_ARGS__); \
+ } \
+ } while (0)
+
+// Implementation detail: internal macro to create static category and add begin
+// event if the category is enabled. Also adds the end event when the scope
+// ends.
+#define INTERNAL_TRACE_EVENT_ADD_SCOPED(category_group, name, ...) \
+ INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group); \
+ trace_event_internal::ScopedTracer INTERNAL_TRACE_EVENT_UID(tracer); \
+ if (INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED_FOR_RECORDING_MODE()) { \
+ base::debug::TraceEventHandle h = trace_event_internal::AddTraceEvent( \
+ TRACE_EVENT_PHASE_COMPLETE, \
+ INTERNAL_TRACE_EVENT_UID(category_group_enabled), \
+ name, trace_event_internal::kNoEventId, \
+ TRACE_EVENT_FLAG_NONE, ##__VA_ARGS__); \
+ INTERNAL_TRACE_EVENT_UID(tracer).Initialize( \
+ INTERNAL_TRACE_EVENT_UID(category_group_enabled), name, h); \
+ }
+
+// Implementation detail: internal macro to create static category and add
+// event if the category is enabled.
+#define INTERNAL_TRACE_EVENT_ADD_WITH_ID(phase, category_group, name, id, \
+ flags, ...) \
+ do { \
+ INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group); \
+ if (INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED_FOR_RECORDING_MODE()) { \
+ unsigned char trace_event_flags = flags | TRACE_EVENT_FLAG_HAS_ID; \
+ trace_event_internal::TraceID trace_event_trace_id( \
+ id, &trace_event_flags); \
+ trace_event_internal::AddTraceEvent( \
+ phase, INTERNAL_TRACE_EVENT_UID(category_group_enabled), \
+ name, trace_event_trace_id.data(), trace_event_flags, \
+ ##__VA_ARGS__); \
+ } \
+ } while (0)
+
+// Implementation detail: internal macro to create static category and add
+// event if the category is enabled.
+#define INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP(phase, \
+ category_group, name, id, thread_id, timestamp, flags, ...) \
+ do { \
+ INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group); \
+ if (INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED_FOR_RECORDING_MODE()) { \
+ unsigned char trace_event_flags = flags | TRACE_EVENT_FLAG_HAS_ID; \
+ trace_event_internal::TraceID trace_event_trace_id( \
+ id, &trace_event_flags); \
+ trace_event_internal::AddTraceEventWithThreadIdAndTimestamp( \
+ phase, INTERNAL_TRACE_EVENT_UID(category_group_enabled), \
+ name, trace_event_trace_id.data(), \
+ thread_id, base::TimeTicks::FromInternalValue(timestamp), \
+ trace_event_flags, ##__VA_ARGS__); \
+ } \
+ } while (0)
+
+// Notes regarding the following definitions:
+// New values can be added and propagated to third party libraries, but existing
+// definitions must never be changed, because third party libraries may use old
+// definitions.
+
+// Phase indicates the nature of an event entry. E.g. part of a begin/end pair.
+#define TRACE_EVENT_PHASE_BEGIN ('B')
+#define TRACE_EVENT_PHASE_END ('E')
+#define TRACE_EVENT_PHASE_COMPLETE ('X')
+#define TRACE_EVENT_PHASE_INSTANT ('I')
+#define TRACE_EVENT_PHASE_ASYNC_BEGIN ('S')
+#define TRACE_EVENT_PHASE_ASYNC_STEP_INTO ('T')
+#define TRACE_EVENT_PHASE_ASYNC_STEP_PAST ('p')
+#define TRACE_EVENT_PHASE_ASYNC_END ('F')
+#define TRACE_EVENT_PHASE_NESTABLE_ASYNC_BEGIN ('b')
+#define TRACE_EVENT_PHASE_NESTABLE_ASYNC_END ('e')
+#define TRACE_EVENT_PHASE_NESTABLE_ASYNC_INSTANT ('n')
+#define TRACE_EVENT_PHASE_FLOW_BEGIN ('s')
+#define TRACE_EVENT_PHASE_FLOW_STEP ('t')
+#define TRACE_EVENT_PHASE_FLOW_END ('f')
+#define TRACE_EVENT_PHASE_METADATA ('M')
+#define TRACE_EVENT_PHASE_COUNTER ('C')
+#define TRACE_EVENT_PHASE_SAMPLE ('P')
+#define TRACE_EVENT_PHASE_CREATE_OBJECT ('N')
+#define TRACE_EVENT_PHASE_SNAPSHOT_OBJECT ('O')
+#define TRACE_EVENT_PHASE_DELETE_OBJECT ('D')
+
+// Flags for changing the behavior of TRACE_EVENT_API_ADD_TRACE_EVENT.
+#define TRACE_EVENT_FLAG_NONE (static_cast<unsigned char>(0))
+#define TRACE_EVENT_FLAG_COPY (static_cast<unsigned char>(1 << 0))
+#define TRACE_EVENT_FLAG_HAS_ID (static_cast<unsigned char>(1 << 1))
+#define TRACE_EVENT_FLAG_MANGLE_ID (static_cast<unsigned char>(1 << 2))
+#define TRACE_EVENT_FLAG_SCOPE_OFFSET (static_cast<unsigned char>(1 << 3))
+
+#define TRACE_EVENT_FLAG_SCOPE_MASK (static_cast<unsigned char>( \
+ TRACE_EVENT_FLAG_SCOPE_OFFSET | (TRACE_EVENT_FLAG_SCOPE_OFFSET << 1)))
+
+// Type values for identifying types in the TraceValue union.
+#define TRACE_VALUE_TYPE_BOOL (static_cast<unsigned char>(1))
+#define TRACE_VALUE_TYPE_UINT (static_cast<unsigned char>(2))
+#define TRACE_VALUE_TYPE_INT (static_cast<unsigned char>(3))
+#define TRACE_VALUE_TYPE_DOUBLE (static_cast<unsigned char>(4))
+#define TRACE_VALUE_TYPE_POINTER (static_cast<unsigned char>(5))
+#define TRACE_VALUE_TYPE_STRING (static_cast<unsigned char>(6))
+#define TRACE_VALUE_TYPE_COPY_STRING (static_cast<unsigned char>(7))
+#define TRACE_VALUE_TYPE_CONVERTABLE (static_cast<unsigned char>(8))
+
+// Enum reflecting the scope of an INSTANT event. Must fit within
+// TRACE_EVENT_FLAG_SCOPE_MASK.
+#define TRACE_EVENT_SCOPE_GLOBAL (static_cast<unsigned char>(0 << 3))
+#define TRACE_EVENT_SCOPE_PROCESS (static_cast<unsigned char>(1 << 3))
+#define TRACE_EVENT_SCOPE_THREAD (static_cast<unsigned char>(2 << 3))
+
+#define TRACE_EVENT_SCOPE_NAME_GLOBAL ('g')
+#define TRACE_EVENT_SCOPE_NAME_PROCESS ('p')
+#define TRACE_EVENT_SCOPE_NAME_THREAD ('t')
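+
+// Note on the encoding above (a clarifying sketch, not new API): the INSTANT
+// scope occupies bits 3-4 of the flags byte, so it can be recovered from an
+// event's flags with:
+//   unsigned char scope = flags & TRACE_EVENT_FLAG_SCOPE_MASK;
+//   bool is_thread_scoped = (scope == TRACE_EVENT_SCOPE_THREAD);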
+
+namespace trace_event_internal {
+
+// Specify these values when the corresponding argument of AddTraceEvent is not
+// used.
+const int kZeroNumArgs = 0;
+const unsigned long long kNoEventId = 0;
+
+// TraceID encapsulates an ID that can either be an integer or pointer. Pointers
+// are by default mangled with the Process ID so that they are unlikely to
+// collide when the same pointer is used on different processes.
+class TraceID {
+ public:
+ class DontMangle {
+ public:
+ explicit DontMangle(const void* id)
+ : data_(static_cast<unsigned long long>(
+ reinterpret_cast<uintptr_t>(id))) {}
+ explicit DontMangle(unsigned long long id) : data_(id) {}
+ explicit DontMangle(unsigned long id) : data_(id) {}
+ explicit DontMangle(unsigned int id) : data_(id) {}
+ explicit DontMangle(unsigned short id) : data_(id) {}
+ explicit DontMangle(unsigned char id) : data_(id) {}
+ explicit DontMangle(long long id)
+ : data_(static_cast<unsigned long long>(id)) {}
+ explicit DontMangle(long id)
+ : data_(static_cast<unsigned long long>(id)) {}
+ explicit DontMangle(int id)
+ : data_(static_cast<unsigned long long>(id)) {}
+ explicit DontMangle(short id)
+ : data_(static_cast<unsigned long long>(id)) {}
+ explicit DontMangle(signed char id)
+ : data_(static_cast<unsigned long long>(id)) {}
+ unsigned long long data() const { return data_; }
+ private:
+ unsigned long long data_;
+ };
+
+ class ForceMangle {
+ public:
+ explicit ForceMangle(unsigned long long id) : data_(id) {}
+ explicit ForceMangle(unsigned long id) : data_(id) {}
+ explicit ForceMangle(unsigned int id) : data_(id) {}
+ explicit ForceMangle(unsigned short id) : data_(id) {}
+ explicit ForceMangle(unsigned char id) : data_(id) {}
+ explicit ForceMangle(long long id)
+ : data_(static_cast<unsigned long long>(id)) {}
+ explicit ForceMangle(long id)
+ : data_(static_cast<unsigned long long>(id)) {}
+ explicit ForceMangle(int id)
+ : data_(static_cast<unsigned long long>(id)) {}
+ explicit ForceMangle(short id)
+ : data_(static_cast<unsigned long long>(id)) {}
+ explicit ForceMangle(signed char id)
+ : data_(static_cast<unsigned long long>(id)) {}
+ unsigned long long data() const { return data_; }
+ private:
+ unsigned long long data_;
+ };
+ TraceID(const void* id, unsigned char* flags)
+ : data_(static_cast<unsigned long long>(
+ reinterpret_cast<uintptr_t>(id))) {
+ *flags |= TRACE_EVENT_FLAG_MANGLE_ID;
+ }
+ TraceID(ForceMangle id, unsigned char* flags) : data_(id.data()) {
+ *flags |= TRACE_EVENT_FLAG_MANGLE_ID;
+ }
+ TraceID(DontMangle id, unsigned char* flags) : data_(id.data()) {
+ }
+ TraceID(unsigned long long id, unsigned char* flags)
+ : data_(id) { (void)flags; }
+ TraceID(unsigned long id, unsigned char* flags)
+ : data_(id) { (void)flags; }
+ TraceID(unsigned int id, unsigned char* flags)
+ : data_(id) { (void)flags; }
+ TraceID(unsigned short id, unsigned char* flags)
+ : data_(id) { (void)flags; }
+ TraceID(unsigned char id, unsigned char* flags)
+ : data_(id) { (void)flags; }
+ TraceID(long long id, unsigned char* flags)
+ : data_(static_cast<unsigned long long>(id)) { (void)flags; }
+ TraceID(long id, unsigned char* flags)
+ : data_(static_cast<unsigned long long>(id)) { (void)flags; }
+ TraceID(int id, unsigned char* flags)
+ : data_(static_cast<unsigned long long>(id)) { (void)flags; }
+ TraceID(short id, unsigned char* flags)
+ : data_(static_cast<unsigned long long>(id)) { (void)flags; }
+ TraceID(signed char id, unsigned char* flags)
+ : data_(static_cast<unsigned long long>(id)) { (void)flags; }
+
+ unsigned long long data() const { return data_; }
+
+ private:
+ unsigned long long data_;
+};
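+
+// Illustrative usage sketch: the pointer constructor above mangles by
+// default, while the TRACE_ID_MANGLE()/TRACE_ID_DONT_MANGLE() macros defined
+// earlier in this header pick a wrapper explicitly. For example:
+//   TRACE_EVENT_ASYNC_BEGIN0("cat", "name", this);  // mangled with the pid
+//   TRACE_EVENT_ASYNC_END0("cat", "name", TRACE_ID_DONT_MANGLE(global_id));
+// Here global_id stands for any integer ID that is already process-unique.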
+
+// Simple union to store various types as unsigned long long.
+union TraceValueUnion {
+ bool as_bool;
+ unsigned long long as_uint;
+ long long as_int;
+ double as_double;
+ const void* as_pointer;
+ const char* as_string;
+};
+
+// Simple container for const char* that should be copied instead of retained.
+class TraceStringWithCopy {
+ public:
+ explicit TraceStringWithCopy(const char* str) : str_(str) {}
+ const char* str() const { return str_; }
+ private:
+ const char* str_;
+};
+
+// Define SetTraceValue for each allowed type. It stores the type and
+// value in the return arguments. This allows the API to avoid declaring any
+// structures, so that it is portable to third_party libraries.
+#define INTERNAL_DECLARE_SET_TRACE_VALUE(actual_type, \
+ arg_expression, \
+ union_member, \
+ value_type_id) \
+ static inline void SetTraceValue( \
+ actual_type arg, \
+ unsigned char* type, \
+ unsigned long long* value) { \
+ TraceValueUnion type_value; \
+ type_value.union_member = arg_expression; \
+ *type = value_type_id; \
+ *value = type_value.as_uint; \
+ }
+// Simpler form for int types that can be safely cast.
+#define INTERNAL_DECLARE_SET_TRACE_VALUE_INT(actual_type, \
+ value_type_id) \
+ static inline void SetTraceValue( \
+ actual_type arg, \
+ unsigned char* type, \
+ unsigned long long* value) { \
+ *type = value_type_id; \
+ *value = static_cast<unsigned long long>(arg); \
+ }
+
+INTERNAL_DECLARE_SET_TRACE_VALUE_INT(unsigned long long, TRACE_VALUE_TYPE_UINT)
+INTERNAL_DECLARE_SET_TRACE_VALUE_INT(unsigned long, TRACE_VALUE_TYPE_UINT)
+INTERNAL_DECLARE_SET_TRACE_VALUE_INT(unsigned int, TRACE_VALUE_TYPE_UINT)
+INTERNAL_DECLARE_SET_TRACE_VALUE_INT(unsigned short, TRACE_VALUE_TYPE_UINT)
+INTERNAL_DECLARE_SET_TRACE_VALUE_INT(unsigned char, TRACE_VALUE_TYPE_UINT)
+INTERNAL_DECLARE_SET_TRACE_VALUE_INT(long long, TRACE_VALUE_TYPE_INT)
+INTERNAL_DECLARE_SET_TRACE_VALUE_INT(long, TRACE_VALUE_TYPE_INT)
+INTERNAL_DECLARE_SET_TRACE_VALUE_INT(int, TRACE_VALUE_TYPE_INT)
+INTERNAL_DECLARE_SET_TRACE_VALUE_INT(short, TRACE_VALUE_TYPE_INT)
+INTERNAL_DECLARE_SET_TRACE_VALUE_INT(signed char, TRACE_VALUE_TYPE_INT)
+INTERNAL_DECLARE_SET_TRACE_VALUE(bool, arg, as_bool, TRACE_VALUE_TYPE_BOOL)
+INTERNAL_DECLARE_SET_TRACE_VALUE(double, arg, as_double,
+ TRACE_VALUE_TYPE_DOUBLE)
+INTERNAL_DECLARE_SET_TRACE_VALUE(const void*, arg, as_pointer,
+ TRACE_VALUE_TYPE_POINTER)
+INTERNAL_DECLARE_SET_TRACE_VALUE(const char*, arg, as_string,
+ TRACE_VALUE_TYPE_STRING)
+INTERNAL_DECLARE_SET_TRACE_VALUE(const TraceStringWithCopy&, arg.str(),
+ as_string, TRACE_VALUE_TYPE_COPY_STRING)
+
+#undef INTERNAL_DECLARE_SET_TRACE_VALUE
+#undef INTERNAL_DECLARE_SET_TRACE_VALUE_INT
+
+// std::string version of SetTraceValue so that trace arguments can be strings.
+static inline void SetTraceValue(const std::string& arg,
+ unsigned char* type,
+ unsigned long long* value) {
+ TraceValueUnion type_value;
+ type_value.as_string = arg.c_str();
+ *type = TRACE_VALUE_TYPE_COPY_STRING;
+ *value = type_value.as_uint;
+}
+
+// base::Time and base::TimeTicks version of SetTraceValue to make it easier to
+// trace these types.
+static inline void SetTraceValue(const base::Time arg,
+ unsigned char* type,
+ unsigned long long* value) {
+ *type = TRACE_VALUE_TYPE_INT;
+ *value = arg.ToInternalValue();
+}
+
+static inline void SetTraceValue(const base::TimeTicks arg,
+ unsigned char* type,
+ unsigned long long* value) {
+ *type = TRACE_VALUE_TYPE_INT;
+ *value = arg.ToInternalValue();
+}
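+
+// Minimal sketch of what the SetTraceValue overloads above produce
+// (illustrative):
+//   unsigned char type;
+//   unsigned long long value;
+//   trace_event_internal::SetTraceValue(42, &type, &value);
+//   // type == TRACE_VALUE_TYPE_INT, value == 42
+//   std::string s("hi");
+//   trace_event_internal::SetTraceValue(s, &type, &value);
+//   // type == TRACE_VALUE_TYPE_COPY_STRING, value holds s.c_str()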
+
+// These AddTraceEvent and AddTraceEventWithThreadIdAndTimestamp template
+// functions are defined here instead of in the macro, because the arg_values
+// could be temporary objects, such as std::string. In order to store
+// pointers to the internal c_str() and pass them through to the tracing API,
+// the arg_values must stay alive for the duration of these calls.
+
+static inline base::debug::TraceEventHandle
+AddTraceEventWithThreadIdAndTimestamp(
+ char phase,
+ const unsigned char* category_group_enabled,
+ const char* name,
+ unsigned long long id,
+ int thread_id,
+ const base::TimeTicks& timestamp,
+ unsigned char flags,
+ const char* arg1_name,
+ const scoped_refptr<base::debug::ConvertableToTraceFormat>& arg1_val) {
+ const int num_args = 1;
+ unsigned char arg_types[1] = { TRACE_VALUE_TYPE_CONVERTABLE };
+ return TRACE_EVENT_API_ADD_TRACE_EVENT_WITH_THREAD_ID_AND_TIMESTAMP(
+ phase, category_group_enabled, name, id, thread_id, timestamp,
+ num_args, &arg1_name, arg_types, NULL, &arg1_val, flags);
+}
+
+template<class ARG1_TYPE>
+static inline base::debug::TraceEventHandle
+AddTraceEventWithThreadIdAndTimestamp(
+ char phase,
+ const unsigned char* category_group_enabled,
+ const char* name,
+ unsigned long long id,
+ int thread_id,
+ const base::TimeTicks& timestamp,
+ unsigned char flags,
+ const char* arg1_name,
+ const ARG1_TYPE& arg1_val,
+ const char* arg2_name,
+ const scoped_refptr<base::debug::ConvertableToTraceFormat>& arg2_val) {
+ const int num_args = 2;
+ const char* arg_names[2] = { arg1_name, arg2_name };
+
+ unsigned char arg_types[2];
+ unsigned long long arg_values[2];
+ SetTraceValue(arg1_val, &arg_types[0], &arg_values[0]);
+ arg_types[1] = TRACE_VALUE_TYPE_CONVERTABLE;
+
+ scoped_refptr<base::debug::ConvertableToTraceFormat> convertable_values[2];
+ convertable_values[1] = arg2_val;
+
+ return TRACE_EVENT_API_ADD_TRACE_EVENT_WITH_THREAD_ID_AND_TIMESTAMP(
+ phase, category_group_enabled, name, id, thread_id, timestamp,
+ num_args, arg_names, arg_types, arg_values, convertable_values, flags);
+}
+
+template<class ARG2_TYPE>
+static inline base::debug::TraceEventHandle
+AddTraceEventWithThreadIdAndTimestamp(
+ char phase,
+ const unsigned char* category_group_enabled,
+ const char* name,
+ unsigned long long id,
+ int thread_id,
+ const base::TimeTicks& timestamp,
+ unsigned char flags,
+ const char* arg1_name,
+ const scoped_refptr<base::debug::ConvertableToTraceFormat>& arg1_val,
+ const char* arg2_name,
+ const ARG2_TYPE& arg2_val) {
+ const int num_args = 2;
+ const char* arg_names[2] = { arg1_name, arg2_name };
+
+ unsigned char arg_types[2];
+ unsigned long long arg_values[2];
+ arg_types[0] = TRACE_VALUE_TYPE_CONVERTABLE;
+ arg_values[0] = 0;
+ SetTraceValue(arg2_val, &arg_types[1], &arg_values[1]);
+
+ scoped_refptr<base::debug::ConvertableToTraceFormat> convertable_values[2];
+ convertable_values[0] = arg1_val;
+
+ return TRACE_EVENT_API_ADD_TRACE_EVENT_WITH_THREAD_ID_AND_TIMESTAMP(
+ phase, category_group_enabled, name, id, thread_id, timestamp,
+ num_args, arg_names, arg_types, arg_values, convertable_values, flags);
+}
+
+static inline base::debug::TraceEventHandle
+AddTraceEventWithThreadIdAndTimestamp(
+ char phase,
+ const unsigned char* category_group_enabled,
+ const char* name,
+ unsigned long long id,
+ int thread_id,
+ const base::TimeTicks& timestamp,
+ unsigned char flags,
+ const char* arg1_name,
+ const scoped_refptr<base::debug::ConvertableToTraceFormat>& arg1_val,
+ const char* arg2_name,
+ const scoped_refptr<base::debug::ConvertableToTraceFormat>& arg2_val) {
+ const int num_args = 2;
+ const char* arg_names[2] = { arg1_name, arg2_name };
+ unsigned char arg_types[2] =
+ { TRACE_VALUE_TYPE_CONVERTABLE, TRACE_VALUE_TYPE_CONVERTABLE };
+ scoped_refptr<base::debug::ConvertableToTraceFormat> convertable_values[2] =
+ { arg1_val, arg2_val };
+
+ return TRACE_EVENT_API_ADD_TRACE_EVENT_WITH_THREAD_ID_AND_TIMESTAMP(
+ phase, category_group_enabled, name, id, thread_id, timestamp,
+ num_args, arg_names, arg_types, NULL, convertable_values, flags);
+}
+
+static inline base::debug::TraceEventHandle
+AddTraceEventWithThreadIdAndTimestamp(
+ char phase,
+ const unsigned char* category_group_enabled,
+ const char* name,
+ unsigned long long id,
+ int thread_id,
+ const base::TimeTicks& timestamp,
+ unsigned char flags) {
+ return TRACE_EVENT_API_ADD_TRACE_EVENT_WITH_THREAD_ID_AND_TIMESTAMP(
+ phase, category_group_enabled, name, id, thread_id, timestamp,
+ kZeroNumArgs, NULL, NULL, NULL, NULL, flags);
+}
+
+static inline base::debug::TraceEventHandle AddTraceEvent(
+ char phase,
+ const unsigned char* category_group_enabled,
+ const char* name,
+ unsigned long long id,
+ unsigned char flags) {
+ int thread_id = static_cast<int>(base::PlatformThread::CurrentId());
+ base::TimeTicks now = base::TimeTicks::NowFromSystemTraceTime();
+ return AddTraceEventWithThreadIdAndTimestamp(phase, category_group_enabled,
+ name, id, thread_id, now, flags);
+}
+
+template<class ARG1_TYPE>
+static inline base::debug::TraceEventHandle
+AddTraceEventWithThreadIdAndTimestamp(
+ char phase,
+ const unsigned char* category_group_enabled,
+ const char* name,
+ unsigned long long id,
+ int thread_id,
+ const base::TimeTicks& timestamp,
+ unsigned char flags,
+ const char* arg1_name,
+ const ARG1_TYPE& arg1_val) {
+ const int num_args = 1;
+ unsigned char arg_types[1];
+ unsigned long long arg_values[1];
+ SetTraceValue(arg1_val, &arg_types[0], &arg_values[0]);
+ return TRACE_EVENT_API_ADD_TRACE_EVENT_WITH_THREAD_ID_AND_TIMESTAMP(
+ phase, category_group_enabled, name, id, thread_id, timestamp,
+ num_args, &arg1_name, arg_types, arg_values, NULL, flags);
+}
+
+template<class ARG1_TYPE>
+static inline base::debug::TraceEventHandle AddTraceEvent(
+ char phase,
+ const unsigned char* category_group_enabled,
+ const char* name,
+ unsigned long long id,
+ unsigned char flags,
+ const char* arg1_name,
+ const ARG1_TYPE& arg1_val) {
+ int thread_id = static_cast<int>(base::PlatformThread::CurrentId());
+ base::TimeTicks now = base::TimeTicks::NowFromSystemTraceTime();
+ return AddTraceEventWithThreadIdAndTimestamp(phase, category_group_enabled,
+ name, id, thread_id, now, flags,
+ arg1_name, arg1_val);
+}
+
+template<class ARG1_TYPE, class ARG2_TYPE>
+static inline base::debug::TraceEventHandle
+AddTraceEventWithThreadIdAndTimestamp(
+ char phase,
+ const unsigned char* category_group_enabled,
+ const char* name,
+ unsigned long long id,
+ int thread_id,
+ const base::TimeTicks& timestamp,
+ unsigned char flags,
+ const char* arg1_name,
+ const ARG1_TYPE& arg1_val,
+ const char* arg2_name,
+ const ARG2_TYPE& arg2_val) {
+ const int num_args = 2;
+ const char* arg_names[2] = { arg1_name, arg2_name };
+ unsigned char arg_types[2];
+ unsigned long long arg_values[2];
+ SetTraceValue(arg1_val, &arg_types[0], &arg_values[0]);
+ SetTraceValue(arg2_val, &arg_types[1], &arg_values[1]);
+ return TRACE_EVENT_API_ADD_TRACE_EVENT_WITH_THREAD_ID_AND_TIMESTAMP(
+ phase, category_group_enabled, name, id, thread_id, timestamp,
+ num_args, arg_names, arg_types, arg_values, NULL, flags);
+}
+
+template<class ARG1_TYPE, class ARG2_TYPE>
+static inline base::debug::TraceEventHandle AddTraceEvent(
+ char phase,
+ const unsigned char* category_group_enabled,
+ const char* name,
+ unsigned long long id,
+ unsigned char flags,
+ const char* arg1_name,
+ const ARG1_TYPE& arg1_val,
+ const char* arg2_name,
+ const ARG2_TYPE& arg2_val) {
+ int thread_id = static_cast<int>(base::PlatformThread::CurrentId());
+ base::TimeTicks now = base::TimeTicks::NowFromSystemTraceTime();
+ return AddTraceEventWithThreadIdAndTimestamp(phase, category_group_enabled,
+ name, id, thread_id, now, flags,
+ arg1_name, arg1_val,
+ arg2_name, arg2_val);
+}
+
+// Used by TRACE_EVENTx macros. Do not use directly.
+class TRACE_EVENT_API_CLASS_EXPORT ScopedTracer {
+ public:
+ // Note: members of data_ intentionally left uninitialized. See Initialize.
+ ScopedTracer() : p_data_(NULL) {}
+
+ ~ScopedTracer() {
+ if (p_data_ && *data_.category_group_enabled)
+ TRACE_EVENT_API_UPDATE_TRACE_EVENT_DURATION(
+ data_.category_group_enabled, data_.name, data_.event_handle);
+ }
+
+ void Initialize(const unsigned char* category_group_enabled,
+ const char* name,
+ base::debug::TraceEventHandle event_handle) {
+ data_.category_group_enabled = category_group_enabled;
+ data_.name = name;
+ data_.event_handle = event_handle;
+ p_data_ = &data_;
+ }
+
+ private:
+  // This Data struct workaround is to avoid initializing all the members
+  // in Data during construction of this object, since this object is always
+  // constructed, even when tracing is disabled. If the members of Data were
+  // members of this class instead, compiler warnings would occur about
+  // potential uninitialized accesses.
+ struct Data {
+ const unsigned char* category_group_enabled;
+ const char* name;
+ base::debug::TraceEventHandle event_handle;
+ };
+ Data* p_data_;
+ Data data_;
+};
+
+// Used by TRACE_EVENT_BINARY_EFFICIENTx macro. Do not use directly.
+class TRACE_EVENT_API_CLASS_EXPORT ScopedTraceBinaryEfficient {
+ public:
+ ScopedTraceBinaryEfficient(const char* category_group, const char* name);
+ ~ScopedTraceBinaryEfficient();
+
+ private:
+ const unsigned char* category_group_enabled_;
+ const char* name_;
+ base::debug::TraceEventHandle event_handle_;
+};
+
+// This macro generates less code than TRACE_EVENT0 but is also
+// slower to execute when tracing is off. It should generally only be
+// used with code that is seldom executed or conditionally executed
+// when debugging.
+// For now the category_group must be "gpu".
+#define TRACE_EVENT_BINARY_EFFICIENT0(category_group, name) \
+ trace_event_internal::ScopedTraceBinaryEfficient \
+ INTERNAL_TRACE_EVENT_UID(scoped_trace)(category_group, name);
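+
+// Usage sketch (illustrative function name; the category is restricted to
+// "gpu" as noted above):
+//   void GpuScheduler::PollWork() {
+//     TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GpuScheduler::PollWork");
+//     // ... seldom-executed work ...
+//   }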
+
+// TraceEventSamplingStateScope records the current sampling state
+// and sets a new sampling state. When the scope exits, it restores
+// the sampling state that it recorded.
+template<size_t BucketNumber>
+class TraceEventSamplingStateScope {
+ public:
+ TraceEventSamplingStateScope(const char* category_and_name) {
+ previous_state_ = TraceEventSamplingStateScope<BucketNumber>::Current();
+ TraceEventSamplingStateScope<BucketNumber>::Set(category_and_name);
+ }
+
+ ~TraceEventSamplingStateScope() {
+ TraceEventSamplingStateScope<BucketNumber>::Set(previous_state_);
+ }
+
+ static inline const char* Current() {
+ return reinterpret_cast<const char*>(TRACE_EVENT_API_ATOMIC_LOAD(
+ g_trace_state[BucketNumber]));
+ }
+
+ static inline void Set(const char* category_and_name) {
+ TRACE_EVENT_API_ATOMIC_STORE(
+ g_trace_state[BucketNumber],
+ reinterpret_cast<TRACE_EVENT_API_ATOMIC_WORD>(
+ const_cast<char*>(category_and_name)));
+ }
+
+ private:
+ const char* previous_state_;
+};
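+
+// Usage sketch (illustrative; the sampling macros earlier in this header use
+// bucket 0 for the current thread's state):
+//   {
+//     trace_event_internal::TraceEventSamplingStateScope<0> scope(
+//         "category\0name");   // combined "category\0name" state string
+//     DoSampledWork();         // samples taken here report this state
+//   }                          // previous state restored on destruction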
+
+} // namespace trace_event_internal
+
+namespace base {
+namespace debug {
+
+template<typename IDType> class TraceScopedTrackableObject {
+ public:
+ TraceScopedTrackableObject(const char* category_group, const char* name,
+ IDType id)
+ : category_group_(category_group),
+ name_(name),
+ id_(id) {
+ TRACE_EVENT_OBJECT_CREATED_WITH_ID(category_group_, name_, id_);
+ }
+
+ template <typename ArgType> void snapshot(ArgType snapshot) {
+ TRACE_EVENT_OBJECT_SNAPSHOT_WITH_ID(category_group_, name_, id_, snapshot);
+ }
+
+ ~TraceScopedTrackableObject() {
+ TRACE_EVENT_OBJECT_DELETED_WITH_ID(category_group_, name_, id_);
+ }
+
+ private:
+ const char* category_group_;
+ const char* name_;
+ IDType id_;
+
+ DISALLOW_COPY_AND_ASSIGN(TraceScopedTrackableObject);
+};
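+
+// Usage sketch (illustrative class and category names):
+//   class Tile {
+//    public:
+//     Tile() : tracker_("cc", "Tile", this) {}
+//    private:
+//     base::debug::TraceScopedTrackableObject<Tile*> tracker_;
+//   };
+// The object's lifetime is then bracketed by CREATED/DELETED events, and
+// tracker_.snapshot(value) emits a SNAPSHOT event in between.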
+
+} // namespace debug
+} // namespace base
+
+#endif  // BASE_TRACE_EVENT_TRACE_EVENT_H_
diff --git a/base/trace_event/trace_event_android.cc b/base/trace_event/trace_event_android.cc
new file mode 100644
index 0000000..31da26d
--- /dev/null
+++ b/base/trace_event/trace_event_android.cc
@@ -0,0 +1,199 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/trace_event/trace_event_impl.h"
+
+#include <fcntl.h>
+
+#include "base/format_macros.h"
+#include "base/logging.h"
+#include "base/strings/stringprintf.h"
+#include "base/synchronization/waitable_event.h"
+#include "base/trace_event/trace_event.h"
+
+namespace {
+
+int g_atrace_fd = -1;
+const char kATraceMarkerFile[] = "/sys/kernel/debug/tracing/trace_marker";
+
+void WriteEvent(
+ char phase,
+ const char* category_group,
+ const char* name,
+ unsigned long long id,
+ const char** arg_names,
+ const unsigned char* arg_types,
+ const base::debug::TraceEvent::TraceValue* arg_values,
+ const scoped_refptr<base::debug::ConvertableToTraceFormat>*
+ convertable_values,
+ unsigned char flags) {
+ std::string out = base::StringPrintf("%c|%d|%s", phase, getpid(), name);
+ if (flags & TRACE_EVENT_FLAG_HAS_ID)
+ base::StringAppendF(&out, "-%" PRIx64, static_cast<uint64>(id));
+ out += '|';
+
+ for (int i = 0; i < base::debug::kTraceMaxNumArgs && arg_names[i]; ++i) {
+ if (i)
+ out += ';';
+ out += arg_names[i];
+ out += '=';
+ std::string::size_type value_start = out.length();
+ if (arg_types[i] == TRACE_VALUE_TYPE_CONVERTABLE) {
+ convertable_values[i]->AppendAsTraceFormat(&out);
+ } else {
+ base::debug::TraceEvent::AppendValueAsJSON(
+ arg_types[i], arg_values[i], &out);
+ }
+ // Remove the quotes which may confuse the atrace script.
+ ReplaceSubstringsAfterOffset(&out, value_start, "\\\"", "'");
+ ReplaceSubstringsAfterOffset(&out, value_start, "\"", "");
+ // Replace chars used for separators with similar chars in the value.
+ std::replace(out.begin() + value_start, out.end(), ';', ',');
+ std::replace(out.begin() + value_start, out.end(), '|', '!');
+ }
+
+ out += '|';
+ out += category_group;
+ write(g_atrace_fd, out.c_str(), out.size());
+}
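+
+// Example of a resulting trace_marker line (illustrative values):
+//   B|1234|MyEvent-1a2b|arg1=3;arg2=text|category_group
+// The "-1a2b" id suffix appears only when TRACE_EVENT_FLAG_HAS_ID is set.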
+
+void NoOpOutputCallback(base::WaitableEvent* complete_event,
+ const scoped_refptr<base::RefCountedString>&,
+ bool has_more_events) {
+ if (!has_more_events)
+ complete_event->Signal();
+}
+
+void EndChromeTracing(base::debug::TraceLog* trace_log,
+ base::WaitableEvent* complete_event) {
+ trace_log->SetDisabled();
+ // Delete the buffered trace events as they have been sent to atrace.
+ trace_log->Flush(base::Bind(&NoOpOutputCallback, complete_event));
+}
+
+} // namespace
+
+namespace base {
+namespace debug {
+
+// These functions support Android systrace.py when 'webview' category is
+// traced. With the new adb_profile_chrome, we may have two phases:
+// - before WebView is ready for combined tracing, we can use adb_profile_chrome
+// to trace android categories other than 'webview' and chromium categories.
+// In this way we can avoid the conflict between StartATrace/StopATrace and
+// the intents.
+// - TODO(wangxianzhu): after WebView is ready for combined tracing, remove
+// StartATrace, StopATrace and SendToATrace, and perhaps send Java traces
+// directly to atrace in trace_event_binding.cc.
+
+void TraceLog::StartATrace() {
+ if (g_atrace_fd != -1)
+ return;
+
+ g_atrace_fd = open(kATraceMarkerFile, O_WRONLY);
+ if (g_atrace_fd == -1) {
+ PLOG(WARNING) << "Couldn't open " << kATraceMarkerFile;
+ return;
+ }
+ SetEnabled(CategoryFilter(CategoryFilter::kDefaultCategoryFilterString),
+ TraceLog::RECORDING_MODE,
+ TraceOptions(RECORD_CONTINUOUSLY));
+}
+
+void TraceLog::StopATrace() {
+ if (g_atrace_fd == -1)
+ return;
+
+ close(g_atrace_fd);
+ g_atrace_fd = -1;
+
+ // TraceLog::Flush() requires the current thread to have a message loop, but
+ // this thread called from Java may not have one, so flush in another thread.
+ Thread end_chrome_tracing_thread("end_chrome_tracing");
+ WaitableEvent complete_event(false, false);
+ end_chrome_tracing_thread.Start();
+ end_chrome_tracing_thread.message_loop()->PostTask(
+ FROM_HERE, base::Bind(&EndChromeTracing, Unretained(this),
+ Unretained(&complete_event)));
+ complete_event.Wait();
+}
+
+void TraceEvent::SendToATrace() {
+ if (g_atrace_fd == -1)
+ return;
+
+ const char* category_group =
+ TraceLog::GetCategoryGroupName(category_group_enabled_);
+
+ switch (phase_) {
+ case TRACE_EVENT_PHASE_BEGIN:
+ WriteEvent('B', category_group, name_, id_,
+ arg_names_, arg_types_, arg_values_, convertable_values_,
+ flags_);
+ break;
+
+ case TRACE_EVENT_PHASE_COMPLETE:
+ WriteEvent(duration_.ToInternalValue() == -1 ? 'B' : 'E',
+ category_group, name_, id_,
+ arg_names_, arg_types_, arg_values_, convertable_values_,
+ flags_);
+ break;
+
+ case TRACE_EVENT_PHASE_END:
+      // Though a single 'E' is enough, also append the pid, name,
+      // category_group, etc., so that unpaired events can be found easily.
+ WriteEvent('E', category_group, name_, id_,
+ arg_names_, arg_types_, arg_values_, convertable_values_,
+ flags_);
+ break;
+
+ case TRACE_EVENT_PHASE_INSTANT:
+      // Simulate an instant event with a pair of begin/end events.
+ WriteEvent('B', category_group, name_, id_,
+ arg_names_, arg_types_, arg_values_, convertable_values_,
+ flags_);
+ write(g_atrace_fd, "E", 1);
+ break;
+
+ case TRACE_EVENT_PHASE_COUNTER:
+ for (int i = 0; i < kTraceMaxNumArgs && arg_names_[i]; ++i) {
+ DCHECK(arg_types_[i] == TRACE_VALUE_TYPE_INT);
+ std::string out = base::StringPrintf(
+ "C|%d|%s-%s", getpid(), name_, arg_names_[i]);
+ if (flags_ & TRACE_EVENT_FLAG_HAS_ID)
+ StringAppendF(&out, "-%" PRIx64, static_cast<uint64>(id_));
+ StringAppendF(&out, "|%d|%s",
+ static_cast<int>(arg_values_[i].as_int), category_group);
+ write(g_atrace_fd, out.c_str(), out.size());
+ }
+ break;
+
+ default:
+ // Do nothing.
+ break;
+ }
+}
+
+void TraceLog::AddClockSyncMetadataEvent() {
+ int atrace_fd = open(kATraceMarkerFile, O_WRONLY | O_APPEND);
+ if (atrace_fd == -1) {
+ PLOG(WARNING) << "Couldn't open " << kATraceMarkerFile;
+ return;
+ }
+
+ // Android's kernel trace system has a trace_marker feature: this is a file on
+ // debugfs that takes the written data and pushes it onto the trace
+ // buffer. So, to establish clock sync, we write our monotonic clock into that
+ // trace buffer.
+ TimeTicks now = TimeTicks::NowFromSystemTraceTime();
+ double now_in_seconds = now.ToInternalValue() / 1000000.0;
+ std::string marker = StringPrintf(
+ "trace_event_clock_sync: parent_ts=%f\n", now_in_seconds);
+ if (write(atrace_fd, marker.c_str(), marker.size()) == -1)
+ PLOG(WARNING) << "Couldn't write to " << kATraceMarkerFile;
+ close(atrace_fd);
+}
+
+} // namespace debug
+} // namespace base
diff --git a/base/trace_event/trace_event_argument.cc b/base/trace_event/trace_event_argument.cc
new file mode 100644
index 0000000..00fcde1
--- /dev/null
+++ b/base/trace_event/trace_event_argument.cc
@@ -0,0 +1,117 @@
+// Copyright (c) 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/trace_event/trace_event_argument.h"
+
+#include "base/json/json_writer.h"
+#include "base/values.h"
+
+namespace base {
+namespace debug {
+
+TracedValue::TracedValue() : root_(new DictionaryValue()) {
+ stack_.push_back(root_.get());
+}
+
+TracedValue::~TracedValue() {
+ DCHECK_EQ(1u, stack_.size());
+}
+
+void TracedValue::SetInteger(const char* name, int value) {
+ GetCurrentDictionary()->SetInteger(name, value);
+}
+
+void TracedValue::SetDouble(const char* name, double value) {
+ GetCurrentDictionary()->SetDouble(name, value);
+}
+
+void TracedValue::SetBoolean(const char* name, bool value) {
+ GetCurrentDictionary()->SetBoolean(name, value);
+}
+
+void TracedValue::SetString(const char* name, const std::string& value) {
+ GetCurrentDictionary()->SetString(name, value);
+}
+
+void TracedValue::SetValue(const char* name, Value* value) {
+ GetCurrentDictionary()->Set(name, value);
+}
+
+void TracedValue::BeginDictionary(const char* name) {
+ DictionaryValue* dictionary = new DictionaryValue();
+ GetCurrentDictionary()->Set(name, dictionary);
+ stack_.push_back(dictionary);
+}
+
+void TracedValue::BeginArray(const char* name) {
+ ListValue* array = new ListValue();
+ GetCurrentDictionary()->Set(name, array);
+ stack_.push_back(array);
+}
+
+void TracedValue::EndDictionary() {
+ DCHECK_GT(stack_.size(), 1u);
+ DCHECK(GetCurrentDictionary());
+ stack_.pop_back();
+}
+
+void TracedValue::AppendInteger(int value) {
+ GetCurrentArray()->AppendInteger(value);
+}
+
+void TracedValue::AppendDouble(double value) {
+ GetCurrentArray()->AppendDouble(value);
+}
+
+void TracedValue::AppendBoolean(bool value) {
+ GetCurrentArray()->AppendBoolean(value);
+}
+
+void TracedValue::AppendString(const std::string& value) {
+ GetCurrentArray()->AppendString(value);
+}
+
+void TracedValue::BeginArray() {
+ ListValue* array = new ListValue();
+ GetCurrentArray()->Append(array);
+ stack_.push_back(array);
+}
+
+void TracedValue::BeginDictionary() {
+ DictionaryValue* dictionary = new DictionaryValue();
+ GetCurrentArray()->Append(dictionary);
+ stack_.push_back(dictionary);
+}
+
+void TracedValue::EndArray() {
+ DCHECK_GT(stack_.size(), 1u);
+ DCHECK(GetCurrentArray());
+ stack_.pop_back();
+}
+
+DictionaryValue* TracedValue::GetCurrentDictionary() {
+ DCHECK(!stack_.empty());
+ DictionaryValue* dictionary = NULL;
+ stack_.back()->GetAsDictionary(&dictionary);
+ DCHECK(dictionary);
+ return dictionary;
+}
+
+ListValue* TracedValue::GetCurrentArray() {
+ DCHECK(!stack_.empty());
+ ListValue* list = NULL;
+ stack_.back()->GetAsList(&list);
+ DCHECK(list);
+ return list;
+}
+
+void TracedValue::AppendAsTraceFormat(std::string* out) const {
+ std::string tmp;
+ JSONWriter::Write(stack_.front(), &tmp);
+ *out += tmp;
+ DCHECK_EQ(1u, stack_.size()) << tmp;
+}
+
+} // namespace debug
+} // namespace base
diff --git a/base/trace_event/trace_event_argument.h b/base/trace_event/trace_event_argument.h
new file mode 100644
index 0000000..08a5b50
--- /dev/null
+++ b/base/trace_event/trace_event_argument.h
@@ -0,0 +1,59 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TRACE_EVENT_TRACE_EVENT_ARGUMENT_H_
+#define BASE_TRACE_EVENT_TRACE_EVENT_ARGUMENT_H_
+
+#include <string>
+#include <vector>
+
+#include "base/memory/scoped_ptr.h"
+#include "base/trace_event/trace_event.h"
+
+namespace base {
+class DictionaryValue;
+class ListValue;
+class Value;
+
+namespace debug {
+
+class BASE_EXPORT TracedValue : public ConvertableToTraceFormat {
+ public:
+ TracedValue();
+
+ void EndDictionary();
+ void EndArray();
+
+ void SetInteger(const char* name, int value);
+ void SetDouble(const char* name, double);
+ void SetBoolean(const char* name, bool value);
+ void SetString(const char* name, const std::string& value);
+ void SetValue(const char* name, Value* value);
+ void BeginDictionary(const char* name);
+ void BeginArray(const char* name);
+
+ void AppendInteger(int);
+ void AppendDouble(double);
+ void AppendBoolean(bool);
+ void AppendString(const std::string&);
+ void BeginArray();
+ void BeginDictionary();
+
+ void AppendAsTraceFormat(std::string* out) const override;
+
+ private:
+ ~TracedValue() override;
+
+ DictionaryValue* GetCurrentDictionary();
+ ListValue* GetCurrentArray();
+
+ scoped_ptr<base::Value> root_;
+ std::vector<Value*> stack_;
+ DISALLOW_COPY_AND_ASSIGN(TracedValue);
+};
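+
+// Usage sketch (illustrative), mirroring the unit tests below; since
+// TracedValue is a ConvertableToTraceFormat, the result can also be passed
+// as a trace event argument.
+//   scoped_refptr<TracedValue> state(new TracedValue());
+//   state->BeginDictionary("size");
+//   state->SetInteger("width", 640);
+//   state->SetInteger("height", 480);
+//   state->EndDictionary();
+//   std::string json;
+//   state->AppendAsTraceFormat(&json);
+//   // json == "{\"size\":{\"height\":480,\"width\":640}}"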
+
+} // namespace debug
+} // namespace base
+
+#endif // BASE_TRACE_EVENT_TRACE_EVENT_ARGUMENT_H_
diff --git a/base/trace_event/trace_event_argument_unittest.cc b/base/trace_event/trace_event_argument_unittest.cc
new file mode 100644
index 0000000..39cafef
--- /dev/null
+++ b/base/trace_event/trace_event_argument_unittest.cc
@@ -0,0 +1,53 @@
+// Copyright (c) 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/trace_event/trace_event_argument.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+namespace debug {
+
+TEST(TraceEventArgumentTest, FlatDictionary) {
+ scoped_refptr<TracedValue> value = new TracedValue();
+ value->SetInteger("int", 2014);
+ value->SetDouble("double", 0.0);
+ value->SetBoolean("bool", true);
+ value->SetString("string", "string");
+ std::string json;
+ value->AppendAsTraceFormat(&json);
+ EXPECT_EQ("{\"bool\":true,\"double\":0.0,\"int\":2014,\"string\":\"string\"}",
+ json);
+}
+
+TEST(TraceEventArgumentTest, Hierarchy) {
+ scoped_refptr<TracedValue> value = new TracedValue();
+ value->SetInteger("i0", 2014);
+ value->BeginDictionary("dict1");
+ value->SetInteger("i1", 2014);
+ value->BeginDictionary("dict2");
+ value->SetBoolean("b2", false);
+ value->EndDictionary();
+ value->SetString("s1", "foo");
+ value->EndDictionary();
+ value->SetDouble("d0", 0.0);
+ value->SetBoolean("b0", true);
+ value->BeginArray("a1");
+ value->AppendInteger(1);
+ value->AppendBoolean(true);
+ value->BeginDictionary();
+ value->SetInteger("i2", 3);
+ value->EndDictionary();
+ value->EndArray();
+ value->SetString("s0", "foo");
+ std::string json;
+ value->AppendAsTraceFormat(&json);
+ EXPECT_EQ(
+ "{\"a1\":[1,true,{\"i2\":3}],\"b0\":true,\"d0\":0.0,\"dict1\":{\"dict2\":"
+ "{\"b2\":false},\"i1\":2014,\"s1\":\"foo\"},\"i0\":2014,\"s0\":"
+ "\"foo\"}",
+ json);
+}
+
+} // namespace debug
+} // namespace base
diff --git a/base/trace_event/trace_event_impl.cc b/base/trace_event/trace_event_impl.cc
new file mode 100644
index 0000000..a8a0cc1
--- /dev/null
+++ b/base/trace_event/trace_event_impl.cc
@@ -0,0 +1,2594 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/trace_event/trace_event_impl.h"
+
+#include <algorithm>
+
+#include "base/base_switches.h"
+#include "base/bind.h"
+#include "base/command_line.h"
+#include "base/debug/leak_annotations.h"
+#include "base/float_util.h"
+#include "base/format_macros.h"
+#include "base/json/string_escape.h"
+#include "base/lazy_instance.h"
+#include "base/memory/singleton.h"
+#include "base/message_loop/message_loop.h"
+#include "base/process/process_metrics.h"
+#include "base/stl_util.h"
+#include "base/strings/string_number_conversions.h"
+#include "base/strings/string_split.h"
+#include "base/strings/string_tokenizer.h"
+#include "base/strings/string_util.h"
+#include "base/strings/stringprintf.h"
+#include "base/strings/utf_string_conversions.h"
+#include "base/synchronization/cancellation_flag.h"
+#include "base/synchronization/waitable_event.h"
+#include "base/sys_info.h"
+#include "base/third_party/dynamic_annotations/dynamic_annotations.h"
+#include "base/threading/platform_thread.h"
+#include "base/threading/thread_id_name_manager.h"
+#include "base/time/time.h"
+#include "base/trace_event/trace_event.h"
+#include "base/trace_event/trace_event_synthetic_delay.h"
+
+#if defined(OS_WIN)
+#include "base/trace_event/trace_event_win.h"
+#endif
+
+class DeleteTraceLogForTesting {
+ public:
+ static void Delete() {
+ Singleton<base::debug::TraceLog,
+ LeakySingletonTraits<base::debug::TraceLog> >::OnExit(0);
+ }
+};
+
+// The thread buckets for the sampling profiler.
+BASE_EXPORT TRACE_EVENT_API_ATOMIC_WORD g_trace_state[3];
+
+namespace base {
+namespace debug {
+
+namespace {
+
+// The overhead of TraceEvent above this threshold will be reported in the
+// trace.
+const int kOverheadReportThresholdInMicroseconds = 50;
+
+// String options that can be used to initialize TraceOptions.
+const char kRecordUntilFull[] = "record-until-full";
+const char kRecordContinuously[] = "record-continuously";
+const char kRecordAsMuchAsPossible[] = "record-as-much-as-possible";
+const char kTraceToConsole[] = "trace-to-console";
+const char kEnableSampling[] = "enable-sampling";
+const char kEnableSystrace[] = "enable-systrace";
+
+// Controls the number of trace events we will buffer in memory
+// before throwing them away.
+const size_t kTraceBufferChunkSize = TraceBufferChunk::kTraceBufferChunkSize;
+const size_t kTraceEventVectorBigBufferChunks =
+ 512000000 / kTraceBufferChunkSize;
+const size_t kTraceEventVectorBufferChunks = 256000 / kTraceBufferChunkSize;
+const size_t kTraceEventRingBufferChunks = kTraceEventVectorBufferChunks / 4;
+const size_t kTraceEventBatchChunks = 1000 / kTraceBufferChunkSize;
+// Can store results for 30 seconds with 1 ms sampling interval.
+const size_t kMonitorTraceEventBufferChunks = 30000 / kTraceBufferChunkSize;
+// ECHO_TO_CONSOLE needs a small buffer to hold the unfinished COMPLETE events.
+const size_t kEchoToConsoleTraceEventBufferChunks = 256;
+
+const int kThreadFlushTimeoutMs = 3000;
+
+#if !defined(OS_NACL)
+// These categories will cause deadlock when ECHO_TO_CONSOLE. crbug.com/325575.
+const char kEchoToConsoleCategoryFilter[] = "-ipc,-task";
+#endif
+
+const char kSyntheticDelayCategoryFilterPrefix[] = "DELAY(";
+
+#define MAX_CATEGORY_GROUPS 100
+
+// Parallel arrays g_category_groups and g_category_group_enabled are separate
+// so that a pointer to a member of g_category_group_enabled can be easily
+// converted to an index into g_category_groups. This allows macros to deal
+// only with char enabled pointers from g_category_group_enabled, and we can
+// convert internally to determine the category name from the char enabled
+// pointer.
+const char* g_category_groups[MAX_CATEGORY_GROUPS] = {
+ "toplevel",
+ "tracing already shutdown",
+ "tracing categories exhausted; must increase MAX_CATEGORY_GROUPS",
+ "__metadata",
+ // For reporting trace_event overhead. For thread local event buffers only.
+ "trace_event_overhead"};
+
+// The enabled flag is char instead of bool so that the API can be used from C.
+unsigned char g_category_group_enabled[MAX_CATEGORY_GROUPS] = { 0 };
+// Indexes here have to match the g_category_groups array indexes above.
+const int g_category_already_shutdown = 1;
+const int g_category_categories_exhausted = 2;
+const int g_category_metadata = 3;
+const int g_category_trace_event_overhead = 4;
+const int g_num_builtin_categories = 5;
+// Skip default categories.
+base::subtle::AtomicWord g_category_index = g_num_builtin_categories;
+
+// The name of the current thread. This is used to decide if the current
+// thread name has changed. We combine all the seen thread names into the
+// output name for the thread.
+LazyInstance<ThreadLocalPointer<const char> >::Leaky
+ g_current_thread_name = LAZY_INSTANCE_INITIALIZER;
+
+TimeTicks ThreadNow() {
+ return TimeTicks::IsThreadNowSupported() ?
+ TimeTicks::ThreadNow() : TimeTicks();
+}
+
+class TraceBufferRingBuffer : public TraceBuffer {
+ public:
+ TraceBufferRingBuffer(size_t max_chunks)
+ : max_chunks_(max_chunks),
+ recyclable_chunks_queue_(new size_t[queue_capacity()]),
+ queue_head_(0),
+ queue_tail_(max_chunks),
+ current_iteration_index_(0),
+ current_chunk_seq_(1) {
+ chunks_.reserve(max_chunks);
+ for (size_t i = 0; i < max_chunks; ++i)
+ recyclable_chunks_queue_[i] = i;
+ }
+
+ scoped_ptr<TraceBufferChunk> GetChunk(size_t* index) override {
+ // Because the number of threads is much less than the number of chunks,
+ // the queue should never be empty.
+ DCHECK(!QueueIsEmpty());
+
+ *index = recyclable_chunks_queue_[queue_head_];
+ queue_head_ = NextQueueIndex(queue_head_);
+ current_iteration_index_ = queue_head_;
+
+ if (*index >= chunks_.size())
+ chunks_.resize(*index + 1);
+
+ TraceBufferChunk* chunk = chunks_[*index];
+    chunks_[*index] = NULL;  // Put NULL in the slot of an in-flight chunk.
+ if (chunk)
+ chunk->Reset(current_chunk_seq_++);
+ else
+ chunk = new TraceBufferChunk(current_chunk_seq_++);
+
+ return scoped_ptr<TraceBufferChunk>(chunk);
+ }
+
+ void ReturnChunk(size_t index, scoped_ptr<TraceBufferChunk> chunk) override {
+ // When this method is called, the queue should not be full because it
+ // can contain all chunks including the one to be returned.
+ DCHECK(!QueueIsFull());
+ DCHECK(chunk);
+ DCHECK_LT(index, chunks_.size());
+ DCHECK(!chunks_[index]);
+ chunks_[index] = chunk.release();
+ recyclable_chunks_queue_[queue_tail_] = index;
+ queue_tail_ = NextQueueIndex(queue_tail_);
+ }
+
+ bool IsFull() const override { return false; }
+
+ size_t Size() const override {
+ // This is approximate because not all of the chunks are full.
+ return chunks_.size() * kTraceBufferChunkSize;
+ }
+
+ size_t Capacity() const override {
+ return max_chunks_ * kTraceBufferChunkSize;
+ }
+
+ TraceEvent* GetEventByHandle(TraceEventHandle handle) override {
+ if (handle.chunk_index >= chunks_.size())
+ return NULL;
+ TraceBufferChunk* chunk = chunks_[handle.chunk_index];
+ if (!chunk || chunk->seq() != handle.chunk_seq)
+ return NULL;
+ return chunk->GetEventAt(handle.event_index);
+ }
+
+ const TraceBufferChunk* NextChunk() override {
+ if (chunks_.empty())
+ return NULL;
+
+ while (current_iteration_index_ != queue_tail_) {
+ size_t chunk_index = recyclable_chunks_queue_[current_iteration_index_];
+ current_iteration_index_ = NextQueueIndex(current_iteration_index_);
+ if (chunk_index >= chunks_.size()) // Skip uninitialized chunks.
+ continue;
+ DCHECK(chunks_[chunk_index]);
+ return chunks_[chunk_index];
+ }
+ return NULL;
+ }
+
+ scoped_ptr<TraceBuffer> CloneForIteration() const override {
+ scoped_ptr<ClonedTraceBuffer> cloned_buffer(new ClonedTraceBuffer());
+ for (size_t queue_index = queue_head_; queue_index != queue_tail_;
+ queue_index = NextQueueIndex(queue_index)) {
+ size_t chunk_index = recyclable_chunks_queue_[queue_index];
+ if (chunk_index >= chunks_.size()) // Skip uninitialized chunks.
+ continue;
+ TraceBufferChunk* chunk = chunks_[chunk_index];
+ cloned_buffer->chunks_.push_back(chunk ? chunk->Clone().release() : NULL);
+ }
+ return cloned_buffer.Pass();
+ }
+
+ private:
+ class ClonedTraceBuffer : public TraceBuffer {
+ public:
+ ClonedTraceBuffer() : current_iteration_index_(0) {}
+
+ // The only implemented method.
+ const TraceBufferChunk* NextChunk() override {
+ return current_iteration_index_ < chunks_.size() ?
+ chunks_[current_iteration_index_++] : NULL;
+ }
+
+ scoped_ptr<TraceBufferChunk> GetChunk(size_t* index) override {
+ NOTIMPLEMENTED();
+ return scoped_ptr<TraceBufferChunk>();
+ }
+ void ReturnChunk(size_t index, scoped_ptr<TraceBufferChunk>) override {
+ NOTIMPLEMENTED();
+ }
+ bool IsFull() const override { return false; }
+ size_t Size() const override { return 0; }
+ size_t Capacity() const override { return 0; }
+ TraceEvent* GetEventByHandle(TraceEventHandle handle) override {
+ return NULL;
+ }
+ scoped_ptr<TraceBuffer> CloneForIteration() const override {
+ NOTIMPLEMENTED();
+ return scoped_ptr<TraceBuffer>();
+ }
+
+ size_t current_iteration_index_;
+ ScopedVector<TraceBufferChunk> chunks_;
+ };
+
+ bool QueueIsEmpty() const {
+ return queue_head_ == queue_tail_;
+ }
+
+ size_t QueueSize() const {
+ return queue_tail_ > queue_head_ ? queue_tail_ - queue_head_ :
+ queue_tail_ + queue_capacity() - queue_head_;
+ }
+
+ bool QueueIsFull() const {
+ return QueueSize() == queue_capacity() - 1;
+ }
+
+ size_t queue_capacity() const {
+    // One extra space to help distinguish full state from empty state.
+ return max_chunks_ + 1;
+ }
+
+ size_t NextQueueIndex(size_t index) const {
+ index++;
+ if (index >= queue_capacity())
+ index = 0;
+ return index;
+ }
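+
+  // Worked example (illustrative): with max_chunks_ == 3, queue_capacity()
+  // is 4, so the ring holds at most QueueSize() == 3 entries. An empty queue
+  // has queue_head_ == queue_tail_; a full one leaves exactly one slot
+  // unused, which is what lets the two states be distinguished.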
+
+ size_t max_chunks_;
+ ScopedVector<TraceBufferChunk> chunks_;
+
+ scoped_ptr<size_t[]> recyclable_chunks_queue_;
+ size_t queue_head_;
+ size_t queue_tail_;
+
+ size_t current_iteration_index_;
+ uint32 current_chunk_seq_;
+
+ DISALLOW_COPY_AND_ASSIGN(TraceBufferRingBuffer);
+};
+
+class TraceBufferVector : public TraceBuffer {
+ public:
+ TraceBufferVector(size_t max_chunks)
+ : in_flight_chunk_count_(0),
+ current_iteration_index_(0),
+ max_chunks_(max_chunks) {
+ chunks_.reserve(max_chunks_);
+ }
+
+ scoped_ptr<TraceBufferChunk> GetChunk(size_t* index) override {
+    // This function may be called when adding normal events or indirectly
+    // from AddMetadataEventsWhileLocked(). We cannot DCHECK(!IsFull())
+    // because we have to add the metadata events and flush thread-local
+    // buffers even if the buffer is full.
+ *index = chunks_.size();
+    chunks_.push_back(NULL);  // Put NULL in the slot of an in-flight chunk.
+ ++in_flight_chunk_count_;
+ // + 1 because zero chunk_seq is not allowed.
+ return scoped_ptr<TraceBufferChunk>(
+ new TraceBufferChunk(static_cast<uint32>(*index) + 1));
+ }
+
+ void ReturnChunk(size_t index, scoped_ptr<TraceBufferChunk> chunk) override {
+ DCHECK_GT(in_flight_chunk_count_, 0u);
+ DCHECK_LT(index, chunks_.size());
+ DCHECK(!chunks_[index]);
+ --in_flight_chunk_count_;
+ chunks_[index] = chunk.release();
+ }
+
+ bool IsFull() const override { return chunks_.size() >= max_chunks_; }
+
+ size_t Size() const override {
+ // This is approximate because not all of the chunks are full.
+ return chunks_.size() * kTraceBufferChunkSize;
+ }
+
+ size_t Capacity() const override {
+ return max_chunks_ * kTraceBufferChunkSize;
+ }
+
+ TraceEvent* GetEventByHandle(TraceEventHandle handle) override {
+ if (handle.chunk_index >= chunks_.size())
+ return NULL;
+ TraceBufferChunk* chunk = chunks_[handle.chunk_index];
+ if (!chunk || chunk->seq() != handle.chunk_seq)
+ return NULL;
+ return chunk->GetEventAt(handle.event_index);
+ }
+
+ const TraceBufferChunk* NextChunk() override {
+ while (current_iteration_index_ < chunks_.size()) {
+ // Skip in-flight chunks.
+ const TraceBufferChunk* chunk = chunks_[current_iteration_index_++];
+ if (chunk)
+ return chunk;
+ }
+ return NULL;
+ }
+
+ scoped_ptr<TraceBuffer> CloneForIteration() const override {
+ NOTIMPLEMENTED();
+ return scoped_ptr<TraceBuffer>();
+ }
+
+ private:
+ size_t in_flight_chunk_count_;
+ size_t current_iteration_index_;
+ size_t max_chunks_;
+ ScopedVector<TraceBufferChunk> chunks_;
+
+ DISALLOW_COPY_AND_ASSIGN(TraceBufferVector);
+};
+
+template <typename T>
+void InitializeMetadataEvent(TraceEvent* trace_event,
+ int thread_id,
+ const char* metadata_name, const char* arg_name,
+ const T& value) {
+ if (!trace_event)
+ return;
+
+ int num_args = 1;
+ unsigned char arg_type;
+ unsigned long long arg_value;
+ ::trace_event_internal::SetTraceValue(value, &arg_type, &arg_value);
+ trace_event->Initialize(thread_id,
+ TimeTicks(), TimeTicks(), TRACE_EVENT_PHASE_METADATA,
+ &g_category_group_enabled[g_category_metadata],
+ metadata_name, ::trace_event_internal::kNoEventId,
+ num_args, &arg_name, &arg_type, &arg_value, NULL,
+ TRACE_EVENT_FLAG_NONE);
+}
+
+class AutoThreadLocalBoolean {
+ public:
+ explicit AutoThreadLocalBoolean(ThreadLocalBoolean* thread_local_boolean)
+ : thread_local_boolean_(thread_local_boolean) {
+ DCHECK(!thread_local_boolean_->Get());
+ thread_local_boolean_->Set(true);
+ }
+ ~AutoThreadLocalBoolean() {
+ thread_local_boolean_->Set(false);
+ }
+
+ private:
+ ThreadLocalBoolean* thread_local_boolean_;
+ DISALLOW_COPY_AND_ASSIGN(AutoThreadLocalBoolean);
+};
+
+} // namespace
+
+void TraceBufferChunk::Reset(uint32 new_seq) {
+ for (size_t i = 0; i < next_free_; ++i)
+ chunk_[i].Reset();
+ next_free_ = 0;
+ seq_ = new_seq;
+}
+
+TraceEvent* TraceBufferChunk::AddTraceEvent(size_t* event_index) {
+ DCHECK(!IsFull());
+ *event_index = next_free_++;
+ return &chunk_[*event_index];
+}
+
+scoped_ptr<TraceBufferChunk> TraceBufferChunk::Clone() const {
+ scoped_ptr<TraceBufferChunk> cloned_chunk(new TraceBufferChunk(seq_));
+ cloned_chunk->next_free_ = next_free_;
+ for (size_t i = 0; i < next_free_; ++i)
+ cloned_chunk->chunk_[i].CopyFrom(chunk_[i]);
+ return cloned_chunk.Pass();
+}
+
+// A helper class that allows the lock to be acquired in the middle of the
+// scope and unlocks at the end of the scope if locked.
+class TraceLog::OptionalAutoLock {
+ public:
+ explicit OptionalAutoLock(Lock& lock)
+ : lock_(lock),
+ locked_(false) {
+ }
+
+ ~OptionalAutoLock() {
+ if (locked_)
+ lock_.Release();
+ }
+
+ void EnsureAcquired() {
+ if (!locked_) {
+ lock_.Acquire();
+ locked_ = true;
+ }
+ }
+
+ private:
+ Lock& lock_;
+ bool locked_;
+ DISALLOW_COPY_AND_ASSIGN(OptionalAutoLock);
+};
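+
+// Usage sketch (illustrative; needs_shared_state stands for any condition
+// that requires the lock):
+//   void TraceLog::DoWork() {
+//     OptionalAutoLock lock(lock_);
+//     // ... lock-free fast path ...
+//     if (needs_shared_state)
+//       lock.EnsureAcquired();  // released automatically at end of scope
+//   }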
+
+// Use this function instead of the TraceEventHandle constructor to keep the
+// overhead of the ScopedTracer (trace_event.h) constructor to a minimum.
+void MakeHandle(uint32 chunk_seq, size_t chunk_index, size_t event_index,
+ TraceEventHandle* handle) {
+ DCHECK(chunk_seq);
+ DCHECK(chunk_index < (1u << 16));
+ DCHECK(event_index < (1u << 16));
+ handle->chunk_seq = chunk_seq;
+ handle->chunk_index = static_cast<uint16>(chunk_index);
+ handle->event_index = static_cast<uint16>(event_index);
+}
+
+////////////////////////////////////////////////////////////////////////////////
+//
+// TraceEvent
+//
+////////////////////////////////////////////////////////////////////////////////
+
+namespace {
+
+size_t GetAllocLength(const char* str) { return str ? strlen(str) + 1 : 0; }
+
+// Copies |*member| into |*buffer|, sets |*member| to point to this new
+// location, and then advances |*buffer| by the amount written.
+void CopyTraceEventParameter(char** buffer,
+ const char** member,
+ const char* end) {
+ if (*member) {
+ size_t written = strlcpy(*buffer, *member, end - *buffer) + 1;
+ DCHECK_LE(static_cast<int>(written), end - *buffer);
+ *member = *buffer;
+ *buffer += written;
+ }
+}
+
+} // namespace
+
+TraceEvent::TraceEvent()
+ : duration_(TimeDelta::FromInternalValue(-1)),
+ id_(0u),
+ category_group_enabled_(NULL),
+ name_(NULL),
+ thread_id_(0),
+ phase_(TRACE_EVENT_PHASE_BEGIN),
+ flags_(0) {
+ for (int i = 0; i < kTraceMaxNumArgs; ++i)
+ arg_names_[i] = NULL;
+ memset(arg_values_, 0, sizeof(arg_values_));
+}
+
+TraceEvent::~TraceEvent() {
+}
+
+void TraceEvent::CopyFrom(const TraceEvent& other) {
+ timestamp_ = other.timestamp_;
+ thread_timestamp_ = other.thread_timestamp_;
+ duration_ = other.duration_;
+ id_ = other.id_;
+ category_group_enabled_ = other.category_group_enabled_;
+ name_ = other.name_;
+ thread_id_ = other.thread_id_;
+ phase_ = other.phase_;
+ flags_ = other.flags_;
+ parameter_copy_storage_ = other.parameter_copy_storage_;
+
+ for (int i = 0; i < kTraceMaxNumArgs; ++i) {
+ arg_names_[i] = other.arg_names_[i];
+ arg_types_[i] = other.arg_types_[i];
+ arg_values_[i] = other.arg_values_[i];
+ convertable_values_[i] = other.convertable_values_[i];
+ }
+}
+
+void TraceEvent::Initialize(
+ int thread_id,
+ TimeTicks timestamp,
+ TimeTicks thread_timestamp,
+ char phase,
+ const unsigned char* category_group_enabled,
+ const char* name,
+ unsigned long long id,
+ int num_args,
+ const char** arg_names,
+ const unsigned char* arg_types,
+ const unsigned long long* arg_values,
+ const scoped_refptr<ConvertableToTraceFormat>* convertable_values,
+ unsigned char flags) {
+ timestamp_ = timestamp;
+ thread_timestamp_ = thread_timestamp;
+ duration_ = TimeDelta::FromInternalValue(-1);
+ id_ = id;
+ category_group_enabled_ = category_group_enabled;
+ name_ = name;
+ thread_id_ = thread_id;
+ phase_ = phase;
+ flags_ = flags;
+
+ // Clamp num_args since it may have been set by a third_party library.
+ num_args = (num_args > kTraceMaxNumArgs) ? kTraceMaxNumArgs : num_args;
+ int i = 0;
+ for (; i < num_args; ++i) {
+ arg_names_[i] = arg_names[i];
+ arg_types_[i] = arg_types[i];
+
+ if (arg_types[i] == TRACE_VALUE_TYPE_CONVERTABLE)
+ convertable_values_[i] = convertable_values[i];
+ else
+ arg_values_[i].as_uint = arg_values[i];
+ }
+ for (; i < kTraceMaxNumArgs; ++i) {
+ arg_names_[i] = NULL;
+ arg_values_[i].as_uint = 0u;
+ convertable_values_[i] = NULL;
+ arg_types_[i] = TRACE_VALUE_TYPE_UINT;
+ }
+
+ bool copy = !!(flags & TRACE_EVENT_FLAG_COPY);
+ size_t alloc_size = 0;
+ if (copy) {
+ alloc_size += GetAllocLength(name);
+ for (i = 0; i < num_args; ++i) {
+ alloc_size += GetAllocLength(arg_names_[i]);
+ if (arg_types_[i] == TRACE_VALUE_TYPE_STRING)
+ arg_types_[i] = TRACE_VALUE_TYPE_COPY_STRING;
+ }
+ }
+
+ bool arg_is_copy[kTraceMaxNumArgs];
+ for (i = 0; i < num_args; ++i) {
+    // No copying of convertable types; we retain ownership.
+ if (arg_types_[i] == TRACE_VALUE_TYPE_CONVERTABLE)
+ continue;
+
+ // We only take a copy of arg_vals if they are of type COPY_STRING.
+ arg_is_copy[i] = (arg_types_[i] == TRACE_VALUE_TYPE_COPY_STRING);
+ if (arg_is_copy[i])
+ alloc_size += GetAllocLength(arg_values_[i].as_string);
+ }
+
+ if (alloc_size) {
+ parameter_copy_storage_ = new RefCountedString;
+ parameter_copy_storage_->data().resize(alloc_size);
+ char* ptr = string_as_array(&parameter_copy_storage_->data());
+ const char* end = ptr + alloc_size;
+ if (copy) {
+ CopyTraceEventParameter(&ptr, &name_, end);
+ for (i = 0; i < num_args; ++i) {
+ CopyTraceEventParameter(&ptr, &arg_names_[i], end);
+ }
+ }
+ for (i = 0; i < num_args; ++i) {
+ if (arg_types_[i] == TRACE_VALUE_TYPE_CONVERTABLE)
+ continue;
+ if (arg_is_copy[i])
+ CopyTraceEventParameter(&ptr, &arg_values_[i].as_string, end);
+ }
+ DCHECK_EQ(end, ptr) << "Overrun by " << ptr - end;
+ }
+}
+
+void TraceEvent::Reset() {
+ // Only reset fields that won't be initialized in Initialize(), or that may
+ // hold references to other objects.
+ duration_ = TimeDelta::FromInternalValue(-1);
+ parameter_copy_storage_ = NULL;
+ for (int i = 0; i < kTraceMaxNumArgs; ++i)
+ convertable_values_[i] = NULL;
+}
+
+void TraceEvent::UpdateDuration(const TimeTicks& now,
+ const TimeTicks& thread_now) {
+ DCHECK(duration_.ToInternalValue() == -1);
+ duration_ = now - timestamp_;
+ thread_duration_ = thread_now - thread_timestamp_;
+}
+
+// static
+void TraceEvent::AppendValueAsJSON(unsigned char type,
+ TraceEvent::TraceValue value,
+ std::string* out) {
+ switch (type) {
+ case TRACE_VALUE_TYPE_BOOL:
+ *out += value.as_bool ? "true" : "false";
+ break;
+ case TRACE_VALUE_TYPE_UINT:
+ StringAppendF(out, "%" PRIu64, static_cast<uint64>(value.as_uint));
+ break;
+ case TRACE_VALUE_TYPE_INT:
+ StringAppendF(out, "%" PRId64, static_cast<int64>(value.as_int));
+ break;
+ case TRACE_VALUE_TYPE_DOUBLE: {
+      // FIXME: base/json/json_writer.cc is using the same code; this
+      // should be made into a common method.
+ std::string real;
+ double val = value.as_double;
+ if (IsFinite(val)) {
+ real = DoubleToString(val);
+ // Ensure that the number has a .0 if there's no decimal or 'e'. This
+ // makes sure that when we read the JSON back, it's interpreted as a
+ // real rather than an int.
+ if (real.find('.') == std::string::npos &&
+ real.find('e') == std::string::npos &&
+ real.find('E') == std::string::npos) {
+ real.append(".0");
+ }
+ // The JSON spec requires that non-integer values in the range (-1,1)
+ // have a zero before the decimal point - ".52" is not valid, "0.52" is.
+ if (real[0] == '.') {
+ real.insert(0, "0");
+ } else if (real.length() > 1 && real[0] == '-' && real[1] == '.') {
+ // "-.1" bad "-0.1" good
+ real.insert(1, "0");
+ }
+    } else if (IsNaN(val)) {
+ // The JSON spec doesn't allow NaN and Infinity (since these are
+ // objects in EcmaScript). Use strings instead.
+ real = "\"NaN\"";
+ } else if (val < 0) {
+ real = "\"-Infinity\"";
+ } else {
+ real = "\"Infinity\"";
+ }
+ StringAppendF(out, "%s", real.c_str());
+ break;
+ }
+ case TRACE_VALUE_TYPE_POINTER:
+ // JSON only supports double and int numbers.
+ // So as not to lose bits from a 64-bit pointer, output as a hex string.
+ StringAppendF(out, "\"0x%" PRIx64 "\"", static_cast<uint64>(
+ reinterpret_cast<intptr_t>(
+ value.as_pointer)));
+ break;
+ case TRACE_VALUE_TYPE_STRING:
+ case TRACE_VALUE_TYPE_COPY_STRING:
+ EscapeJSONString(value.as_string ? value.as_string : "NULL", true, out);
+ break;
+ default:
+ NOTREACHED() << "Don't know how to print this value";
+ break;
+ }
+}
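+
+// Illustrative results of the formatting above: a double 1 -> "1.0",
+// .52 -> "0.52", -.1 -> "-0.1", NaN -> "\"NaN\"", and a pointer is written
+// as a quoted hex string such as "0x7fff5fbff8c0".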
+
+void TraceEvent::AppendAsJSON(std::string* out) const {
+ int64 time_int64 = timestamp_.ToInternalValue();
+ int process_id = TraceLog::GetInstance()->process_id();
+ // Category group checked at category creation time.
+ DCHECK(!strchr(name_, '"'));
+ StringAppendF(out,
+ "{\"pid\":%i,\"tid\":%i,\"ts\":%" PRId64 ","
+ "\"ph\":\"%c\",\"cat\":\"%s\",\"name\":\"%s\",\"args\":{",
+ process_id,
+ thread_id_,
+ time_int64,
+ phase_,
+ TraceLog::GetCategoryGroupName(category_group_enabled_),
+ name_);
+
+  // Output argument names and values, stopping at the first NULL argument
+  // name.
+ for (int i = 0; i < kTraceMaxNumArgs && arg_names_[i]; ++i) {
+ if (i > 0)
+ *out += ",";
+ *out += "\"";
+ *out += arg_names_[i];
+ *out += "\":";
+
+ if (arg_types_[i] == TRACE_VALUE_TYPE_CONVERTABLE)
+ convertable_values_[i]->AppendAsTraceFormat(out);
+ else
+ AppendValueAsJSON(arg_types_[i], arg_values_[i], out);
+ }
+ *out += "}";
+
+ if (phase_ == TRACE_EVENT_PHASE_COMPLETE) {
+ int64 duration = duration_.ToInternalValue();
+ if (duration != -1)
+ StringAppendF(out, ",\"dur\":%" PRId64, duration);
+ if (!thread_timestamp_.is_null()) {
+ int64 thread_duration = thread_duration_.ToInternalValue();
+ if (thread_duration != -1)
+ StringAppendF(out, ",\"tdur\":%" PRId64, thread_duration);
+ }
+ }
+
+ // Output tts if thread_timestamp is valid.
+ if (!thread_timestamp_.is_null()) {
+ int64 thread_time_int64 = thread_timestamp_.ToInternalValue();
+ StringAppendF(out, ",\"tts\":%" PRId64, thread_time_int64);
+ }
+
+  // If id_ is set, print it out as a hex string so we don't lose any
+  // bits (it might be a 64-bit pointer).
+ if (flags_ & TRACE_EVENT_FLAG_HAS_ID)
+ StringAppendF(out, ",\"id\":\"0x%" PRIx64 "\"", static_cast<uint64>(id_));
+
+ // Instant events also output their scope.
+ if (phase_ == TRACE_EVENT_PHASE_INSTANT) {
+ char scope = '?';
+ switch (flags_ & TRACE_EVENT_FLAG_SCOPE_MASK) {
+ case TRACE_EVENT_SCOPE_GLOBAL:
+ scope = TRACE_EVENT_SCOPE_NAME_GLOBAL;
+ break;
+
+ case TRACE_EVENT_SCOPE_PROCESS:
+ scope = TRACE_EVENT_SCOPE_NAME_PROCESS;
+ break;
+
+ case TRACE_EVENT_SCOPE_THREAD:
+ scope = TRACE_EVENT_SCOPE_NAME_THREAD;
+ break;
+ }
+ StringAppendF(out, ",\"s\":\"%c\"", scope);
+ }
+
+ *out += "}";
+}
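+
+// Example of the JSON produced above for a COMPLETE ('X') event (illustrative
+// values; "id" and "s" appear only when the corresponding flag or phase
+// applies):
+//   {"pid":1234,"tid":5678,"ts":100,"ph":"X","cat":"cc","name":"Draw",
+//    "args":{"frame":3},"dur":250,"tdur":200,"tts":90}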
+
+void TraceEvent::AppendPrettyPrinted(std::ostringstream* out) const {
+ *out << name_ << "[";
+ *out << TraceLog::GetCategoryGroupName(category_group_enabled_);
+ *out << "]";
+ if (arg_names_[0]) {
+ *out << ", {";
+ for (int i = 0; i < kTraceMaxNumArgs && arg_names_[i]; ++i) {
+ if (i > 0)
+ *out << ", ";
+ *out << arg_names_[i] << ":";
+ std::string value_as_text;
+
+ if (arg_types_[i] == TRACE_VALUE_TYPE_CONVERTABLE)
+ convertable_values_[i]->AppendAsTraceFormat(&value_as_text);
+ else
+ AppendValueAsJSON(arg_types_[i], arg_values_[i], &value_as_text);
+
+ *out << value_as_text;
+ }
+ *out << "}";
+ }
+}
+
+////////////////////////////////////////////////////////////////////////////////
+//
+// TraceResultBuffer
+//
+////////////////////////////////////////////////////////////////////////////////
+
+TraceResultBuffer::OutputCallback
+ TraceResultBuffer::SimpleOutput::GetCallback() {
+ return Bind(&SimpleOutput::Append, Unretained(this));
+}
+
+void TraceResultBuffer::SimpleOutput::Append(
+ const std::string& json_trace_output) {
+ json_output += json_trace_output;
+}
+
+TraceResultBuffer::TraceResultBuffer() : append_comma_(false) {
+}
+
+TraceResultBuffer::~TraceResultBuffer() {
+}
+
+void TraceResultBuffer::SetOutputCallback(
+ const OutputCallback& json_chunk_callback) {
+ output_callback_ = json_chunk_callback;
+}
+
+void TraceResultBuffer::Start() {
+ append_comma_ = false;
+ output_callback_.Run("[");
+}
+
+void TraceResultBuffer::AddFragment(const std::string& trace_fragment) {
+ if (append_comma_)
+ output_callback_.Run(",");
+ append_comma_ = true;
+ output_callback_.Run(trace_fragment);
+}
+
+void TraceResultBuffer::Finish() {
+ output_callback_.Run("]");
+}
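+
+// A minimal usage sketch (illustrative only): the buffer emits "[", the
+// comma-separated fragments, then "]" through the callback.
+//   TraceResultBuffer buffer;
+//   TraceResultBuffer::SimpleOutput output;
+//   buffer.SetOutputCallback(output.GetCallback());
+//   buffer.Start();
+//   buffer.AddFragment("{\"name\":\"a\"}");
+//   buffer.AddFragment("{\"name\":\"b\"}");
+//   buffer.Finish();
+//   // output.json_output == "[{\"name\":\"a\"},{\"name\":\"b\"}]"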
+
+////////////////////////////////////////////////////////////////////////////////
+//
+// TraceSamplingThread
+//
+////////////////////////////////////////////////////////////////////////////////
+class TraceBucketData;
+typedef base::Callback<void(TraceBucketData*)> TraceSampleCallback;
+
+class TraceBucketData {
+ public:
+ TraceBucketData(base::subtle::AtomicWord* bucket,
+ const char* name,
+ TraceSampleCallback callback);
+ ~TraceBucketData();
+
+ TRACE_EVENT_API_ATOMIC_WORD* bucket;
+ const char* bucket_name;
+ TraceSampleCallback callback;
+};
+
+// This object must be created on the IO thread.
+class TraceSamplingThread : public PlatformThread::Delegate {
+ public:
+ TraceSamplingThread();
+ ~TraceSamplingThread() override;
+
+ // Implementation of PlatformThread::Delegate:
+ void ThreadMain() override;
+
+  static void DefaultSamplingCallback(TraceBucketData* bucket_data);
+
+ void Stop();
+ void WaitSamplingEventForTesting();
+
+ private:
+ friend class TraceLog;
+
+ void GetSamples();
+  // Not thread-safe. Once ThreadMain has been called, this can no longer
+  // be called.
+ void RegisterSampleBucket(TRACE_EVENT_API_ATOMIC_WORD* bucket,
+ const char* const name,
+ TraceSampleCallback callback);
+ // Splits a combined "category\0name" into the two component parts.
+ static void ExtractCategoryAndName(const char* combined,
+ const char** category,
+ const char** name);
+ std::vector<TraceBucketData> sample_buckets_;
+ bool thread_running_;
+ CancellationFlag cancellation_flag_;
+ WaitableEvent waitable_event_for_testing_;
+};
+
+TraceSamplingThread::TraceSamplingThread()
+ : thread_running_(false),
+ waitable_event_for_testing_(false, false) {
+}
+
+TraceSamplingThread::~TraceSamplingThread() {
+}
+
+void TraceSamplingThread::ThreadMain() {
+ PlatformThread::SetName("Sampling Thread");
+ thread_running_ = true;
+ const int kSamplingFrequencyMicroseconds = 1000;
+ while (!cancellation_flag_.IsSet()) {
+ PlatformThread::Sleep(
+ TimeDelta::FromMicroseconds(kSamplingFrequencyMicroseconds));
+ GetSamples();
+ waitable_event_for_testing_.Signal();
+ }
+}
+
+// static
+void TraceSamplingThread::DefaultSamplingCallback(
+ TraceBucketData* bucket_data) {
+ TRACE_EVENT_API_ATOMIC_WORD category_and_name =
+ TRACE_EVENT_API_ATOMIC_LOAD(*bucket_data->bucket);
+ if (!category_and_name)
+ return;
+ const char* const combined =
+ reinterpret_cast<const char* const>(category_and_name);
+ const char* category_group;
+ const char* name;
+ ExtractCategoryAndName(combined, &category_group, &name);
+ TRACE_EVENT_API_ADD_TRACE_EVENT(TRACE_EVENT_PHASE_SAMPLE,
+ TraceLog::GetCategoryGroupEnabled(category_group),
+ name, 0, 0, NULL, NULL, NULL, NULL, 0);
+}
+
+void TraceSamplingThread::GetSamples() {
+ for (size_t i = 0; i < sample_buckets_.size(); ++i) {
+ TraceBucketData* bucket_data = &sample_buckets_[i];
+ bucket_data->callback.Run(bucket_data);
+ }
+}
+
+void TraceSamplingThread::RegisterSampleBucket(
+ TRACE_EVENT_API_ATOMIC_WORD* bucket,
+ const char* const name,
+ TraceSampleCallback callback) {
+  // Access to sample_buckets_ doesn't race with the sampling thread, because
+  // RegisterSampleBucket is guaranteed to be called before the sampling
+  // thread is created.
+ DCHECK(!thread_running_);
+ sample_buckets_.push_back(TraceBucketData(bucket, name, callback));
+}
+
+// static
+void TraceSamplingThread::ExtractCategoryAndName(const char* combined,
+ const char** category,
+ const char** name) {
+ *category = combined;
+ *name = &combined[strlen(combined) + 1];
+}
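+
+// For example, given a combined buffer holding "gpu\0Swap", *category ends up
+// pointing at "gpu" and *name at "Swap" (one past the embedded NUL).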
+
+void TraceSamplingThread::Stop() {
+ cancellation_flag_.Set();
+}
+
+void TraceSamplingThread::WaitSamplingEventForTesting() {
+ waitable_event_for_testing_.Wait();
+}
+
+TraceBucketData::TraceBucketData(base::subtle::AtomicWord* bucket,
+ const char* name,
+ TraceSampleCallback callback)
+ : bucket(bucket),
+ bucket_name(name),
+ callback(callback) {
+}
+
+TraceBucketData::~TraceBucketData() {
+}
+
+////////////////////////////////////////////////////////////////////////////////
+//
+// TraceOptions
+//
+////////////////////////////////////////////////////////////////////////////////
+
+bool TraceOptions::SetFromString(const std::string& options_string) {
+ record_mode = RECORD_UNTIL_FULL;
+ enable_sampling = false;
+ enable_systrace = false;
+
+ std::vector<std::string> split;
+ std::vector<std::string>::iterator iter;
+ base::SplitString(options_string, ',', &split);
+ for (iter = split.begin(); iter != split.end(); ++iter) {
+ if (*iter == kRecordUntilFull) {
+ record_mode = RECORD_UNTIL_FULL;
+ } else if (*iter == kRecordContinuously) {
+ record_mode = RECORD_CONTINUOUSLY;
+ } else if (*iter == kTraceToConsole) {
+ record_mode = ECHO_TO_CONSOLE;
+ } else if (*iter == kRecordAsMuchAsPossible) {
+ record_mode = RECORD_AS_MUCH_AS_POSSIBLE;
+ } else if (*iter == kEnableSampling) {
+ enable_sampling = true;
+ } else if (*iter == kEnableSystrace) {
+ enable_systrace = true;
+ } else {
+ return false;
+ }
+ }
+ return true;
+}
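+
+// For example (assuming the k* constants hold their usual literal values,
+// e.g. kRecordUntilFull == "record-until-full"):
+//   TraceOptions options;
+//   bool ok = options.SetFromString("record-until-full,enable-sampling");
+//   // ok == true, options.record_mode == RECORD_UNTIL_FULL,
+//   // options.enable_sampling == true, options.enable_systrace == false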
+
+std::string TraceOptions::ToString() const {
+ std::string ret;
+ switch (record_mode) {
+ case RECORD_UNTIL_FULL:
+ ret = kRecordUntilFull;
+ break;
+ case RECORD_CONTINUOUSLY:
+ ret = kRecordContinuously;
+ break;
+ case ECHO_TO_CONSOLE:
+ ret = kTraceToConsole;
+ break;
+ case RECORD_AS_MUCH_AS_POSSIBLE:
+ ret = kRecordAsMuchAsPossible;
+ break;
+ default:
+ NOTREACHED();
+ }
+ if (enable_sampling)
+ ret = ret + "," + kEnableSampling;
+ if (enable_systrace)
+ ret = ret + "," + kEnableSystrace;
+ return ret;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+//
+// TraceLog
+//
+////////////////////////////////////////////////////////////////////////////////
+
+class TraceLog::ThreadLocalEventBuffer
+ : public MessageLoop::DestructionObserver {
+ public:
+ ThreadLocalEventBuffer(TraceLog* trace_log);
+ ~ThreadLocalEventBuffer() override;
+
+ TraceEvent* AddTraceEvent(TraceEventHandle* handle);
+
+ void ReportOverhead(const TimeTicks& event_timestamp,
+ const TimeTicks& event_thread_timestamp);
+
+ TraceEvent* GetEventByHandle(TraceEventHandle handle) {
+ if (!chunk_ || handle.chunk_seq != chunk_->seq() ||
+ handle.chunk_index != chunk_index_)
+ return NULL;
+
+ return chunk_->GetEventAt(handle.event_index);
+ }
+
+ int generation() const { return generation_; }
+
+ private:
+ // MessageLoop::DestructionObserver
+ void WillDestroyCurrentMessageLoop() override;
+
+ void FlushWhileLocked();
+
+ void CheckThisIsCurrentBuffer() const {
+ DCHECK(trace_log_->thread_local_event_buffer_.Get() == this);
+ }
+
+ // Since TraceLog is a leaky singleton, trace_log_ will always be valid
+ // as long as the thread exists.
+ TraceLog* trace_log_;
+ scoped_ptr<TraceBufferChunk> chunk_;
+ size_t chunk_index_;
+ int event_count_;
+ TimeDelta overhead_;
+ int generation_;
+
+ DISALLOW_COPY_AND_ASSIGN(ThreadLocalEventBuffer);
+};
+
+TraceLog::ThreadLocalEventBuffer::ThreadLocalEventBuffer(TraceLog* trace_log)
+ : trace_log_(trace_log),
+ chunk_index_(0),
+ event_count_(0),
+ generation_(trace_log->generation()) {
+ // ThreadLocalEventBuffer is created only if the thread has a message loop, so
+ // the following message_loop won't be NULL.
+ MessageLoop* message_loop = MessageLoop::current();
+ message_loop->AddDestructionObserver(this);
+
+ AutoLock lock(trace_log->lock_);
+ trace_log->thread_message_loops_.insert(message_loop);
+}
+
+TraceLog::ThreadLocalEventBuffer::~ThreadLocalEventBuffer() {
+ CheckThisIsCurrentBuffer();
+ MessageLoop::current()->RemoveDestructionObserver(this);
+
+  // Zero event_count_ happens in any of the following cases:
+ // - no event generated for the thread;
+ // - the thread has no message loop;
+ // - trace_event_overhead is disabled.
+ if (event_count_) {
+ InitializeMetadataEvent(AddTraceEvent(NULL),
+ static_cast<int>(base::PlatformThread::CurrentId()),
+ "overhead", "average_overhead",
+ overhead_.InMillisecondsF() / event_count_);
+ }
+
+ {
+ AutoLock lock(trace_log_->lock_);
+ FlushWhileLocked();
+ trace_log_->thread_message_loops_.erase(MessageLoop::current());
+ }
+ trace_log_->thread_local_event_buffer_.Set(NULL);
+}
+
+TraceEvent* TraceLog::ThreadLocalEventBuffer::AddTraceEvent(
+ TraceEventHandle* handle) {
+ CheckThisIsCurrentBuffer();
+
+ if (chunk_ && chunk_->IsFull()) {
+ AutoLock lock(trace_log_->lock_);
+ FlushWhileLocked();
+ chunk_.reset();
+ }
+ if (!chunk_) {
+ AutoLock lock(trace_log_->lock_);
+ chunk_ = trace_log_->logged_events_->GetChunk(&chunk_index_);
+ trace_log_->CheckIfBufferIsFullWhileLocked();
+ }
+ if (!chunk_)
+ return NULL;
+
+ size_t event_index;
+ TraceEvent* trace_event = chunk_->AddTraceEvent(&event_index);
+ if (trace_event && handle)
+ MakeHandle(chunk_->seq(), chunk_index_, event_index, handle);
+
+ return trace_event;
+}
+
+void TraceLog::ThreadLocalEventBuffer::ReportOverhead(
+ const TimeTicks& event_timestamp,
+ const TimeTicks& event_thread_timestamp) {
+ if (!g_category_group_enabled[g_category_trace_event_overhead])
+ return;
+
+ CheckThisIsCurrentBuffer();
+
+ event_count_++;
+ TimeTicks thread_now = ThreadNow();
+ TimeTicks now = trace_log_->OffsetNow();
+ TimeDelta overhead = now - event_timestamp;
+ if (overhead.InMicroseconds() >= kOverheadReportThresholdInMicroseconds) {
+ TraceEvent* trace_event = AddTraceEvent(NULL);
+ if (trace_event) {
+ trace_event->Initialize(
+ static_cast<int>(PlatformThread::CurrentId()),
+ event_timestamp, event_thread_timestamp,
+ TRACE_EVENT_PHASE_COMPLETE,
+ &g_category_group_enabled[g_category_trace_event_overhead],
+ "overhead", 0, 0, NULL, NULL, NULL, NULL, 0);
+ trace_event->UpdateDuration(now, thread_now);
+ }
+ }
+ overhead_ += overhead;
+}
+
+void TraceLog::ThreadLocalEventBuffer::WillDestroyCurrentMessageLoop() {
+ delete this;
+}
+
+void TraceLog::ThreadLocalEventBuffer::FlushWhileLocked() {
+ if (!chunk_)
+ return;
+
+ trace_log_->lock_.AssertAcquired();
+ if (trace_log_->CheckGeneration(generation_)) {
+ // Return the chunk to the buffer only if the generation matches.
+ trace_log_->logged_events_->ReturnChunk(chunk_index_, chunk_.Pass());
+ }
+  // Otherwise, either this method is being called from the destructor, or
+  // TraceLog will notice the generation mismatch and delete this buffer soon.
+}
+
+TraceLogStatus::TraceLogStatus() : event_capacity(0), event_count(0) {
+}
+
+TraceLogStatus::~TraceLogStatus() {
+}
+
+// static
+TraceLog* TraceLog::GetInstance() {
+ return Singleton<TraceLog, LeakySingletonTraits<TraceLog> >::get();
+}
+
+TraceLog::TraceLog()
+ : mode_(DISABLED),
+ num_traces_recorded_(0),
+ event_callback_(0),
+ dispatching_to_observer_list_(false),
+ process_sort_index_(0),
+ process_id_hash_(0),
+ process_id_(0),
+ watch_category_(0),
+ trace_options_(kInternalRecordUntilFull),
+ sampling_thread_handle_(0),
+ category_filter_(CategoryFilter::kDefaultCategoryFilterString),
+ event_callback_category_filter_(
+ CategoryFilter::kDefaultCategoryFilterString),
+ thread_shared_chunk_index_(0),
+ generation_(0) {
+ // Trace is enabled or disabled on one thread while other threads are
+ // accessing the enabled flag. We don't care whether edge-case events are
+ // traced or not, so we allow races on the enabled flag to keep the trace
+ // macros fast.
+ // TODO(jbates): ANNOTATE_BENIGN_RACE_SIZED crashes windows TSAN bots:
+ // ANNOTATE_BENIGN_RACE_SIZED(g_category_group_enabled,
+ // sizeof(g_category_group_enabled),
+ // "trace_event category enabled");
+ for (int i = 0; i < MAX_CATEGORY_GROUPS; ++i) {
+ ANNOTATE_BENIGN_RACE(&g_category_group_enabled[i],
+ "trace_event category enabled");
+ }
+#if defined(OS_NACL) // NaCl shouldn't expose the process id.
+ SetProcessID(0);
+#else
+ SetProcessID(static_cast<int>(GetCurrentProcId()));
+
+ // NaCl also shouldn't access the command line.
+ if (CommandLine::InitializedForCurrentProcess() &&
+ CommandLine::ForCurrentProcess()->HasSwitch(switches::kTraceToConsole)) {
+ std::string filter = CommandLine::ForCurrentProcess()->GetSwitchValueASCII(
+ switches::kTraceToConsole);
+ if (filter.empty()) {
+ filter = kEchoToConsoleCategoryFilter;
+ } else {
+ filter.append(",");
+ filter.append(kEchoToConsoleCategoryFilter);
+ }
+
+ LOG(ERROR) << "Start " << switches::kTraceToConsole
+ << " with CategoryFilter '" << filter << "'.";
+ SetEnabled(CategoryFilter(filter),
+ RECORDING_MODE,
+ TraceOptions(ECHO_TO_CONSOLE));
+ }
+#endif
+
+ logged_events_.reset(CreateTraceBuffer());
+}
+
+TraceLog::~TraceLog() {
+}
+
+const unsigned char* TraceLog::GetCategoryGroupEnabled(
+ const char* category_group) {
+ TraceLog* tracelog = GetInstance();
+ if (!tracelog) {
+ DCHECK(!g_category_group_enabled[g_category_already_shutdown]);
+ return &g_category_group_enabled[g_category_already_shutdown];
+ }
+ return tracelog->GetCategoryGroupEnabledInternal(category_group);
+}
+
+const char* TraceLog::GetCategoryGroupName(
+ const unsigned char* category_group_enabled) {
+ // Calculate the index of the category group by finding
+ // category_group_enabled in g_category_group_enabled array.
+ uintptr_t category_begin =
+ reinterpret_cast<uintptr_t>(g_category_group_enabled);
+ uintptr_t category_ptr = reinterpret_cast<uintptr_t>(category_group_enabled);
+ DCHECK(category_ptr >= category_begin &&
+ category_ptr < reinterpret_cast<uintptr_t>(
+ g_category_group_enabled + MAX_CATEGORY_GROUPS)) <<
+ "out of bounds category pointer";
+ uintptr_t category_index =
+ (category_ptr - category_begin) / sizeof(g_category_group_enabled[0]);
+ return g_category_groups[category_index];
+}
+
+void TraceLog::UpdateCategoryGroupEnabledFlag(size_t category_index) {
+ unsigned char enabled_flag = 0;
+ const char* category_group = g_category_groups[category_index];
+ if (mode_ == RECORDING_MODE &&
+ category_filter_.IsCategoryGroupEnabled(category_group))
+ enabled_flag |= ENABLED_FOR_RECORDING;
+ else if (mode_ == MONITORING_MODE &&
+ category_filter_.IsCategoryGroupEnabled(category_group))
+ enabled_flag |= ENABLED_FOR_MONITORING;
+ if (event_callback_ &&
+ event_callback_category_filter_.IsCategoryGroupEnabled(category_group))
+ enabled_flag |= ENABLED_FOR_EVENT_CALLBACK;
+ g_category_group_enabled[category_index] = enabled_flag;
+}
+
+void TraceLog::UpdateCategoryGroupEnabledFlags() {
+ size_t category_index = base::subtle::NoBarrier_Load(&g_category_index);
+ for (size_t i = 0; i < category_index; i++)
+ UpdateCategoryGroupEnabledFlag(i);
+}
+
+void TraceLog::UpdateSyntheticDelaysFromCategoryFilter() {
+ ResetTraceEventSyntheticDelays();
+ const CategoryFilter::StringList& delays =
+ category_filter_.GetSyntheticDelayValues();
+ CategoryFilter::StringList::const_iterator ci;
+ for (ci = delays.begin(); ci != delays.end(); ++ci) {
+ StringTokenizer tokens(*ci, ";");
+ if (!tokens.GetNext())
+ continue;
+ TraceEventSyntheticDelay* delay =
+ TraceEventSyntheticDelay::Lookup(tokens.token());
+ while (tokens.GetNext()) {
+ std::string token = tokens.token();
+ char* duration_end;
+ double target_duration = strtod(token.c_str(), &duration_end);
+ if (duration_end != token.c_str()) {
+ delay->SetTargetDuration(TimeDelta::FromMicroseconds(
+ static_cast<int64>(target_duration * 1e6)));
+ } else if (token == "static") {
+ delay->SetMode(TraceEventSyntheticDelay::STATIC);
+ } else if (token == "oneshot") {
+ delay->SetMode(TraceEventSyntheticDelay::ONE_SHOT);
+ } else if (token == "alternating") {
+ delay->SetMode(TraceEventSyntheticDelay::ALTERNATING);
+ }
+ }
+ }
+}
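+
+// For example (sketch), the delay value "gpu.Swap;0.016;oneshot" looks up the
+// "gpu.Swap" delay, sets its target duration to 0.016s (16000us; the numeric
+// token is interpreted as seconds) and switches it to ONE_SHOT mode.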
+
+const unsigned char* TraceLog::GetCategoryGroupEnabledInternal(
+ const char* category_group) {
+ DCHECK(!strchr(category_group, '"')) <<
+ "Category groups may not contain double quote";
+  // g_category_groups is append-only, so avoid taking a lock on the fast path.
+ size_t current_category_index = base::subtle::Acquire_Load(&g_category_index);
+
+ // Search for pre-existing category group.
+ for (size_t i = 0; i < current_category_index; ++i) {
+ if (strcmp(g_category_groups[i], category_group) == 0) {
+ return &g_category_group_enabled[i];
+ }
+ }
+
+ unsigned char* category_group_enabled = NULL;
+  // This is the slow path: the lock is not held in the case above, so more
+  // than one thread could have reached here trying to add the same category.
+  // Only hold the lock when actually appending a new category, and
+  // check the category groups again.
+ AutoLock lock(lock_);
+ size_t category_index = base::subtle::Acquire_Load(&g_category_index);
+ for (size_t i = 0; i < category_index; ++i) {
+ if (strcmp(g_category_groups[i], category_group) == 0) {
+ return &g_category_group_enabled[i];
+ }
+ }
+
+ // Create a new category group.
+ DCHECK(category_index < MAX_CATEGORY_GROUPS) <<
+ "must increase MAX_CATEGORY_GROUPS";
+ if (category_index < MAX_CATEGORY_GROUPS) {
+ // Don't hold on to the category_group pointer, so that we can create
+ // category groups with strings not known at compile time (this is
+ // required by SetWatchEvent).
+ const char* new_group = strdup(category_group);
+ ANNOTATE_LEAKING_OBJECT_PTR(new_group);
+ g_category_groups[category_index] = new_group;
+ DCHECK(!g_category_group_enabled[category_index]);
+ // Note that if both included and excluded patterns in the
+ // CategoryFilter are empty, we exclude nothing,
+ // thereby enabling this category group.
+ UpdateCategoryGroupEnabledFlag(category_index);
+ category_group_enabled = &g_category_group_enabled[category_index];
+ // Update the max index now.
+ base::subtle::Release_Store(&g_category_index, category_index + 1);
+ } else {
+ category_group_enabled =
+ &g_category_group_enabled[g_category_categories_exhausted];
+ }
+ return category_group_enabled;
+}
+
+void TraceLog::GetKnownCategoryGroups(
+ std::vector<std::string>* category_groups) {
+ AutoLock lock(lock_);
+ category_groups->push_back(
+ g_category_groups[g_category_trace_event_overhead]);
+ size_t category_index = base::subtle::NoBarrier_Load(&g_category_index);
+ for (size_t i = g_num_builtin_categories; i < category_index; i++)
+ category_groups->push_back(g_category_groups[i]);
+}
+
+void TraceLog::SetEnabled(const CategoryFilter& category_filter,
+ Mode mode,
+ const TraceOptions& options) {
+ std::vector<EnabledStateObserver*> observer_list;
+ {
+ AutoLock lock(lock_);
+
+ // Can't enable tracing when Flush() is in progress.
+ DCHECK(!flush_message_loop_proxy_.get());
+
+ InternalTraceOptions new_options =
+ GetInternalOptionsFromTraceOptions(options);
+
+ InternalTraceOptions old_options = trace_options();
+
+ if (IsEnabled()) {
+ if (new_options != old_options) {
+ DLOG(ERROR) << "Attempting to re-enable tracing with a different "
+ << "set of options.";
+ }
+
+ if (mode != mode_) {
+ DLOG(ERROR) << "Attempting to re-enable tracing with a different mode.";
+ }
+
+ category_filter_.Merge(category_filter);
+ UpdateCategoryGroupEnabledFlags();
+ return;
+ }
+
+ if (dispatching_to_observer_list_) {
+ DLOG(ERROR) <<
+ "Cannot manipulate TraceLog::Enabled state from an observer.";
+ return;
+ }
+
+ mode_ = mode;
+
+ if (new_options != old_options) {
+ subtle::NoBarrier_Store(&trace_options_, new_options);
+ UseNextTraceBuffer();
+ }
+
+ num_traces_recorded_++;
+
+ category_filter_ = CategoryFilter(category_filter);
+ UpdateCategoryGroupEnabledFlags();
+ UpdateSyntheticDelaysFromCategoryFilter();
+
+ if (new_options & kInternalEnableSampling) {
+ sampling_thread_.reset(new TraceSamplingThread);
+ sampling_thread_->RegisterSampleBucket(
+ &g_trace_state[0],
+ "bucket0",
+ Bind(&TraceSamplingThread::DefaultSamplingCallback));
+ sampling_thread_->RegisterSampleBucket(
+ &g_trace_state[1],
+ "bucket1",
+ Bind(&TraceSamplingThread::DefaultSamplingCallback));
+ sampling_thread_->RegisterSampleBucket(
+ &g_trace_state[2],
+ "bucket2",
+ Bind(&TraceSamplingThread::DefaultSamplingCallback));
+ if (!PlatformThread::Create(
+ 0, sampling_thread_.get(), &sampling_thread_handle_)) {
+ DCHECK(false) << "failed to create thread";
+ }
+ }
+
+ dispatching_to_observer_list_ = true;
+ observer_list = enabled_state_observer_list_;
+ }
+ // Notify observers outside the lock in case they trigger trace events.
+ for (size_t i = 0; i < observer_list.size(); ++i)
+ observer_list[i]->OnTraceLogEnabled();
+
+ {
+ AutoLock lock(lock_);
+ dispatching_to_observer_list_ = false;
+ }
+}
+
+TraceLog::InternalTraceOptions TraceLog::GetInternalOptionsFromTraceOptions(
+ const TraceOptions& options) {
+ InternalTraceOptions ret =
+ options.enable_sampling ? kInternalEnableSampling : kInternalNone;
+ switch (options.record_mode) {
+ case RECORD_UNTIL_FULL:
+ return ret | kInternalRecordUntilFull;
+ case RECORD_CONTINUOUSLY:
+ return ret | kInternalRecordContinuously;
+ case ECHO_TO_CONSOLE:
+ return ret | kInternalEchoToConsole;
+ case RECORD_AS_MUCH_AS_POSSIBLE:
+ return ret | kInternalRecordAsMuchAsPossible;
+ }
+ NOTREACHED();
+ return kInternalNone;
+}
+
+CategoryFilter TraceLog::GetCurrentCategoryFilter() {
+ AutoLock lock(lock_);
+ return category_filter_;
+}
+
+TraceOptions TraceLog::GetCurrentTraceOptions() const {
+ TraceOptions ret;
+ InternalTraceOptions option = trace_options();
+ ret.enable_sampling = (option & kInternalEnableSampling) != 0;
+ if (option & kInternalRecordUntilFull)
+ ret.record_mode = RECORD_UNTIL_FULL;
+ else if (option & kInternalRecordContinuously)
+ ret.record_mode = RECORD_CONTINUOUSLY;
+ else if (option & kInternalEchoToConsole)
+ ret.record_mode = ECHO_TO_CONSOLE;
+ else if (option & kInternalRecordAsMuchAsPossible)
+ ret.record_mode = RECORD_AS_MUCH_AS_POSSIBLE;
+ else
+ NOTREACHED();
+ return ret;
+}
+
+void TraceLog::SetDisabled() {
+ AutoLock lock(lock_);
+ SetDisabledWhileLocked();
+}
+
+void TraceLog::SetDisabledWhileLocked() {
+ lock_.AssertAcquired();
+
+ if (!IsEnabled())
+ return;
+
+ if (dispatching_to_observer_list_) {
+ DLOG(ERROR)
+ << "Cannot manipulate TraceLog::Enabled state from an observer.";
+ return;
+ }
+
+ mode_ = DISABLED;
+
+ if (sampling_thread_.get()) {
+ // Stop the sampling thread.
+ sampling_thread_->Stop();
+ lock_.Release();
+ PlatformThread::Join(sampling_thread_handle_);
+ lock_.Acquire();
+ sampling_thread_handle_ = PlatformThreadHandle();
+ sampling_thread_.reset();
+ }
+
+ category_filter_.Clear();
+ subtle::NoBarrier_Store(&watch_category_, 0);
+ watch_event_name_ = "";
+ UpdateCategoryGroupEnabledFlags();
+ AddMetadataEventsWhileLocked();
+
+ dispatching_to_observer_list_ = true;
+ std::vector<EnabledStateObserver*> observer_list =
+ enabled_state_observer_list_;
+
+ {
+ // Dispatch to observers outside the lock in case the observer triggers a
+ // trace event.
+ AutoUnlock unlock(lock_);
+ for (size_t i = 0; i < observer_list.size(); ++i)
+ observer_list[i]->OnTraceLogDisabled();
+ }
+ dispatching_to_observer_list_ = false;
+}
+
+int TraceLog::GetNumTracesRecorded() {
+ AutoLock lock(lock_);
+ if (!IsEnabled())
+ return -1;
+ return num_traces_recorded_;
+}
+
+void TraceLog::AddEnabledStateObserver(EnabledStateObserver* listener) {
+ enabled_state_observer_list_.push_back(listener);
+}
+
+void TraceLog::RemoveEnabledStateObserver(EnabledStateObserver* listener) {
+ std::vector<EnabledStateObserver*>::iterator it =
+ std::find(enabled_state_observer_list_.begin(),
+ enabled_state_observer_list_.end(),
+ listener);
+ if (it != enabled_state_observer_list_.end())
+ enabled_state_observer_list_.erase(it);
+}
+
+bool TraceLog::HasEnabledStateObserver(EnabledStateObserver* listener) const {
+ std::vector<EnabledStateObserver*>::const_iterator it =
+ std::find(enabled_state_observer_list_.begin(),
+ enabled_state_observer_list_.end(),
+ listener);
+ return it != enabled_state_observer_list_.end();
+}
+
+TraceLogStatus TraceLog::GetStatus() const {
+ AutoLock lock(lock_);
+ TraceLogStatus result;
+ result.event_capacity = logged_events_->Capacity();
+ result.event_count = logged_events_->Size();
+ return result;
+}
+
+bool TraceLog::BufferIsFull() const {
+ AutoLock lock(lock_);
+ return logged_events_->IsFull();
+}
+
+TraceBuffer* TraceLog::CreateTraceBuffer() {
+ InternalTraceOptions options = trace_options();
+ if (options & kInternalRecordContinuously)
+ return new TraceBufferRingBuffer(kTraceEventRingBufferChunks);
+ else if ((options & kInternalEnableSampling) && mode_ == MONITORING_MODE)
+ return new TraceBufferRingBuffer(kMonitorTraceEventBufferChunks);
+ else if (options & kInternalEchoToConsole)
+ return new TraceBufferRingBuffer(kEchoToConsoleTraceEventBufferChunks);
+ else if (options & kInternalRecordAsMuchAsPossible)
+ return CreateTraceBufferVectorOfSize(kTraceEventVectorBigBufferChunks);
+ return CreateTraceBufferVectorOfSize(kTraceEventVectorBufferChunks);
+}
+
+TraceBuffer* TraceLog::CreateTraceBufferVectorOfSize(size_t max_chunks) {
+ return new TraceBufferVector(max_chunks);
+}
+
+TraceEvent* TraceLog::AddEventToThreadSharedChunkWhileLocked(
+ TraceEventHandle* handle, bool check_buffer_is_full) {
+ lock_.AssertAcquired();
+
+ if (thread_shared_chunk_ && thread_shared_chunk_->IsFull()) {
+ logged_events_->ReturnChunk(thread_shared_chunk_index_,
+ thread_shared_chunk_.Pass());
+ }
+
+ if (!thread_shared_chunk_) {
+ thread_shared_chunk_ = logged_events_->GetChunk(
+ &thread_shared_chunk_index_);
+ if (check_buffer_is_full)
+ CheckIfBufferIsFullWhileLocked();
+ }
+ if (!thread_shared_chunk_)
+ return NULL;
+
+ size_t event_index;
+ TraceEvent* trace_event = thread_shared_chunk_->AddTraceEvent(&event_index);
+ if (trace_event && handle) {
+ MakeHandle(thread_shared_chunk_->seq(), thread_shared_chunk_index_,
+ event_index, handle);
+ }
+ return trace_event;
+}
+
+void TraceLog::CheckIfBufferIsFullWhileLocked() {
+ lock_.AssertAcquired();
+ if (logged_events_->IsFull()) {
+ if (buffer_limit_reached_timestamp_.is_null()) {
+ buffer_limit_reached_timestamp_ = OffsetNow();
+ }
+ SetDisabledWhileLocked();
+ }
+}
+
+void TraceLog::SetEventCallbackEnabled(const CategoryFilter& category_filter,
+ EventCallback cb) {
+ AutoLock lock(lock_);
+ subtle::NoBarrier_Store(&event_callback_,
+ reinterpret_cast<subtle::AtomicWord>(cb));
+ event_callback_category_filter_ = category_filter;
+ UpdateCategoryGroupEnabledFlags();
+}
+
+void TraceLog::SetEventCallbackDisabled() {
+ AutoLock lock(lock_);
+ subtle::NoBarrier_Store(&event_callback_, 0);
+ UpdateCategoryGroupEnabledFlags();
+}
+
+// Flush() works as the following:
+// 1. Flush() is called in threadA whose message loop is saved in
+// flush_message_loop_proxy_;
+// 2. If thread_message_loops_ is not empty, threadA posts task to each message
+// loop to flush the thread local buffers; otherwise finish the flush;
+// 3. FlushCurrentThread() deletes the thread local event buffer:
+// - The last batch of events of the thread are flushed into the main buffer;
+// - The message loop will be removed from thread_message_loops_;
+// If this is the last message loop, finish the flush;
+// 4. If any thread hasn't finished its flush in time, finish the flush.
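+// A minimal caller sketch (names here are illustrative):
+//   void OnTraceData(const scoped_refptr<base::RefCountedString>& events,
+//                    bool has_more_events) {
+//     all_json += events->data();  // accumulate the JSON fragments
+//     if (!has_more_events)
+//       Done();
+//   }
+//   TraceLog::GetInstance()->SetDisabled();
+//   TraceLog::GetInstance()->Flush(base::Bind(&OnTraceData));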
+void TraceLog::Flush(const TraceLog::OutputCallback& cb) {
+ if (IsEnabled()) {
+ // Can't flush when tracing is enabled because otherwise PostTask would
+ // - generate more trace events;
+ // - deschedule the calling thread on some platforms causing inaccurate
+ // timing of the trace events.
+ scoped_refptr<RefCountedString> empty_result = new RefCountedString;
+ if (!cb.is_null())
+ cb.Run(empty_result, false);
+ LOG(WARNING) << "Ignored TraceLog::Flush called when tracing is enabled";
+ return;
+ }
+
+ int generation = this->generation();
+ // Copy of thread_message_loops_ to be used without locking.
+ std::vector<scoped_refptr<SingleThreadTaskRunner> >
+ thread_message_loop_task_runners;
+ {
+ AutoLock lock(lock_);
+ DCHECK(!flush_message_loop_proxy_.get());
+ flush_message_loop_proxy_ = MessageLoopProxy::current();
+ DCHECK(!thread_message_loops_.size() || flush_message_loop_proxy_.get());
+ flush_output_callback_ = cb;
+
+ if (thread_shared_chunk_) {
+ logged_events_->ReturnChunk(thread_shared_chunk_index_,
+ thread_shared_chunk_.Pass());
+ }
+
+ if (thread_message_loops_.size()) {
+ for (hash_set<MessageLoop*>::const_iterator it =
+ thread_message_loops_.begin();
+ it != thread_message_loops_.end(); ++it) {
+ thread_message_loop_task_runners.push_back((*it)->task_runner());
+ }
+ }
+ }
+
+ if (thread_message_loop_task_runners.size()) {
+ for (size_t i = 0; i < thread_message_loop_task_runners.size(); ++i) {
+ thread_message_loop_task_runners[i]->PostTask(
+ FROM_HERE,
+ Bind(&TraceLog::FlushCurrentThread, Unretained(this), generation));
+ }
+ flush_message_loop_proxy_->PostDelayedTask(
+ FROM_HERE,
+ Bind(&TraceLog::OnFlushTimeout, Unretained(this), generation),
+ TimeDelta::FromMilliseconds(kThreadFlushTimeoutMs));
+ return;
+ }
+
+ FinishFlush(generation);
+}
+
+void TraceLog::ConvertTraceEventsToTraceFormat(
+ scoped_ptr<TraceBuffer> logged_events,
+ const TraceLog::OutputCallback& flush_output_callback) {
+ if (flush_output_callback.is_null())
+ return;
+
+  // The callback needs to be called at least once even if there are no
+  // events, to let the caller know that the flush has completed.
+ bool has_more_events = true;
+ do {
+ scoped_refptr<RefCountedString> json_events_str_ptr =
+ new RefCountedString();
+
+ for (size_t i = 0; i < kTraceEventBatchChunks; ++i) {
+ const TraceBufferChunk* chunk = logged_events->NextChunk();
+ if (!chunk) {
+ has_more_events = false;
+ break;
+ }
+ for (size_t j = 0; j < chunk->size(); ++j) {
+ if (i > 0 || j > 0)
+ json_events_str_ptr->data().append(",\n");
+ chunk->GetEventAt(j)->AppendAsJSON(&(json_events_str_ptr->data()));
+ }
+ }
+
+ flush_output_callback.Run(json_events_str_ptr, has_more_events);
+ } while (has_more_events);
+}
+
+void TraceLog::FinishFlush(int generation) {
+ scoped_ptr<TraceBuffer> previous_logged_events;
+ OutputCallback flush_output_callback;
+
+ if (!CheckGeneration(generation))
+ return;
+
+ {
+ AutoLock lock(lock_);
+
+ previous_logged_events.swap(logged_events_);
+ UseNextTraceBuffer();
+ thread_message_loops_.clear();
+
+ flush_message_loop_proxy_ = NULL;
+ flush_output_callback = flush_output_callback_;
+ flush_output_callback_.Reset();
+ }
+
+ ConvertTraceEventsToTraceFormat(previous_logged_events.Pass(),
+ flush_output_callback);
+}
+
+// Run in each thread holding a local event buffer.
+void TraceLog::FlushCurrentThread(int generation) {
+ {
+ AutoLock lock(lock_);
+ if (!CheckGeneration(generation) || !flush_message_loop_proxy_.get()) {
+ // This is late. The corresponding flush has finished.
+ return;
+ }
+ }
+
+ // This will flush the thread local buffer.
+ delete thread_local_event_buffer_.Get();
+
+ AutoLock lock(lock_);
+ if (!CheckGeneration(generation) || !flush_message_loop_proxy_.get() ||
+ thread_message_loops_.size())
+ return;
+
+ flush_message_loop_proxy_->PostTask(
+ FROM_HERE,
+ Bind(&TraceLog::FinishFlush, Unretained(this), generation));
+}
+
+void TraceLog::OnFlushTimeout(int generation) {
+ {
+ AutoLock lock(lock_);
+ if (!CheckGeneration(generation) || !flush_message_loop_proxy_.get()) {
+ // Flush has finished before timeout.
+ return;
+ }
+
+ LOG(WARNING) <<
+ "The following threads haven't finished flush in time. "
+ "If this happens stably for some thread, please call "
+ "TraceLog::GetInstance()->SetCurrentThreadBlocksMessageLoop() from "
+ "the thread to avoid its trace events from being lost.";
+ for (hash_set<MessageLoop*>::const_iterator it =
+ thread_message_loops_.begin();
+ it != thread_message_loops_.end(); ++it) {
+ LOG(WARNING) << "Thread: " << (*it)->thread_name();
+ }
+ }
+ FinishFlush(generation);
+}
+
+void TraceLog::FlushButLeaveBufferIntact(
+ const TraceLog::OutputCallback& flush_output_callback) {
+ scoped_ptr<TraceBuffer> previous_logged_events;
+ {
+ AutoLock lock(lock_);
+ AddMetadataEventsWhileLocked();
+ if (thread_shared_chunk_) {
+ // Return the chunk to the main buffer to flush the sampling data.
+ logged_events_->ReturnChunk(thread_shared_chunk_index_,
+ thread_shared_chunk_.Pass());
+ }
+ previous_logged_events = logged_events_->CloneForIteration().Pass();
+ } // release lock
+
+ ConvertTraceEventsToTraceFormat(previous_logged_events.Pass(),
+ flush_output_callback);
+}
+
+void TraceLog::UseNextTraceBuffer() {
+ logged_events_.reset(CreateTraceBuffer());
+ subtle::NoBarrier_AtomicIncrement(&generation_, 1);
+ thread_shared_chunk_.reset();
+ thread_shared_chunk_index_ = 0;
+}
+
+TraceEventHandle TraceLog::AddTraceEvent(
+ char phase,
+ const unsigned char* category_group_enabled,
+ const char* name,
+ unsigned long long id,
+ int num_args,
+ const char** arg_names,
+ const unsigned char* arg_types,
+ const unsigned long long* arg_values,
+ const scoped_refptr<ConvertableToTraceFormat>* convertable_values,
+ unsigned char flags) {
+ int thread_id = static_cast<int>(base::PlatformThread::CurrentId());
+ base::TimeTicks now = base::TimeTicks::NowFromSystemTraceTime();
+ return AddTraceEventWithThreadIdAndTimestamp(phase, category_group_enabled,
+ name, id, thread_id, now,
+ num_args, arg_names,
+ arg_types, arg_values,
+ convertable_values, flags);
+}
+
+TraceEventHandle TraceLog::AddTraceEventWithThreadIdAndTimestamp(
+ char phase,
+ const unsigned char* category_group_enabled,
+ const char* name,
+ unsigned long long id,
+ int thread_id,
+ const TimeTicks& timestamp,
+ int num_args,
+ const char** arg_names,
+ const unsigned char* arg_types,
+ const unsigned long long* arg_values,
+ const scoped_refptr<ConvertableToTraceFormat>* convertable_values,
+ unsigned char flags) {
+ TraceEventHandle handle = { 0, 0, 0 };
+ if (!*category_group_enabled)
+ return handle;
+
+ // Avoid re-entrance of AddTraceEvent. This may happen in GPU process when
+ // ECHO_TO_CONSOLE is enabled: AddTraceEvent -> LOG(ERROR) ->
+ // GpuProcessLogMessageHandler -> PostPendingTask -> TRACE_EVENT ...
+ if (thread_is_in_trace_event_.Get())
+ return handle;
+
+ AutoThreadLocalBoolean thread_is_in_trace_event(&thread_is_in_trace_event_);
+
+ DCHECK(name);
+
+ if (flags & TRACE_EVENT_FLAG_MANGLE_ID)
+ id ^= process_id_hash_;
+
+ TimeTicks now = OffsetTimestamp(timestamp);
+ TimeTicks thread_now = ThreadNow();
+
+ ThreadLocalEventBuffer* thread_local_event_buffer = NULL;
+ // A ThreadLocalEventBuffer needs the message loop
+ // - to know when the thread exits;
+ // - to handle the final flush.
+  // For a thread without a message loop, or when its message loop may be
+  // blocked, trace events are added directly to the main buffer.
+ if (!thread_blocks_message_loop_.Get() && MessageLoop::current()) {
+ thread_local_event_buffer = thread_local_event_buffer_.Get();
+ if (thread_local_event_buffer &&
+ !CheckGeneration(thread_local_event_buffer->generation())) {
+ delete thread_local_event_buffer;
+ thread_local_event_buffer = NULL;
+ }
+ if (!thread_local_event_buffer) {
+ thread_local_event_buffer = new ThreadLocalEventBuffer(this);
+ thread_local_event_buffer_.Set(thread_local_event_buffer);
+ }
+ }
+
+ // Check and update the current thread name only if the event is for the
+ // current thread to avoid locks in most cases.
+ if (thread_id == static_cast<int>(PlatformThread::CurrentId())) {
+ const char* new_name = ThreadIdNameManager::GetInstance()->
+ GetName(thread_id);
+ // Check if the thread name has been set or changed since the previous
+ // call (if any), but don't bother if the new name is empty. Note this will
+ // not detect a thread name change within the same char* buffer address: we
+ // favor common case performance over corner case correctness.
+ if (new_name != g_current_thread_name.Get().Get() &&
+ new_name && *new_name) {
+ g_current_thread_name.Get().Set(new_name);
+
+ AutoLock thread_info_lock(thread_info_lock_);
+
+ hash_map<int, std::string>::iterator existing_name =
+ thread_names_.find(thread_id);
+ if (existing_name == thread_names_.end()) {
+ // This is a new thread id, and a new name.
+ thread_names_[thread_id] = new_name;
+ } else {
+ // This is a thread id that we've seen before, but potentially with a
+ // new name.
+ std::vector<StringPiece> existing_names;
+ Tokenize(existing_name->second, ",", &existing_names);
+ bool found = std::find(existing_names.begin(),
+ existing_names.end(),
+ new_name) != existing_names.end();
+ if (!found) {
+ if (existing_names.size())
+ existing_name->second.push_back(',');
+ existing_name->second.append(new_name);
+ }
+ }
+ }
+ }
+
+ std::string console_message;
+ if (*category_group_enabled &
+ (ENABLED_FOR_RECORDING | ENABLED_FOR_MONITORING)) {
+ OptionalAutoLock lock(lock_);
+
+ TraceEvent* trace_event = NULL;
+ if (thread_local_event_buffer) {
+ trace_event = thread_local_event_buffer->AddTraceEvent(&handle);
+ } else {
+ lock.EnsureAcquired();
+ trace_event = AddEventToThreadSharedChunkWhileLocked(&handle, true);
+ }
+
+ if (trace_event) {
+ trace_event->Initialize(thread_id, now, thread_now, phase,
+ category_group_enabled, name, id,
+ num_args, arg_names, arg_types, arg_values,
+ convertable_values, flags);
+
+#if defined(OS_ANDROID)
+ trace_event->SendToATrace();
+#endif
+ }
+
+ if (trace_options() & kInternalEchoToConsole) {
+ console_message = EventToConsoleMessage(
+ phase == TRACE_EVENT_PHASE_COMPLETE ? TRACE_EVENT_PHASE_BEGIN : phase,
+ timestamp, trace_event);
+ }
+ }
+
+ if (console_message.size())
+ LOG(ERROR) << console_message;
+
+ if (reinterpret_cast<const unsigned char*>(subtle::NoBarrier_Load(
+ &watch_category_)) == category_group_enabled) {
+ bool event_name_matches;
+ WatchEventCallback watch_event_callback_copy;
+ {
+ AutoLock lock(lock_);
+ event_name_matches = watch_event_name_ == name;
+ watch_event_callback_copy = watch_event_callback_;
+ }
+ if (event_name_matches) {
+ if (!watch_event_callback_copy.is_null())
+ watch_event_callback_copy.Run();
+ }
+ }
+
+ if (*category_group_enabled & ENABLED_FOR_EVENT_CALLBACK) {
+ EventCallback event_callback = reinterpret_cast<EventCallback>(
+ subtle::NoBarrier_Load(&event_callback_));
+ if (event_callback) {
+ event_callback(now,
+ phase == TRACE_EVENT_PHASE_COMPLETE ?
+ TRACE_EVENT_PHASE_BEGIN : phase,
+ category_group_enabled, name, id,
+ num_args, arg_names, arg_types, arg_values,
+ flags);
+ }
+ }
+
+ if (thread_local_event_buffer)
+ thread_local_event_buffer->ReportOverhead(now, thread_now);
+
+ return handle;
+}
+
+// May be called when a COMPLETE event ends and the unfinished event has been
+// recycled (phase == TRACE_EVENT_PHASE_END and trace_event == NULL).
+std::string TraceLog::EventToConsoleMessage(unsigned char phase,
+ const TimeTicks& timestamp,
+ TraceEvent* trace_event) {
+ AutoLock thread_info_lock(thread_info_lock_);
+
+ // The caller should translate TRACE_EVENT_PHASE_COMPLETE to
+  // TRACE_EVENT_PHASE_BEGIN or TRACE_EVENT_PHASE_END.
+ DCHECK(phase != TRACE_EVENT_PHASE_COMPLETE);
+
+ TimeDelta duration;
+ int thread_id = trace_event ?
+ trace_event->thread_id() : PlatformThread::CurrentId();
+ if (phase == TRACE_EVENT_PHASE_END) {
+ duration = timestamp - thread_event_start_times_[thread_id].top();
+ thread_event_start_times_[thread_id].pop();
+ }
+
+ std::string thread_name = thread_names_[thread_id];
+ if (thread_colors_.find(thread_name) == thread_colors_.end())
+ thread_colors_[thread_name] = (thread_colors_.size() % 6) + 1;
+
+ std::ostringstream log;
+ log << base::StringPrintf("%s: \x1b[0;3%dm",
+ thread_name.c_str(),
+ thread_colors_[thread_name]);
+
+ size_t depth = 0;
+ if (thread_event_start_times_.find(thread_id) !=
+ thread_event_start_times_.end())
+ depth = thread_event_start_times_[thread_id].size();
+
+ for (size_t i = 0; i < depth; ++i)
+ log << "| ";
+
+ if (trace_event)
+ trace_event->AppendPrettyPrinted(&log);
+ if (phase == TRACE_EVENT_PHASE_END)
+ log << base::StringPrintf(" (%.3f ms)", duration.InMillisecondsF());
+
+ log << "\x1b[0;m";
+
+ if (phase == TRACE_EVENT_PHASE_BEGIN)
+ thread_event_start_times_[thread_id].push(timestamp);
+
+ return log.str();
+}
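+
+// A resulting console line looks roughly like the following (ANSI color
+// escapes elided, values illustrative):
+//   CrBrowserMain: | | DoWork[cat] (1.250 ms)
+// where each "| " marks one currently open BEGIN event on that thread.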
+
+void TraceLog::AddTraceEventEtw(char phase,
+ const char* name,
+ const void* id,
+ const char* extra) {
+#if defined(OS_WIN)
+ TraceEventETWProvider::Trace(name, phase, id, extra);
+#endif
+ INTERNAL_TRACE_EVENT_ADD(phase, "ETW Trace Event", name,
+ TRACE_EVENT_FLAG_COPY, "id", id, "extra", extra);
+}
+
+void TraceLog::AddTraceEventEtw(char phase,
+ const char* name,
+ const void* id,
+ const std::string& extra) {
+#if defined(OS_WIN)
+ TraceEventETWProvider::Trace(name, phase, id, extra);
+#endif
+ INTERNAL_TRACE_EVENT_ADD(phase, "ETW Trace Event", name,
+ TRACE_EVENT_FLAG_COPY, "id", id, "extra", extra);
+}
+
+void TraceLog::UpdateTraceEventDuration(
+ const unsigned char* category_group_enabled,
+ const char* name,
+ TraceEventHandle handle) {
+ // Avoid re-entrance of AddTraceEvent. This may happen in GPU process when
+ // ECHO_TO_CONSOLE is enabled: AddTraceEvent -> LOG(ERROR) ->
+ // GpuProcessLogMessageHandler -> PostPendingTask -> TRACE_EVENT ...
+ if (thread_is_in_trace_event_.Get())
+ return;
+
+ AutoThreadLocalBoolean thread_is_in_trace_event(&thread_is_in_trace_event_);
+
+ TimeTicks thread_now = ThreadNow();
+ TimeTicks now = OffsetNow();
+
+ std::string console_message;
+ if (*category_group_enabled & ENABLED_FOR_RECORDING) {
+ OptionalAutoLock lock(lock_);
+
+ TraceEvent* trace_event = GetEventByHandleInternal(handle, &lock);
+ if (trace_event) {
+ DCHECK(trace_event->phase() == TRACE_EVENT_PHASE_COMPLETE);
+ trace_event->UpdateDuration(now, thread_now);
+#if defined(OS_ANDROID)
+ trace_event->SendToATrace();
+#endif
+ }
+
+ if (trace_options() & kInternalEchoToConsole) {
+ console_message = EventToConsoleMessage(TRACE_EVENT_PHASE_END,
+ now, trace_event);
+ }
+ }
+
+ if (console_message.size())
+ LOG(ERROR) << console_message;
+
+ if (*category_group_enabled & ENABLED_FOR_EVENT_CALLBACK) {
+ EventCallback event_callback = reinterpret_cast<EventCallback>(
+ subtle::NoBarrier_Load(&event_callback_));
+ if (event_callback) {
+ event_callback(now, TRACE_EVENT_PHASE_END, category_group_enabled, name,
+ trace_event_internal::kNoEventId, 0, NULL, NULL, NULL,
+ TRACE_EVENT_FLAG_NONE);
+ }
+ }
+}
+
+void TraceLog::SetWatchEvent(const std::string& category_name,
+ const std::string& event_name,
+ const WatchEventCallback& callback) {
+ const unsigned char* category = GetCategoryGroupEnabled(
+ category_name.c_str());
+ AutoLock lock(lock_);
+ subtle::NoBarrier_Store(&watch_category_,
+ reinterpret_cast<subtle::AtomicWord>(category));
+ watch_event_name_ = event_name;
+ watch_event_callback_ = callback;
+}
+
+void TraceLog::CancelWatchEvent() {
+ AutoLock lock(lock_);
+ subtle::NoBarrier_Store(&watch_category_, 0);
+ watch_event_name_ = "";
+ watch_event_callback_.Reset();
+}
+
+void TraceLog::AddMetadataEventsWhileLocked() {
+ lock_.AssertAcquired();
+
+#if !defined(OS_NACL) // NaCl shouldn't expose the process id.
+ InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, false),
+ 0,
+ "num_cpus", "number",
+ base::SysInfo::NumberOfProcessors());
+#endif
+
+ int current_thread_id = static_cast<int>(base::PlatformThread::CurrentId());
+ if (process_sort_index_ != 0) {
+ InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, false),
+ current_thread_id,
+ "process_sort_index", "sort_index",
+ process_sort_index_);
+ }
+
+ if (process_name_.size()) {
+ InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, false),
+ current_thread_id,
+ "process_name", "name",
+ process_name_);
+ }
+
+ if (process_labels_.size() > 0) {
+ std::vector<std::string> labels;
+    for (base::hash_map<int, std::string>::iterator it = process_labels_.begin();
+ it != process_labels_.end();
+ it++) {
+ labels.push_back(it->second);
+ }
+ InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, false),
+ current_thread_id,
+ "process_labels", "labels",
+ JoinString(labels, ','));
+ }
+
+ // Thread sort indices.
+  for (hash_map<int, int>::iterator it = thread_sort_indices_.begin();
+ it != thread_sort_indices_.end();
+ it++) {
+ if (it->second == 0)
+ continue;
+ InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, false),
+ it->first,
+ "thread_sort_index", "sort_index",
+ it->second);
+ }
+
+ // Thread names.
+ AutoLock thread_info_lock(thread_info_lock_);
+  for (hash_map<int, std::string>::iterator it = thread_names_.begin();
+ it != thread_names_.end();
+ it++) {
+ if (it->second.empty())
+ continue;
+ InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, false),
+ it->first,
+ "thread_name", "name",
+ it->second);
+ }
+
+ // If buffer is full, add a metadata record to report this.
+ if (!buffer_limit_reached_timestamp_.is_null()) {
+ InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, false),
+ current_thread_id,
+ "trace_buffer_overflowed",
+ "overflowed_at_ts",
+ buffer_limit_reached_timestamp_);
+ }
+}
+
+void TraceLog::WaitSamplingEventForTesting() {
+ if (!sampling_thread_)
+ return;
+ sampling_thread_->WaitSamplingEventForTesting();
+}
+
+void TraceLog::DeleteForTesting() {
+ DeleteTraceLogForTesting::Delete();
+}
+
+TraceEvent* TraceLog::GetEventByHandle(TraceEventHandle handle) {
+ return GetEventByHandleInternal(handle, NULL);
+}
+
+TraceEvent* TraceLog::GetEventByHandleInternal(TraceEventHandle handle,
+ OptionalAutoLock* lock) {
+ if (!handle.chunk_seq)
+ return NULL;
+
+ if (thread_local_event_buffer_.Get()) {
+ TraceEvent* trace_event =
+ thread_local_event_buffer_.Get()->GetEventByHandle(handle);
+ if (trace_event)
+ return trace_event;
+ }
+
+  // The event is no longer under the thread-local buffer's control.
+ // Try to get the event from the main buffer with a lock.
+ if (lock)
+ lock->EnsureAcquired();
+
+ if (thread_shared_chunk_ &&
+ handle.chunk_index == thread_shared_chunk_index_) {
+ return handle.chunk_seq == thread_shared_chunk_->seq() ?
+ thread_shared_chunk_->GetEventAt(handle.event_index) : NULL;
+ }
+
+ return logged_events_->GetEventByHandle(handle);
+}
+
+void TraceLog::SetProcessID(int process_id) {
+ process_id_ = process_id;
+ // Create a FNV hash from the process ID for XORing.
+ // See http://isthe.com/chongo/tech/comp/fnv/ for algorithm details.
+ unsigned long long offset_basis = 14695981039346656037ull;
+ unsigned long long fnv_prime = 1099511628211ull;
+ unsigned long long pid = static_cast<unsigned long long>(process_id_);
+ process_id_hash_ = (offset_basis ^ pid) * fnv_prime;
+}
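+
+// Sketch of how the hash is consumed (see
+// AddTraceEventWithThreadIdAndTimestamp above): events carrying
+// TRACE_EVENT_FLAG_MANGLE_ID get their id XORed with this per-process hash,
+// so identical local ids from different processes don't collide in a merged
+// trace:
+//   if (flags & TRACE_EVENT_FLAG_MANGLE_ID)
+//     id ^= process_id_hash_;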
+
+void TraceLog::SetProcessSortIndex(int sort_index) {
+ AutoLock lock(lock_);
+ process_sort_index_ = sort_index;
+}
+
+void TraceLog::SetProcessName(const std::string& process_name) {
+ AutoLock lock(lock_);
+ process_name_ = process_name;
+}
+
+void TraceLog::UpdateProcessLabel(
+ int label_id, const std::string& current_label) {
+  if (current_label.empty())
+ return RemoveProcessLabel(label_id);
+
+ AutoLock lock(lock_);
+ process_labels_[label_id] = current_label;
+}
+
+void TraceLog::RemoveProcessLabel(int label_id) {
+ AutoLock lock(lock_);
+ base::hash_map<int, std::string>::iterator it = process_labels_.find(
+ label_id);
+ if (it == process_labels_.end())
+ return;
+
+ process_labels_.erase(it);
+}
+
+void TraceLog::SetThreadSortIndex(PlatformThreadId thread_id, int sort_index) {
+ AutoLock lock(lock_);
+ thread_sort_indices_[static_cast<int>(thread_id)] = sort_index;
+}
+
+void TraceLog::SetTimeOffset(TimeDelta offset) {
+ time_offset_ = offset;
+}
+
+size_t TraceLog::GetObserverCountForTest() const {
+ return enabled_state_observer_list_.size();
+}
+
+void TraceLog::SetCurrentThreadBlocksMessageLoop() {
+ thread_blocks_message_loop_.Set(true);
+ if (thread_local_event_buffer_.Get()) {
+ // This will flush the thread local buffer.
+ delete thread_local_event_buffer_.Get();
+ }
+}
+
+bool CategoryFilter::IsEmptyOrContainsLeadingOrTrailingWhitespace(
+ const std::string& str) {
+ return str.empty() ||
+ str.at(0) == ' ' ||
+ str.at(str.length() - 1) == ' ';
+}
+
+CategoryFilter::CategoryFilter(const std::string& filter_string) {
+ if (!filter_string.empty())
+ Initialize(filter_string);
+ else
+ Initialize(CategoryFilter::kDefaultCategoryFilterString);
+}
+
+CategoryFilter::CategoryFilter() {
+ Initialize(CategoryFilter::kDefaultCategoryFilterString);
+}
+
+CategoryFilter::CategoryFilter(const CategoryFilter& cf)
+ : included_(cf.included_),
+ disabled_(cf.disabled_),
+ excluded_(cf.excluded_),
+ delays_(cf.delays_) {
+}
+
+CategoryFilter::~CategoryFilter() {
+}
+
+CategoryFilter& CategoryFilter::operator=(const CategoryFilter& rhs) {
+ if (this == &rhs)
+ return *this;
+
+ included_ = rhs.included_;
+ disabled_ = rhs.disabled_;
+ excluded_ = rhs.excluded_;
+ delays_ = rhs.delays_;
+ return *this;
+}
+
+void CategoryFilter::Initialize(const std::string& filter_string) {
+ // Tokenize list of categories, delimited by ','.
+ StringTokenizer tokens(filter_string, ",");
+ // Add each token to the appropriate list (included_,excluded_).
+ while (tokens.GetNext()) {
+ std::string category = tokens.token();
+ // Ignore empty categories.
+ if (category.empty())
+ continue;
+ // Synthetic delays are of the form 'DELAY(delay;option;option;...)'.
+ if (category.find(kSyntheticDelayCategoryFilterPrefix) == 0 &&
+ category.at(category.size() - 1) == ')') {
+ category = category.substr(
+ strlen(kSyntheticDelayCategoryFilterPrefix),
+ category.size() - strlen(kSyntheticDelayCategoryFilterPrefix) - 1);
+ size_t name_length = category.find(';');
+ if (name_length != std::string::npos && name_length > 0 &&
+ name_length != category.size() - 1) {
+ delays_.push_back(category);
+ }
+ } else if (category.at(0) == '-') {
+ // Excluded categories start with '-'.
+ // Remove '-' from category string.
+ category = category.substr(1);
+ excluded_.push_back(category);
+ } else if (category.compare(0, strlen(TRACE_DISABLED_BY_DEFAULT("")),
+ TRACE_DISABLED_BY_DEFAULT("")) == 0) {
+ disabled_.push_back(category);
+ } else {
+ included_.push_back(category);
+ }
+ }
+}
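+
+// Illustrative inputs (assuming TRACE_DISABLED_BY_DEFAULT("") expands to the
+// usual "disabled-by-default-" prefix and the synthetic delay prefix is
+// "DELAY("):
+//   "cat1,cat2"               -> included_ = {"cat1", "cat2"}
+//   "-cat1"                   -> excluded_ = {"cat1"}
+//   "disabled-by-default-gpu" -> disabled_ = {"disabled-by-default-gpu"}
+//   "DELAY(gpu.Swap;0.016)"   -> delays_   = {"gpu.Swap;0.016"}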
+
+void CategoryFilter::WriteString(const StringList& values,
+ std::string* out,
+ bool included) const {
+ bool prepend_comma = !out->empty();
+ int token_cnt = 0;
+ for (StringList::const_iterator ci = values.begin();
+ ci != values.end(); ++ci) {
+ if (token_cnt > 0 || prepend_comma)
+ StringAppendF(out, ",");
+ StringAppendF(out, "%s%s", (included ? "" : "-"), ci->c_str());
+ ++token_cnt;
+ }
+}
+
+void CategoryFilter::WriteString(const StringList& delays,
+ std::string* out) const {
+ bool prepend_comma = !out->empty();
+ int token_cnt = 0;
+ for (StringList::const_iterator ci = delays.begin();
+ ci != delays.end(); ++ci) {
+ if (token_cnt > 0 || prepend_comma)
+ StringAppendF(out, ",");
+ StringAppendF(out, "%s%s)", kSyntheticDelayCategoryFilterPrefix,
+ ci->c_str());
+ ++token_cnt;
+ }
+}
+
+std::string CategoryFilter::ToString() const {
+ std::string filter_string;
+ WriteString(included_, &filter_string, true);
+ WriteString(disabled_, &filter_string, true);
+ WriteString(excluded_, &filter_string, false);
+ WriteString(delays_, &filter_string);
+ return filter_string;
+}
+
+bool CategoryFilter::IsCategoryGroupEnabled(
+ const char* category_group_name) const {
+ // TraceLog should call this method only as part of enabling/disabling
+ // categories.
+
+ bool had_enabled_by_default = false;
+ DCHECK(category_group_name);
+ CStringTokenizer category_group_tokens(
+ category_group_name, category_group_name + strlen(category_group_name),
+ ",");
+ while (category_group_tokens.GetNext()) {
+ std::string category_group_token = category_group_tokens.token();
+ // Don't allow empty tokens, nor tokens with leading or trailing space.
+ DCHECK(!CategoryFilter::IsEmptyOrContainsLeadingOrTrailingWhitespace(
+ category_group_token))
+ << "Disallowed category string";
+ if (IsCategoryEnabled(category_group_token.c_str())) {
+ return true;
+ }
+ if (!MatchPattern(category_group_token.c_str(),
+ TRACE_DISABLED_BY_DEFAULT("*")))
+ had_enabled_by_default = true;
+ }
+ // Do a second pass to check for explicitly disabled categories
+ // (those explicitly enabled have priority due to first pass).
+ category_group_tokens.Reset();
+ while (category_group_tokens.GetNext()) {
+ std::string category_group_token = category_group_tokens.token();
+ for (StringList::const_iterator ci = excluded_.begin();
+ ci != excluded_.end(); ++ci) {
+ if (MatchPattern(category_group_token.c_str(), ci->c_str()))
+ return false;
+ }
+ }
+  // If the category group is not excluded and there are no included patterns,
+ // we consider this category group enabled, as long as it had categories
+ // other than disabled-by-default.
+ return included_.empty() && had_enabled_by_default;
+}
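+
+// Illustration of the two passes: with filter "cat1", the group "cat1,cat2"
+// is enabled (the first pass matches the inclusion); with filter "-cat2", the
+// same group is disabled (no inclusion matches, and the second pass hits the
+// exclusion).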
+
+bool CategoryFilter::IsCategoryEnabled(const char* category_name) const {
+ StringList::const_iterator ci;
+
+ // Check the disabled- filters and the disabled-* wildcard first so that a
+ // "*" filter does not include the disabled.
+ for (ci = disabled_.begin(); ci != disabled_.end(); ++ci) {
+ if (MatchPattern(category_name, ci->c_str()))
+ return true;
+ }
+
+ if (MatchPattern(category_name, TRACE_DISABLED_BY_DEFAULT("*")))
+ return false;
+
+ for (ci = included_.begin(); ci != included_.end(); ++ci) {
+ if (MatchPattern(category_name, ci->c_str()))
+ return true;
+ }
+
+ return false;
+}
+
+bool CategoryFilter::HasIncludedPatterns() const {
+ return !included_.empty();
+}
+
+void CategoryFilter::Merge(const CategoryFilter& nested_filter) {
+ // Keep included patterns only if both filters have an included entry.
+  // Otherwise, one of the filters was specifying "*" and we want to honor the
+  // broadest filter.
+ if (HasIncludedPatterns() && nested_filter.HasIncludedPatterns()) {
+ included_.insert(included_.end(),
+ nested_filter.included_.begin(),
+ nested_filter.included_.end());
+ } else {
+ included_.clear();
+ }
+
+ disabled_.insert(disabled_.end(),
+ nested_filter.disabled_.begin(),
+ nested_filter.disabled_.end());
+ excluded_.insert(excluded_.end(),
+ nested_filter.excluded_.begin(),
+ nested_filter.excluded_.end());
+ delays_.insert(delays_.end(),
+ nested_filter.delays_.begin(),
+ nested_filter.delays_.end());
+}
+
+void CategoryFilter::Clear() {
+ included_.clear();
+ disabled_.clear();
+ excluded_.clear();
+}
+
+const CategoryFilter::StringList&
+ CategoryFilter::GetSyntheticDelayValues() const {
+ return delays_;
+}
+
+} // namespace debug
+} // namespace base
+
+namespace trace_event_internal {
+
+ScopedTraceBinaryEfficient::ScopedTraceBinaryEfficient(
+ const char* category_group, const char* name) {
+ // The single atom works because for now the category_group can only be "gpu".
+ DCHECK(strcmp(category_group, "gpu") == 0);
+ static TRACE_EVENT_API_ATOMIC_WORD atomic = 0;
+ INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO_CUSTOM_VARIABLES(
+ category_group, atomic, category_group_enabled_);
+ name_ = name;
+ if (*category_group_enabled_) {
+ event_handle_ =
+ TRACE_EVENT_API_ADD_TRACE_EVENT_WITH_THREAD_ID_AND_TIMESTAMP(
+ TRACE_EVENT_PHASE_COMPLETE, category_group_enabled_, name,
+ trace_event_internal::kNoEventId,
+ static_cast<int>(base::PlatformThread::CurrentId()),
+ base::TimeTicks::NowFromSystemTraceTime(),
+ 0, NULL, NULL, NULL, NULL, TRACE_EVENT_FLAG_NONE);
+ }
+}
+
+ScopedTraceBinaryEfficient::~ScopedTraceBinaryEfficient() {
+ if (*category_group_enabled_) {
+ TRACE_EVENT_API_UPDATE_TRACE_EVENT_DURATION(category_group_enabled_,
+ name_, event_handle_);
+ }
+}
+
+} // namespace trace_event_internal
diff --git a/base/trace_event/trace_event_impl.h b/base/trace_event/trace_event_impl.h
new file mode 100644
index 0000000..6d04c76
--- /dev/null
+++ b/base/trace_event/trace_event_impl.h
@@ -0,0 +1,813 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TRACE_EVENT_TRACE_EVENT_IMPL_H_
+#define BASE_TRACE_EVENT_TRACE_EVENT_IMPL_H_
+
+#include <stack>
+#include <string>
+#include <vector>
+
+#include "base/atomicops.h"
+#include "base/base_export.h"
+#include "base/callback.h"
+#include "base/containers/hash_tables.h"
+#include "base/gtest_prod_util.h"
+#include "base/memory/ref_counted_memory.h"
+#include "base/memory/scoped_vector.h"
+#include "base/observer_list.h"
+#include "base/strings/string_util.h"
+#include "base/synchronization/condition_variable.h"
+#include "base/synchronization/lock.h"
+#include "base/threading/thread.h"
+#include "base/threading/thread_local.h"
+
+// Older style trace macros with explicit id and extra data.
+// Only these macros result in publishing data to ETW as currently implemented.
+#define TRACE_EVENT_BEGIN_ETW(name, id, extra) \
+ base::debug::TraceLog::AddTraceEventEtw( \
+ TRACE_EVENT_PHASE_BEGIN, \
+ name, reinterpret_cast<const void*>(id), extra)
+
+#define TRACE_EVENT_END_ETW(name, id, extra) \
+ base::debug::TraceLog::AddTraceEventEtw( \
+ TRACE_EVENT_PHASE_END, \
+ name, reinterpret_cast<const void*>(id), extra)
+
+#define TRACE_EVENT_INSTANT_ETW(name, id, extra) \
+ base::debug::TraceLog::AddTraceEventEtw( \
+ TRACE_EVENT_PHASE_INSTANT, \
+ name, reinterpret_cast<const void*>(id), extra)
+
+template <typename Type>
+struct DefaultSingletonTraits;
+
+namespace base {
+
+class WaitableEvent;
+class MessageLoop;
+
+namespace debug {
+
+// For any argument of type TRACE_VALUE_TYPE_CONVERTABLE the provided
+// class must implement this interface.
+class BASE_EXPORT ConvertableToTraceFormat
+ : public RefCounted<ConvertableToTraceFormat> {
+ public:
+ // Append the class info to the provided |out| string. The appended
+ // data must be a valid JSON object. Strings must be properly quoted and
+ // escaped. There is no processing applied to the content after it is
+ // appended.
+ virtual void AppendAsTraceFormat(std::string* out) const = 0;
+
+ std::string ToString() const {
+ std::string result;
+ AppendAsTraceFormat(&result);
+ return result;
+ }
+
+ protected:
+ virtual ~ConvertableToTraceFormat() {}
+
+ private:
+ friend class RefCounted<ConvertableToTraceFormat>;
+};
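+
+// Illustrative sketch (not part of the original change) of a minimal
+// subclass; MyTraceData is a hypothetical name:
+//   class MyTraceData : public ConvertableToTraceFormat {
+//    public:
+//     void AppendAsTraceFormat(std::string* out) const override {
+//       out->append("{\"value\":42}");  // Must be a valid JSON object.
+//     }
+//    private:
+//     ~MyTraceData() override {}
+//   };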
+
+struct TraceEventHandle {
+ uint32 chunk_seq;
+ uint16 chunk_index;
+ uint16 event_index;
+};
+
+const int kTraceMaxNumArgs = 2;
+
+class BASE_EXPORT TraceEvent {
+ public:
+ union TraceValue {
+ bool as_bool;
+ unsigned long long as_uint;
+ long long as_int;
+ double as_double;
+ const void* as_pointer;
+ const char* as_string;
+ };
+
+ TraceEvent();
+ ~TraceEvent();
+
+ // We don't need to copy TraceEvent except when TraceEventBuffer is cloned.
+ // Use an explicit copy method to avoid accidental misuse of copying.
+ void CopyFrom(const TraceEvent& other);
+
+ void Initialize(
+ int thread_id,
+ TimeTicks timestamp,
+ TimeTicks thread_timestamp,
+ char phase,
+ const unsigned char* category_group_enabled,
+ const char* name,
+ unsigned long long id,
+ int num_args,
+ const char** arg_names,
+ const unsigned char* arg_types,
+ const unsigned long long* arg_values,
+ const scoped_refptr<ConvertableToTraceFormat>* convertable_values,
+ unsigned char flags);
+
+ void Reset();
+
+ void UpdateDuration(const TimeTicks& now, const TimeTicks& thread_now);
+
+ // Serialize event data to JSON
+ void AppendAsJSON(std::string* out) const;
+ void AppendPrettyPrinted(std::ostringstream* out) const;
+
+ static void AppendValueAsJSON(unsigned char type,
+ TraceValue value,
+ std::string* out);
+
+ TimeTicks timestamp() const { return timestamp_; }
+ TimeTicks thread_timestamp() const { return thread_timestamp_; }
+ char phase() const { return phase_; }
+ int thread_id() const { return thread_id_; }
+ TimeDelta duration() const { return duration_; }
+ TimeDelta thread_duration() const { return thread_duration_; }
+ unsigned long long id() const { return id_; }
+ unsigned char flags() const { return flags_; }
+
+ // Exposed for unittesting:
+
+ const base::RefCountedString* parameter_copy_storage() const {
+ return parameter_copy_storage_.get();
+ }
+
+ const unsigned char* category_group_enabled() const {
+ return category_group_enabled_;
+ }
+
+ const char* name() const { return name_; }
+
+#if defined(OS_ANDROID)
+ void SendToATrace();
+#endif
+
+ private:
+ // Note: these are ordered by size (largest first) for optimal packing.
+ TimeTicks timestamp_;
+ TimeTicks thread_timestamp_;
+ TimeDelta duration_;
+ TimeDelta thread_duration_;
+ // id_ can be used to store phase-specific data.
+ unsigned long long id_;
+ TraceValue arg_values_[kTraceMaxNumArgs];
+ const char* arg_names_[kTraceMaxNumArgs];
+ scoped_refptr<ConvertableToTraceFormat> convertable_values_[kTraceMaxNumArgs];
+ const unsigned char* category_group_enabled_;
+ const char* name_;
+ scoped_refptr<base::RefCountedString> parameter_copy_storage_;
+ int thread_id_;
+ char phase_;
+ unsigned char flags_;
+ unsigned char arg_types_[kTraceMaxNumArgs];
+
+ DISALLOW_COPY_AND_ASSIGN(TraceEvent);
+};
+
+// TraceBufferChunk is the basic unit of TraceBuffer.
+class BASE_EXPORT TraceBufferChunk {
+ public:
+ explicit TraceBufferChunk(uint32 seq)
+ : next_free_(0),
+ seq_(seq) {
+ }
+
+ void Reset(uint32 new_seq);
+ TraceEvent* AddTraceEvent(size_t* event_index);
+ bool IsFull() const { return next_free_ == kTraceBufferChunkSize; }
+
+ uint32 seq() const { return seq_; }
+ size_t capacity() const { return kTraceBufferChunkSize; }
+ size_t size() const { return next_free_; }
+
+ TraceEvent* GetEventAt(size_t index) {
+ DCHECK(index < size());
+ return &chunk_[index];
+ }
+ const TraceEvent* GetEventAt(size_t index) const {
+ DCHECK(index < size());
+ return &chunk_[index];
+ }
+
+ scoped_ptr<TraceBufferChunk> Clone() const;
+
+ static const size_t kTraceBufferChunkSize = 64;
+
+ private:
+ size_t next_free_;
+ TraceEvent chunk_[kTraceBufferChunkSize];
+ uint32 seq_;
+};
+
+// TraceBuffer holds the events as they are collected.
+class BASE_EXPORT TraceBuffer {
+ public:
+ virtual ~TraceBuffer() {}
+
+ virtual scoped_ptr<TraceBufferChunk> GetChunk(size_t* index) = 0;
+ virtual void ReturnChunk(size_t index,
+ scoped_ptr<TraceBufferChunk> chunk) = 0;
+
+ virtual bool IsFull() const = 0;
+ virtual size_t Size() const = 0;
+ virtual size_t Capacity() const = 0;
+ virtual TraceEvent* GetEventByHandle(TraceEventHandle handle) = 0;
+
+ // For iteration. Each TraceBuffer can only be iterated once.
+ virtual const TraceBufferChunk* NextChunk() = 0;
+
+ virtual scoped_ptr<TraceBuffer> CloneForIteration() const = 0;
+};
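+
+// Iteration sketch (illustrative, not part of the original change), assuming
+// NextChunk() returns NULL once the buffer is exhausted; Handle() is a
+// hypothetical consumer:
+//   while (const TraceBufferChunk* chunk = buffer->NextChunk()) {
+//     for (size_t i = 0; i < chunk->size(); ++i)
+//       Handle(*chunk->GetEventAt(i));
+//   }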
+
+// TraceResultBuffer collects and converts trace fragments returned by TraceLog
+// to JSON output.
+class BASE_EXPORT TraceResultBuffer {
+ public:
+ typedef base::Callback<void(const std::string&)> OutputCallback;
+
+ // If you don't need to stream JSON chunks out efficiently, and just want to
+ // get a complete JSON string after calling Finish, use this struct to collect
+ // JSON trace output.
+ struct BASE_EXPORT SimpleOutput {
+ OutputCallback GetCallback();
+ void Append(const std::string& json_string);
+
+ // Do what you want with the |json_output| string after calling
+ // TraceResultBuffer::Finish.
+ std::string json_output;
+ };
+
+ TraceResultBuffer();
+ ~TraceResultBuffer();
+
+ // Set callback. The callback will be called during Start with the initial
+ // JSON output and during AddFragment and Finish with following JSON output
+ // chunks. The callback target must live past the last calls to
+ // TraceResultBuffer::Start/AddFragment/Finish.
+ void SetOutputCallback(const OutputCallback& json_chunk_callback);
+
+ // Start JSON output. This resets all internal state, so you can reuse
+ // the TraceResultBuffer by calling Start.
+ void Start();
+
+ // Call AddFragment 0 or more times to add trace fragments from TraceLog.
+ void AddFragment(const std::string& trace_fragment);
+
+ // When all fragments have been added, call Finish to complete the JSON
+ // formatted output.
+ void Finish();
+
+ private:
+ OutputCallback output_callback_;
+ bool append_comma_;
+};
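+
+// Usage sketch (illustrative, not part of the original change); |fragment| is
+// a hypothetical std::string produced by TraceLog during a flush:
+//   TraceResultBuffer buffer;
+//   TraceResultBuffer::SimpleOutput output;
+//   buffer.SetOutputCallback(output.GetCallback());
+//   buffer.Start();
+//   buffer.AddFragment(fragment);  // Repeat once per fragment.
+//   buffer.Finish();
+//   // output.json_output now holds the complete JSON trace.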
+
+class BASE_EXPORT CategoryFilter {
+ public:
+ typedef std::vector<std::string> StringList;
+
+ // The default category filter, used when none is provided.
+ // Allows all categories through, except those ending in 'Debug' or 'Test'.
+ static const char kDefaultCategoryFilterString[];
+
+ // |filter_string| is a comma-delimited list of category wildcards.
+ // A category can have an optional '-' prefix to make it an excluded category.
+ // All the rules above still apply, so for example, having both included and
+ // excluded categories in the same list is not supported.
+ //
+ // Example: CategoryFilter("test_MyTest*");
+ // Example: CategoryFilter("test_MyTest*,test_OtherStuff");
+ // Example: CategoryFilter("-excluded_category1,-excluded_category2");
+ // Example: CategoryFilter("-*,webkit"); would disable everything but webkit.
+ // Example: CategoryFilter("-webkit"); would enable everything but webkit.
+ //
+ // Category filters can also be used to configure synthetic delays.
+ //
+ // Example: CategoryFilter("DELAY(gpu.PresentingFrame;16)"); would make swap
+ // buffers always take at least 16 ms.
+ // Example: CategoryFilter("DELAY(gpu.PresentingFrame;16;oneshot)"); would
+ // make swap buffers take at least 16 ms the first time it is
+ // called.
+ // Example: CategoryFilter("DELAY(gpu.PresentingFrame;16;alternating)");
+ // would make swap buffers take at least 16 ms every other time it
+ // is called.
+ explicit CategoryFilter(const std::string& filter_string);
+
+ CategoryFilter();
+
+ CategoryFilter(const CategoryFilter& cf);
+
+ ~CategoryFilter();
+
+ CategoryFilter& operator=(const CategoryFilter& rhs);
+
+ // Writes the string representation of the CategoryFilter. This is a comma
+ // separated string, similar in nature to the one used to determine
+ // enabled/disabled category patterns, except that the order here is fixed:
+ // included categories go first, then excluded categories. Excluded
+ // categories are distinguished from included categories by the prefix '-'.
+ std::string ToString() const;
+
+ // Returns true if at least one category in the list is enabled by this
+ // category filter.
+ bool IsCategoryGroupEnabled(const char* category_group) const;
+
+ // Return a list of the synthetic delays specified in this category filter.
+ const StringList& GetSyntheticDelayValues() const;
+
+ // Merges nested_filter with the current CategoryFilter
+ void Merge(const CategoryFilter& nested_filter);
+
+ // Clears both included/excluded pattern lists. This is equivalent to
+ // creating a CategoryFilter with an empty string through the constructor,
+ // i.e. CategoryFilter().
+ //
+ // When using an empty filter, all categories are considered included as we
+ // are not excluding anything.
+ void Clear();
+
+ private:
+ FRIEND_TEST_ALL_PREFIXES(TraceEventTestFixture, CategoryFilter);
+
+ // Returns true if the category is enabled according to this filter.
+ bool IsCategoryEnabled(const char* category_name) const;
+
+ static bool IsEmptyOrContainsLeadingOrTrailingWhitespace(
+ const std::string& str);
+
+ void Initialize(const std::string& filter_string);
+ void WriteString(const StringList& values,
+ std::string* out,
+ bool included) const;
+ void WriteString(const StringList& delays, std::string* out) const;
+ bool HasIncludedPatterns() const;
+
+ StringList included_;
+ StringList disabled_;
+ StringList excluded_;
+ StringList delays_;
+};
+
+class TraceSamplingThread;
+
+// These options determine how the trace buffer stores data.
+enum TraceRecordMode {
+ // Record until the trace buffer is full.
+ RECORD_UNTIL_FULL,
+
+ // Record until the user ends the trace. The trace buffer is a fixed size
+ // and we use it as a ring buffer during recording.
+ RECORD_CONTINUOUSLY,
+
+ // Echo to console. Events are discarded.
+ ECHO_TO_CONSOLE,
+
+ // Record until the trace buffer is full, but with a huge buffer size.
+ RECORD_AS_MUCH_AS_POSSIBLE
+};
+
+struct BASE_EXPORT TraceOptions {
+ TraceOptions()
+ : record_mode(RECORD_UNTIL_FULL),
+ enable_sampling(false),
+ enable_systrace(false) {}
+
+ explicit TraceOptions(TraceRecordMode record_mode)
+ : record_mode(record_mode),
+ enable_sampling(false),
+ enable_systrace(false) {}
+
+ // |options_string| is a comma-delimited list of trace options.
+ // Possible options are: "record-until-full", "record-continuously",
+ // "trace-to-console", "enable-sampling" and "enable-systrace".
+ // The first 3 options are trace recording modes and hence
+ // mutually exclusive. If more than one trace recording mode appears in the
+ // options_string, the last one takes precedence. If no trace recording
+ // mode is specified, the recording mode defaults to RECORD_UNTIL_FULL.
+ //
+ // The trace option will first be reset to the default option
+ // (record_mode set to RECORD_UNTIL_FULL, enable_sampling and enable_systrace
+ // set to false) before options parsed from |options_string| are applied on
+ // it.
+ // If |options_string| is invalid, the final state of trace_options is
+ // undefined.
+ //
+ // Example: trace_options.SetFromString("record-until-full")
+ // Example: trace_options.SetFromString(
+ // "record-continuously, enable-sampling")
+ // Example: trace_options.SetFromString("record-until-full, trace-to-console")
+ // will set ECHO_TO_CONSOLE as the recording mode.
+ //
+ // Returns true on success.
+ bool SetFromString(const std::string& options_string);
+
+ std::string ToString() const;
+
+ TraceRecordMode record_mode;
+ bool enable_sampling;
+ bool enable_systrace;
+};
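+
+// Usage sketch (illustrative, not part of the original change):
+//   TraceOptions options;
+//   if (options.SetFromString("record-continuously, enable-sampling")) {
+//     // options.record_mode == RECORD_CONTINUOUSLY and
+//     // options.enable_sampling == true.
+//   }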
+
+struct BASE_EXPORT TraceLogStatus {
+ TraceLogStatus();
+ ~TraceLogStatus();
+ size_t event_capacity;
+ size_t event_count;
+};
+
+class BASE_EXPORT TraceLog {
+ public:
+ enum Mode {
+ DISABLED = 0,
+ RECORDING_MODE,
+ MONITORING_MODE,
+ };
+
+ // The pointer returned from GetCategoryGroupEnabledInternal() points to a
+ // value with zero or more of the following bits. Used in this class only.
+ // The TRACE_EVENT macros should only use the value as a bool.
+ // These values must be in sync with macro values in TraceEvent.h in Blink.
+ enum CategoryGroupEnabledFlags {
+ // Category group enabled for the recording mode.
+ ENABLED_FOR_RECORDING = 1 << 0,
+ // Category group enabled for the monitoring mode.
+ ENABLED_FOR_MONITORING = 1 << 1,
+ // Category group enabled by SetEventCallbackEnabled().
+ ENABLED_FOR_EVENT_CALLBACK = 1 << 2,
+ };
+
+ static TraceLog* GetInstance();
+
+ // Get set of known category groups. This can change as new code paths are
+ // reached. The known category groups are inserted into |category_groups|.
+ void GetKnownCategoryGroups(std::vector<std::string>* category_groups);
+
+ // Retrieves a copy (for thread-safety) of the current CategoryFilter.
+ CategoryFilter GetCurrentCategoryFilter();
+
+ // Retrieves a copy (for thread-safety) of the current TraceOptions.
+ TraceOptions GetCurrentTraceOptions() const;
+
+ // Enables normal tracing (recording trace events in the trace buffer).
+ // See CategoryFilter comments for details on how to control what categories
+ // will be traced. If tracing has already been enabled, |category_filter| will
+ // be merged into the current category filter.
+ void SetEnabled(const CategoryFilter& category_filter,
+ Mode mode, const TraceOptions& options);
+
+ // Disables normal tracing for all categories.
+ void SetDisabled();
+
+ bool IsEnabled() { return mode_ != DISABLED; }
+
+ // The number of times we have begun recording traces. If tracing is off,
+ // returns -1. If tracing is on, then it returns the number of times we have
+ // recorded a trace. By watching for this number to increment, you can
+ // passively discover when a new trace has begun. This is then used to
+ // implement the TRACE_EVENT_IS_NEW_TRACE() primitive.
+ int GetNumTracesRecorded();
+
+#if defined(OS_ANDROID)
+ void StartATrace();
+ void StopATrace();
+ void AddClockSyncMetadataEvent();
+#endif
+
+ // Enabled state listeners give a callback when tracing is enabled or
+ // disabled. This can be used to tie into other libraries' tracing systems
+ // on-demand.
+ class BASE_EXPORT EnabledStateObserver {
+ public:
+ // Called just after the tracing system becomes enabled, outside of the
+ // |lock_|. TraceLog::IsEnabled() is true at this point.
+ virtual void OnTraceLogEnabled() = 0;
+
+ // Called just after the tracing system disables, outside of the |lock_|.
+ // TraceLog::IsEnabled() is false at this point.
+ virtual void OnTraceLogDisabled() = 0;
+ };
+ void AddEnabledStateObserver(EnabledStateObserver* listener);
+ void RemoveEnabledStateObserver(EnabledStateObserver* listener);
+ bool HasEnabledStateObserver(EnabledStateObserver* listener) const;
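+
+ // Illustrative observer sketch (not part of the original change);
+ // MyObserver and my_observer are hypothetical:
+ //   class MyObserver : public base::debug::TraceLog::EnabledStateObserver {
+ //    public:
+ //     void OnTraceLogEnabled() override { /* start companion tracing */ }
+ //     void OnTraceLogDisabled() override { /* stop companion tracing */ }
+ //   };
+ //   TraceLog::GetInstance()->AddEnabledStateObserver(&my_observer);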
+
+ TraceLogStatus GetStatus() const;
+ bool BufferIsFull() const;
+
+ // Not using base::Callback because it is limited to 7 parameters.
+ // Also, using a primitive type allows directly passing the callback from
+ // WebCore.
+ // WARNING: It is possible for the previously set callback to be called
+ // after a call to SetEventCallbackEnabled() that replaces it, or after a
+ // call to SetEventCallbackDisabled() that disables the callback.
+ // This callback may be invoked on any thread.
+ // For TRACE_EVENT_PHASE_COMPLETE events, the client will still receive pairs
+ // of TRACE_EVENT_PHASE_BEGIN and TRACE_EVENT_PHASE_END events to keep the
+ // interface simple.
+ typedef void (*EventCallback)(TimeTicks timestamp,
+ char phase,
+ const unsigned char* category_group_enabled,
+ const char* name,
+ unsigned long long id,
+ int num_args,
+ const char* const arg_names[],
+ const unsigned char arg_types[],
+ const unsigned long long arg_values[],
+ unsigned char flags);
+
+ // Enable tracing for EventCallback.
+ void SetEventCallbackEnabled(const CategoryFilter& category_filter,
+ EventCallback cb);
+ void SetEventCallbackDisabled();
+
+ // Flush all collected events to the given output callback. The callback will
+ // be called one or more times either synchronously or asynchronously from
+ // the current thread in IPC-bite-sized chunks. The string format is
+ // undefined. Use TraceResultBuffer to convert one or more trace strings to
+ // JSON. The callback can be null if the caller doesn't want any data.
+ // Due to the implementation of thread-local buffers, flush can't be
+ // done when tracing is enabled. If called when tracing is enabled, the
+ // callback will be called directly with (empty_string, false) to indicate
+ // the end of this unsuccessful flush.
+ typedef base::Callback<void(const scoped_refptr<base::RefCountedString>&,
+ bool has_more_events)> OutputCallback;
+ void Flush(const OutputCallback& cb);
+ void FlushButLeaveBufferIntact(const OutputCallback& flush_output_callback);
+
+ // Called by TRACE_EVENT* macros, don't call this directly.
+ // The name parameter is a category group for example:
+ // TRACE_EVENT0("renderer,webkit", "WebViewImpl::HandleInputEvent")
+ static const unsigned char* GetCategoryGroupEnabled(const char* name);
+ static const char* GetCategoryGroupName(
+ const unsigned char* category_group_enabled);
+
+ // Called by TRACE_EVENT* macros, don't call this directly.
+ // If |copy| is set, |name|, |arg_name1| and |arg_name2| will be deep copied
+ // into the event; see "Memory scoping note" and TRACE_EVENT_COPY_XXX above.
+ TraceEventHandle AddTraceEvent(
+ char phase,
+ const unsigned char* category_group_enabled,
+ const char* name,
+ unsigned long long id,
+ int num_args,
+ const char** arg_names,
+ const unsigned char* arg_types,
+ const unsigned long long* arg_values,
+ const scoped_refptr<ConvertableToTraceFormat>* convertable_values,
+ unsigned char flags);
+ TraceEventHandle AddTraceEventWithThreadIdAndTimestamp(
+ char phase,
+ const unsigned char* category_group_enabled,
+ const char* name,
+ unsigned long long id,
+ int thread_id,
+ const TimeTicks& timestamp,
+ int num_args,
+ const char** arg_names,
+ const unsigned char* arg_types,
+ const unsigned long long* arg_values,
+ const scoped_refptr<ConvertableToTraceFormat>* convertable_values,
+ unsigned char flags);
+ static void AddTraceEventEtw(char phase,
+ const char* category_group,
+ const void* id,
+ const char* extra);
+ static void AddTraceEventEtw(char phase,
+ const char* category_group,
+ const void* id,
+ const std::string& extra);
+
+ void UpdateTraceEventDuration(const unsigned char* category_group_enabled,
+ const char* name,
+ TraceEventHandle handle);
+
+ // For every matching event, the callback will be called.
+ typedef base::Callback<void()> WatchEventCallback;
+ void SetWatchEvent(const std::string& category_name,
+ const std::string& event_name,
+ const WatchEventCallback& callback);
+ // Cancel the watch event. If tracing is enabled, this may race with the
+ // watch event notification firing.
+ void CancelWatchEvent();
+
+ int process_id() const { return process_id_; }
+
+ // Exposed for unittesting:
+
+ void WaitSamplingEventForTesting();
+
+ // Allows deleting our singleton instance.
+ static void DeleteForTesting();
+
+ // Allow tests to inspect TraceEvents.
+ TraceEvent* GetEventByHandle(TraceEventHandle handle);
+
+ void SetProcessID(int process_id);
+
+ // Process sort indices, if set, override the order in which a process will
+ // appear relative to other processes in the trace viewer. Processes are
+ // sorted first on their sort index, ascending, then by their name, and then
+ // by tid.
+ void SetProcessSortIndex(int sort_index);
+
+ // Sets the name of the process.
+ void SetProcessName(const std::string& process_name);
+
+ // Processes can have labels in addition to their names. Use labels, for
+ // instance, to list out the web page titles that a process is handling.
+ void UpdateProcessLabel(int label_id, const std::string& current_label);
+ void RemoveProcessLabel(int label_id);
+
+ // Thread sort indices, if set, override the order in which a thread will
+ // appear within its process in the trace viewer. Threads are sorted first on
+ // their sort index, ascending, then by their name, and then by tid.
+ void SetThreadSortIndex(PlatformThreadId thread_id, int sort_index);
+
+ // Allow setting an offset between the current TimeTicks time and the time
+ // that should be reported.
+ void SetTimeOffset(TimeDelta offset);
+
+ size_t GetObserverCountForTest() const;
+
+ // Call this method if the current thread may block the message loop. It
+ // prevents the thread from using the thread-local buffer, because such a
+ // thread may not handle the flush request in time, causing loss of
+ // unflushed events.
+ void SetCurrentThreadBlocksMessageLoop();
+
+ private:
+ typedef unsigned int InternalTraceOptions;
+
+ FRIEND_TEST_ALL_PREFIXES(TraceEventTestFixture,
+ TraceBufferRingBufferGetReturnChunk);
+ FRIEND_TEST_ALL_PREFIXES(TraceEventTestFixture,
+ TraceBufferRingBufferHalfIteration);
+ FRIEND_TEST_ALL_PREFIXES(TraceEventTestFixture,
+ TraceBufferRingBufferFullIteration);
+ FRIEND_TEST_ALL_PREFIXES(TraceEventTestFixture,
+ TraceBufferVectorReportFull);
+ FRIEND_TEST_ALL_PREFIXES(TraceEventTestFixture,
+ ConvertTraceOptionsToInternalOptions);
+ FRIEND_TEST_ALL_PREFIXES(TraceEventTestFixture,
+ TraceRecordAsMuchAsPossibleMode);
+
+ // This allows the constructor and destructor to be private and usable only
+ // by the Singleton class.
+ friend struct DefaultSingletonTraits<TraceLog>;
+
+ // Enable/disable each category group based on the current mode_,
+ // category_filter_, event_callback_ and event_callback_category_filter_.
+ // Enable the category group in the enabled mode if category_filter_ matches
+ // the category group, or event_callback_ is not null and
+ // event_callback_category_filter_ matches the category group.
+ void UpdateCategoryGroupEnabledFlags();
+ void UpdateCategoryGroupEnabledFlag(size_t category_index);
+
+ // Configure synthetic delays based on the values set in the current
+ // category filter.
+ void UpdateSyntheticDelaysFromCategoryFilter();
+
+ InternalTraceOptions GetInternalOptionsFromTraceOptions(
+ const TraceOptions& options);
+
+ class ThreadLocalEventBuffer;
+ class OptionalAutoLock;
+
+ TraceLog();
+ ~TraceLog();
+ const unsigned char* GetCategoryGroupEnabledInternal(const char* name);
+ void AddMetadataEventsWhileLocked();
+
+ InternalTraceOptions trace_options() const {
+ return static_cast<InternalTraceOptions>(
+ subtle::NoBarrier_Load(&trace_options_));
+ }
+
+ TraceBuffer* trace_buffer() const { return logged_events_.get(); }
+ TraceBuffer* CreateTraceBuffer();
+ TraceBuffer* CreateTraceBufferVectorOfSize(size_t max_chunks);
+
+ std::string EventToConsoleMessage(unsigned char phase,
+ const TimeTicks& timestamp,
+ TraceEvent* trace_event);
+
+ TraceEvent* AddEventToThreadSharedChunkWhileLocked(TraceEventHandle* handle,
+ bool check_buffer_is_full);
+ void CheckIfBufferIsFullWhileLocked();
+ void SetDisabledWhileLocked();
+
+ TraceEvent* GetEventByHandleInternal(TraceEventHandle handle,
+ OptionalAutoLock* lock);
+
+ // |generation| is used in the following callbacks to check if the callback
+ // is called for the flush of the current |logged_events_|.
+ void FlushCurrentThread(int generation);
+ void ConvertTraceEventsToTraceFormat(scoped_ptr<TraceBuffer> logged_events,
+ const TraceLog::OutputCallback& flush_output_callback);
+ void FinishFlush(int generation);
+ void OnFlushTimeout(int generation);
+
+ int generation() const {
+ return static_cast<int>(subtle::NoBarrier_Load(&generation_));
+ }
+ bool CheckGeneration(int generation) const {
+ return generation == this->generation();
+ }
+ void UseNextTraceBuffer();
+
+ TimeTicks OffsetNow() const {
+ return OffsetTimestamp(TimeTicks::NowFromSystemTraceTime());
+ }
+ TimeTicks OffsetTimestamp(const TimeTicks& timestamp) const {
+ return timestamp - time_offset_;
+ }
+
+ // Internal representation of trace options since we store the currently used
+ // trace option as an AtomicWord.
+ static const InternalTraceOptions kInternalNone;
+ static const InternalTraceOptions kInternalRecordUntilFull;
+ static const InternalTraceOptions kInternalRecordContinuously;
+ static const InternalTraceOptions kInternalEchoToConsole;
+ static const InternalTraceOptions kInternalEnableSampling;
+ static const InternalTraceOptions kInternalRecordAsMuchAsPossible;
+
+ // This lock protects TraceLog member accesses (except for members protected
+ // by thread_info_lock_) from arbitrary threads.
+ mutable Lock lock_;
+ // This lock protects accesses to thread_names_, thread_event_start_times_
+ // and thread_colors_.
+ Lock thread_info_lock_;
+ Mode mode_;
+ int num_traces_recorded_;
+ scoped_ptr<TraceBuffer> logged_events_;
+ subtle::AtomicWord /* EventCallback */ event_callback_;
+ bool dispatching_to_observer_list_;
+ std::vector<EnabledStateObserver*> enabled_state_observer_list_;
+
+ std::string process_name_;
+ base::hash_map<int, std::string> process_labels_;
+ int process_sort_index_;
+ base::hash_map<int, int> thread_sort_indices_;
+ base::hash_map<int, std::string> thread_names_;
+
+ // The following two maps are used only when ECHO_TO_CONSOLE.
+ base::hash_map<int, std::stack<TimeTicks> > thread_event_start_times_;
+ base::hash_map<std::string, int> thread_colors_;
+
+ TimeTicks buffer_limit_reached_timestamp_;
+
+ // XORed with TraceID to make it unlikely to collide with other processes.
+ unsigned long long process_id_hash_;
+
+ int process_id_;
+
+ TimeDelta time_offset_;
+
+ // Allow tests to wake up when certain events occur.
+ WatchEventCallback watch_event_callback_;
+ subtle::AtomicWord /* const unsigned char* */ watch_category_;
+ std::string watch_event_name_;
+
+ subtle::AtomicWord /* Options */ trace_options_;
+
+ // Sampling thread handles.
+ scoped_ptr<TraceSamplingThread> sampling_thread_;
+ PlatformThreadHandle sampling_thread_handle_;
+
+ CategoryFilter category_filter_;
+ CategoryFilter event_callback_category_filter_;
+
+ ThreadLocalPointer<ThreadLocalEventBuffer> thread_local_event_buffer_;
+ ThreadLocalBoolean thread_blocks_message_loop_;
+ ThreadLocalBoolean thread_is_in_trace_event_;
+
+ // Contains the message loops of threads that have had at least one event
+ // added into the local event buffer. Not using MessageLoopProxy because we
+ // need to know the lifetime of the message loops.
+ hash_set<MessageLoop*> thread_message_loops_;
+
+ // For events which can't be added into the thread local buffer, e.g. events
+ // from threads without a message loop.
+ scoped_ptr<TraceBufferChunk> thread_shared_chunk_;
+ size_t thread_shared_chunk_index_;
+
+ // Set when asynchronous Flush is in progress.
+ OutputCallback flush_output_callback_;
+ scoped_refptr<MessageLoopProxy> flush_message_loop_proxy_;
+ subtle::AtomicWord generation_;
+
+ DISALLOW_COPY_AND_ASSIGN(TraceLog);
+};
+
+} // namespace debug
+} // namespace base
+
+#endif // BASE_TRACE_EVENT_TRACE_EVENT_IMPL_H_
diff --git a/base/trace_event/trace_event_impl_constants.cc b/base/trace_event/trace_event_impl_constants.cc
new file mode 100644
index 0000000..c46cf49
--- /dev/null
+++ b/base/trace_event/trace_event_impl_constants.cc
@@ -0,0 +1,28 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/trace_event/trace_event_impl.h"
+
+namespace base {
+namespace debug {
+
+// Enable everything but debug and test categories by default.
+const char CategoryFilter::kDefaultCategoryFilterString[] = "-*Debug,-*Test";
+
+// Constants used by TraceLog's internal implementation of trace options.
+const TraceLog::InternalTraceOptions
+ TraceLog::kInternalNone = 0;
+const TraceLog::InternalTraceOptions
+ TraceLog::kInternalRecordUntilFull = 1 << 0;
+const TraceLog::InternalTraceOptions
+ TraceLog::kInternalRecordContinuously = 1 << 1;
+const TraceLog::InternalTraceOptions
+ TraceLog::kInternalEnableSampling = 1 << 2;
+const TraceLog::InternalTraceOptions
+ TraceLog::kInternalEchoToConsole = 1 << 3;
+const TraceLog::InternalTraceOptions
+ TraceLog::kInternalRecordAsMuchAsPossible = 1 << 4;
+
+} // namespace debug
+} // namespace base
diff --git a/base/trace_event/trace_event_memory.cc b/base/trace_event/trace_event_memory.cc
new file mode 100644
index 0000000..96b28e4
--- /dev/null
+++ b/base/trace_event/trace_event_memory.cc
@@ -0,0 +1,440 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/trace_event/trace_event_memory.h"
+
+#include "base/debug/leak_annotations.h"
+#include "base/lazy_instance.h"
+#include "base/logging.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/message_loop/message_loop.h"
+#include "base/strings/string_number_conversions.h"
+#include "base/strings/string_util.h"
+#include "base/threading/thread_local_storage.h"
+#include "base/trace_event/trace_event.h"
+
+namespace base {
+namespace debug {
+
+namespace {
+
+// Maximum number of nested TRACE_EVENT scopes to record. Must be less than
+// or equal to HeapProfileTable::kMaxStackDepth / 2 because we record two
+// entries on the pseudo-stack per scope.
+const size_t kMaxScopeDepth = 16;
+
+/////////////////////////////////////////////////////////////////////////////
+// Holds a memory dump until the tracing system needs to serialize it.
+class MemoryDumpHolder : public base::debug::ConvertableToTraceFormat {
+ public:
+ // Takes ownership of dump, which must be a JSON string, allocated with
+ // malloc() and NULL terminated.
+ explicit MemoryDumpHolder(char* dump) : dump_(dump) {}
+
+ // base::debug::ConvertableToTraceFormat overrides:
+ void AppendAsTraceFormat(std::string* out) const override {
+ AppendHeapProfileAsTraceFormat(dump_, out);
+ }
+
+ private:
+ ~MemoryDumpHolder() override { free(dump_); }
+
+ char* dump_;
+
+ DISALLOW_COPY_AND_ASSIGN(MemoryDumpHolder);
+};
+
+/////////////////////////////////////////////////////////////////////////////
+// Records a stack of TRACE_MEMORY events. One per thread is required.
+struct TraceMemoryStack {
+ TraceMemoryStack() : scope_depth(0) {
+ memset(scope_data, 0, kMaxScopeDepth * sizeof(scope_data[0]));
+ }
+
+ // Depth of the currently nested TRACE_EVENT scopes. Allowed to be greater
+ // than kMaxScopeDepth so we can match scope pushes and pops even if we don't
+ // have enough space to store the ScopeData.
+ size_t scope_depth;
+
+ // Stack of categories and names.
+ ScopedTraceMemory::ScopeData scope_data[kMaxScopeDepth];
+};
+
+// Pointer to a TraceMemoryStack per thread.
+base::ThreadLocalStorage::StaticSlot tls_trace_memory_stack = TLS_INITIALIZER;
+
+// Clean up memory pointed to by our thread-local storage.
+void DeleteStackOnThreadCleanup(void* value) {
+ TraceMemoryStack* stack = static_cast<TraceMemoryStack*>(value);
+ delete stack;
+}
+
+// Initializes the thread-local TraceMemoryStack pointer. Returns true on
+// success or if it is already initialized.
+bool InitThreadLocalStorage() {
+ if (tls_trace_memory_stack.initialized())
+ return true;
+ // Initialize the thread-local storage key, returning true on success.
+ return tls_trace_memory_stack.Initialize(&DeleteStackOnThreadCleanup);
+}
+
+// Clean up thread-local-storage in the main thread.
+void CleanupThreadLocalStorage() {
+ if (!tls_trace_memory_stack.initialized())
+ return;
+ TraceMemoryStack* stack =
+ static_cast<TraceMemoryStack*>(tls_trace_memory_stack.Get());
+ delete stack;
+ tls_trace_memory_stack.Set(NULL);
+ // Intentionally do not release the thread-local-storage key here, that is,
+ // do not call tls_trace_memory_stack.Free(). Other threads have lazily
+ // created pointers in thread-local-storage via GetTraceMemoryStack() below.
+ // Those threads need to run the DeleteStackOnThreadCleanup() destructor
+ // function when they exit. If we release the key, the destructor will not be
+ // called and those threads will not clean up their memory.
+}
+
+// Returns the thread-local trace memory stack for the current thread, creating
+// one if needed. Returns NULL if the thread-local storage key isn't
+// initialized, which indicates that heap profiling isn't running.
+TraceMemoryStack* GetTraceMemoryStack() {
+ TraceMemoryStack* stack =
+ static_cast<TraceMemoryStack*>(tls_trace_memory_stack.Get());
+ // Lazily initialize TraceMemoryStack objects for new threads.
+ if (!stack) {
+ stack = new TraceMemoryStack;
+ tls_trace_memory_stack.Set(stack);
+ }
+ return stack;
+}
+
+// Returns a "pseudo-stack" of pointers to trace event categories and names.
+// Because tcmalloc stores one pointer per stack frame this converts N nested
+// trace events into N * 2 pseudo-stack entries. Thus this macro invocation:
+// TRACE_EVENT0("category1", "name1");
+// TRACE_EVENT0("category2", "name2");
+// becomes this pseudo-stack:
+// stack_out[0] = "category1"
+// stack_out[1] = "name1"
+// stack_out[2] = "category2"
+// stack_out[3] = "name2"
+// Returns int instead of size_t to match the signature required by tcmalloc.
+int GetPseudoStack(int skip_count_ignored, void** stack_out) {
+ // If the tracing system isn't fully initialized, just skip this allocation.
+ // Attempting to initialize will allocate memory, causing this function to
+ // be called recursively from inside the allocator.
+ if (!tls_trace_memory_stack.initialized() || !tls_trace_memory_stack.Get())
+ return 0;
+ TraceMemoryStack* stack =
+ static_cast<TraceMemoryStack*>(tls_trace_memory_stack.Get());
+ // Copy at most kMaxScopeDepth scope entries.
+ const size_t count = std::min(stack->scope_depth, kMaxScopeDepth);
+ // Note that memcpy() works for zero bytes.
+ memcpy(stack_out,
+ stack->scope_data,
+ count * sizeof(stack->scope_data[0]));
+ // Each item in the trace event stack contains both name and category so tell
+ // tcmalloc that we have returned |count| * 2 stack frames.
+ return static_cast<int>(count * 2);
+}
+
+} // namespace
+
+//////////////////////////////////////////////////////////////////////////////
+
+TraceMemoryController::TraceMemoryController(
+ scoped_refptr<MessageLoopProxy> message_loop_proxy,
+ HeapProfilerStartFunction heap_profiler_start_function,
+ HeapProfilerStopFunction heap_profiler_stop_function,
+ GetHeapProfileFunction get_heap_profile_function)
+ : message_loop_proxy_(message_loop_proxy),
+ heap_profiler_start_function_(heap_profiler_start_function),
+ heap_profiler_stop_function_(heap_profiler_stop_function),
+ get_heap_profile_function_(get_heap_profile_function),
+ weak_factory_(this) {
+ // Force the "memory" category to show up in the trace viewer.
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("memory"), "init");
+ // Watch for the tracing system being enabled.
+ TraceLog::GetInstance()->AddEnabledStateObserver(this);
+}
+
+TraceMemoryController::~TraceMemoryController() {
+ if (dump_timer_.IsRunning())
+ StopProfiling();
+ TraceLog::GetInstance()->RemoveEnabledStateObserver(this);
+}
+
+// base::debug::TraceLog::EnabledStateObserver overrides:
+void TraceMemoryController::OnTraceLogEnabled() {
+ // Check to see if tracing is enabled for the memory category.
+ bool enabled;
+ TRACE_EVENT_CATEGORY_GROUP_ENABLED(TRACE_DISABLED_BY_DEFAULT("memory"),
+ &enabled);
+ if (!enabled)
+ return;
+ DVLOG(1) << "OnTraceLogEnabled";
+ message_loop_proxy_->PostTask(
+ FROM_HERE,
+ base::Bind(&TraceMemoryController::StartProfiling,
+ weak_factory_.GetWeakPtr()));
+}
+
+void TraceMemoryController::OnTraceLogDisabled() {
+ // The memory category is always disabled before OnTraceLogDisabled() is
+ // called, so we cannot tell if it was enabled before. Always try to turn
+ // off profiling.
+ DVLOG(1) << "OnTraceLogDisabled";
+ message_loop_proxy_->PostTask(
+ FROM_HERE,
+ base::Bind(&TraceMemoryController::StopProfiling,
+ weak_factory_.GetWeakPtr()));
+}
+
+void TraceMemoryController::StartProfiling() {
+ // Guard against the tracing framework enabling us more than once.
+ if (dump_timer_.IsRunning())
+ return;
+ DVLOG(1) << "Starting trace memory";
+ if (!InitThreadLocalStorage())
+ return;
+ ScopedTraceMemory::set_enabled(true);
+ // Call ::HeapProfilerWithPseudoStackStart().
+ heap_profiler_start_function_(&GetPseudoStack);
+ const int kDumpIntervalSeconds = 5;
+ dump_timer_.Start(FROM_HERE,
+ TimeDelta::FromSeconds(kDumpIntervalSeconds),
+ base::Bind(&TraceMemoryController::DumpMemoryProfile,
+ weak_factory_.GetWeakPtr()));
+}
+
+void TraceMemoryController::DumpMemoryProfile() {
+ // Don't trace allocations here in the memory tracing system.
+ INTERNAL_TRACE_MEMORY(TRACE_DISABLED_BY_DEFAULT("memory"),
+ TRACE_MEMORY_IGNORE);
+
+ DVLOG(1) << "DumpMemoryProfile";
+ // MemoryDumpHolder takes ownership of this string. See GetHeapProfile() in
+ // tcmalloc for details.
+ char* dump = get_heap_profile_function_();
+ const int kSnapshotId = 1;
+ TRACE_EVENT_OBJECT_SNAPSHOT_WITH_ID(
+ TRACE_DISABLED_BY_DEFAULT("memory"),
+ "memory::Heap",
+ kSnapshotId,
+ scoped_refptr<ConvertableToTraceFormat>(new MemoryDumpHolder(dump)));
+}
+
+void TraceMemoryController::StopProfiling() {
+ // Guard against the tracing framework disabling us more than once.
+ if (!dump_timer_.IsRunning())
+ return;
+ DVLOG(1) << "Stopping trace memory";
+ dump_timer_.Stop();
+ ScopedTraceMemory::set_enabled(false);
+ CleanupThreadLocalStorage();
+ // Call ::HeapProfilerStop().
+ heap_profiler_stop_function_();
+}
+
+bool TraceMemoryController::IsTimerRunningForTest() const {
+ return dump_timer_.IsRunning();
+}
+
+/////////////////////////////////////////////////////////////////////////////
+
+// static
+bool ScopedTraceMemory::enabled_ = false;
+
+void ScopedTraceMemory::Initialize(const char* category, const char* name) {
+ DCHECK(enabled_);
+ // Get our thread's copy of the stack.
+ TraceMemoryStack* trace_memory_stack = GetTraceMemoryStack();
+ const size_t index = trace_memory_stack->scope_depth;
+ // Don't record data for deeply nested scopes, but continue to increment
+ // |scope_depth| so we can match pushes and pops.
+ if (index < kMaxScopeDepth) {
+ ScopeData& event = trace_memory_stack->scope_data[index];
+ event.category = category;
+ event.name = name;
+ }
+ trace_memory_stack->scope_depth++;
+}
+
+void ScopedTraceMemory::Destroy() {
+ DCHECK(enabled_);
+ // Get our thread's copy of the stack.
+ TraceMemoryStack* trace_memory_stack = GetTraceMemoryStack();
+ // The tracing system can be turned on while ScopedTraceMemory objects are
+ // already live on the stack, so guard against underflow as they are
+ // destroyed.
+ if (trace_memory_stack->scope_depth > 0)
+ trace_memory_stack->scope_depth--;
+}
+
+// static
+void ScopedTraceMemory::InitForTest() {
+ InitThreadLocalStorage();
+ enabled_ = true;
+}
+
+// static
+void ScopedTraceMemory::CleanupForTest() {
+ enabled_ = false;
+ CleanupThreadLocalStorage();
+}
+
+// static
+int ScopedTraceMemory::GetStackDepthForTest() {
+ TraceMemoryStack* stack = GetTraceMemoryStack();
+ return static_cast<int>(stack->scope_depth);
+}
+
+// static
+ScopedTraceMemory::ScopeData ScopedTraceMemory::GetScopeDataForTest(
+ int stack_index) {
+ TraceMemoryStack* stack = GetTraceMemoryStack();
+ return stack->scope_data[stack_index];
+}
+
+/////////////////////////////////////////////////////////////////////////////
+
+void AppendHeapProfileAsTraceFormat(const char* input, std::string* output) {
+ // Heap profile output has a header total line, then a list of stacks with
+ // memory totals, like this:
+ //
+ // heap profile: 357: 55227 [ 14653: 2624014] @ heapprofile
+ // 95: 40940 [ 649: 114260] @ 0x7fa7f4b3be13
+ // 77: 32546 [ 742: 106234] @
+ // 68: 4195 [ 1087: 98009] @ 0x7fa7fa9b9ba0 0x7fa7f4b3be13
+ //
+ // MAPPED_LIBRARIES:
+ // 1be411fc1000-1be4139e4000 rw-p 00000000 00:00 0
+ // 1be4139e4000-1be4139e5000 ---p 00000000 00:00 0
+ // ...
+ //
+ // Skip input after MAPPED_LIBRARIES.
+ std::string input_string;
+ const char* mapped_libraries = strstr(input, "MAPPED_LIBRARIES");
+ if (mapped_libraries) {
+ input_string.assign(input, mapped_libraries - input);
+ } else {
+ input_string.assign(input);
+ }
+
+ std::vector<std::string> lines;
+ size_t line_count = Tokenize(input_string, "\n", &lines);
+ if (line_count == 0) {
+ DLOG(WARNING) << "No lines found";
+ return;
+ }
+
+ // Handle the initial summary line.
+ output->append("[");
+ AppendHeapProfileTotalsAsTraceFormat(lines[0], output);
+
+ // Handle the following stack trace lines.
+ for (size_t i = 1; i < line_count; ++i) {
+ const std::string& line = lines[i];
+ AppendHeapProfileLineAsTraceFormat(line, output);
+ }
+ output->append("]\n");
+}
+
+void AppendHeapProfileTotalsAsTraceFormat(const std::string& line,
+ std::string* output) {
+ // This is what a line looks like:
+ // heap profile: 357: 55227 [ 14653: 2624014] @ heapprofile
+ //
+ // The numbers represent total allocations since profiling was enabled.
+ // From the example above:
+ // 357 = Outstanding allocations (mallocs - frees)
+ // 55227 = Outstanding bytes (malloc bytes - free bytes)
+ // 14653 = Total allocations (mallocs)
+ // 2624014 = Total bytes (malloc bytes)
+ std::vector<std::string> tokens;
+ Tokenize(line, " :[]@", &tokens);
+ if (tokens.size() < 4) {
+ DLOG(WARNING) << "Invalid totals line " << line;
+ return;
+ }
+ DCHECK_EQ(tokens[0], "heap");
+ DCHECK_EQ(tokens[1], "profile");
+ output->append("{\"current_allocs\": ");
+ output->append(tokens[2]);
+ output->append(", \"current_bytes\": ");
+ output->append(tokens[3]);
+ output->append(", \"trace\": \"\"}");
+}
+
+bool AppendHeapProfileLineAsTraceFormat(const std::string& line,
+ std::string* output) {
+ // This is what a line looks like:
+ // 68: 4195 [ 1087: 98009] @ 0x7fa7fa9b9ba0 0x7fa7f4b3be13
+ //
+ // The numbers represent allocations for a particular stack trace since
+ // profiling was enabled. From the example above:
+ // 68 = Outstanding allocations (mallocs - frees)
+ // 4195 = Outstanding bytes (malloc bytes - free bytes)
+ // 1087 = Total allocations (mallocs)
+ // 98009 = Total bytes (malloc bytes)
+ //
+ // 0x7fa7fa9b9ba0 0x7fa7f4b3be13 = Stack trace represented as pointers to
+ // static strings from trace event categories
+ // and names.
+ std::vector<std::string> tokens;
+ Tokenize(line, " :[]@", &tokens);
+ // It's valid to have no stack addresses, so only require 4 tokens.
+ if (tokens.size() < 4) {
+ DLOG(WARNING) << "Invalid line " << line;
+ return false;
+ }
+ // Don't bother with stacks that have no current allocations.
+ if (tokens[0] == "0")
+ return false;
+ output->append(",\n");
+ output->append("{\"current_allocs\": ");
+ output->append(tokens[0]);
+ output->append(", \"current_bytes\": ");
+ output->append(tokens[1]);
+ output->append(", \"trace\": \"");
+
+ // Convert pairs of "stack addresses" into category and name strings.
+ const std::string kSingleQuote = "'";
+ for (size_t t = 4; t < tokens.size(); t += 2) {
+ // Casting strings into pointers is ugly but otherwise tcmalloc would need
+ // to gain a special output serializer just for pseudo-stacks.
+ const char* trace_category = StringFromHexAddress(tokens[t]);
+ DCHECK_LT(t + 1, tokens.size());
+ const char* trace_name = StringFromHexAddress(tokens[t + 1]);
+
+ // TODO(jamescook): Report the trace category and name separately to the
+ // trace viewer and allow it to decide what decorations to apply. For now
+ // just hard-code a decoration for posted tasks (toplevel).
+ std::string trace_string(trace_name);
+ if (!strcmp(trace_category, "toplevel"))
+ trace_string.append("->PostTask");
+
+ // Some trace name strings have double quotes, convert them to single.
+ ReplaceChars(trace_string, "\"", kSingleQuote, &trace_string);
+
+ output->append(trace_string);
+
+ // Trace viewer expects a trailing space.
+ output->append(" ");
+ }
+ output->append("\"}");
+ return true;
+}
+
+const char* StringFromHexAddress(const std::string& hex_address) {
+ uint64 address = 0;
+ if (!base::HexStringToUInt64(hex_address, &address))
+ return "error";
+ if (!address)
+ return "null";
+ // Note that this cast handles 64-bit to 32-bit conversion if necessary.
+ return reinterpret_cast<const char*>(address);
+}
+
+} // namespace debug
+} // namespace base
diff --git a/base/trace_event/trace_event_memory.h b/base/trace_event/trace_event_memory.h
new file mode 100644
index 0000000..f16467f
--- /dev/null
+++ b/base/trace_event/trace_event_memory.h
@@ -0,0 +1,172 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TRACE_EVENT_TRACE_EVENT_MEMORY_H_
+#define BASE_TRACE_EVENT_TRACE_EVENT_MEMORY_H_
+
+#include "base/base_export.h"
+#include "base/gtest_prod_util.h"
+#include "base/memory/ref_counted.h"
+#include "base/memory/weak_ptr.h"
+#include "base/timer/timer.h"
+#include "base/trace_event/trace_event_impl.h"
+
+// TODO(jamescook): Windows support for memory tracing.
+#if !defined(NO_TCMALLOC) && !defined(OS_NACL) && \
+ (defined(OS_LINUX) || defined(OS_ANDROID))
+#define TCMALLOC_TRACE_MEMORY_SUPPORTED 1
+#endif
+
+namespace base {
+
+class MessageLoopProxy;
+
+namespace debug {
+
+// Watches for chrome://tracing to be enabled or disabled. When tracing is
+// enabled, also enables tcmalloc heap profiling. This class is the preferred
+// way to turn trace-based heap memory profiling on and off.
+class BASE_EXPORT TraceMemoryController
+ : public TraceLog::EnabledStateObserver {
+ public:
+ typedef int (*StackGeneratorFunction)(int skip_count, void** stack);
+ typedef void (*HeapProfilerStartFunction)(StackGeneratorFunction callback);
+ typedef void (*HeapProfilerStopFunction)();
+ typedef char* (*GetHeapProfileFunction)();
+
+ // |message_loop_proxy| must be a proxy to the primary thread for the client
+ // process, e.g. the UI thread in a browser. The function pointers must be
+ // pointers to tcmalloc heap profiling functions; by avoiding direct calls to
+ // these functions we avoid a dependency on third_party/tcmalloc from base.
+ TraceMemoryController(
+ scoped_refptr<MessageLoopProxy> message_loop_proxy,
+ HeapProfilerStartFunction heap_profiler_start_function,
+ HeapProfilerStopFunction heap_profiler_stop_function,
+ GetHeapProfileFunction get_heap_profile_function);
+ virtual ~TraceMemoryController();
+
+ // base::debug::TraceLog::EnabledStateObserver overrides:
+ void OnTraceLogEnabled() override;
+ void OnTraceLogDisabled() override;
+
+ // Starts heap memory profiling.
+ void StartProfiling();
+
+ // Captures a heap profile.
+ void DumpMemoryProfile();
+
+ // Stops heap memory profiling.
+ void StopProfiling();
+
+ private:
+ FRIEND_TEST_ALL_PREFIXES(TraceMemoryTest, TraceMemoryController);
+
+ bool IsTimerRunningForTest() const;
+
+ // Ensures the observer starts and stops tracing on the primary thread.
+ scoped_refptr<MessageLoopProxy> message_loop_proxy_;
+
+ // Pointers to tcmalloc heap profiling functions. Allows this class to use
+ // tcmalloc functions without introducing a dependency from base to tcmalloc.
+ HeapProfilerStartFunction heap_profiler_start_function_;
+ HeapProfilerStopFunction heap_profiler_stop_function_;
+ GetHeapProfileFunction get_heap_profile_function_;
+
+ // Timer to schedule memory profile dumps.
+ RepeatingTimer<TraceMemoryController> dump_timer_;
+
+ WeakPtrFactory<TraceMemoryController> weak_factory_;
+
+ DISALLOW_COPY_AND_ASSIGN(TraceMemoryController);
+};
+
+//////////////////////////////////////////////////////////////////////////////
+
+// A scoped context for memory tracing. Pushes the name onto a stack for
+// recording by tcmalloc heap profiling.
+class BASE_EXPORT ScopedTraceMemory {
+ public:
+ struct ScopeData {
+ const char* category;
+ const char* name;
+ };
+
+ // Memory for |category| and |name| must be static, for example, literal
+ // strings in a TRACE_EVENT macro.
+ ScopedTraceMemory(const char* category, const char* name) {
+ if (!enabled_)
+ return;
+ Initialize(category, name);
+ }
+ ~ScopedTraceMemory() {
+ if (!enabled_)
+ return;
+ Destroy();
+ }
+
+ // Enables the storing of trace names on a per-thread stack.
+ static void set_enabled(bool enabled) { enabled_ = enabled; }
+
+ // Testing interface:
+ static void InitForTest();
+ static void CleanupForTest();
+ static int GetStackDepthForTest();
+ static ScopeData GetScopeDataForTest(int stack_index);
+
+ private:
+ void Initialize(const char* category, const char* name);
+ void Destroy();
+
+ static bool enabled_;
+ DISALLOW_COPY_AND_ASSIGN(ScopedTraceMemory);
+};
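+
+// Usage sketch (illustrative, not part of the original change); the scope
+// pushes a category/name pair onto the heap profiler's pseudo-stack and pops
+// it on destruction. DoAllocationHeavyWork() is hypothetical:
+//   {
+//     base::debug::ScopedTraceMemory scope("category", "name");
+//     DoAllocationHeavyWork();  // Allocations here are attributed to
+//                               // "category"/"name".
+//   }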
+
+//////////////////////////////////////////////////////////////////////////////
+
+// Converts tcmalloc's heap profiler data with pseudo-stacks in |input| to
+// trace event compatible JSON and appends to |output|. Visible for testing.
+BASE_EXPORT void AppendHeapProfileAsTraceFormat(const char* input,
+ std::string* output);
+
+// Converts the first |line| of heap profiler data, which contains totals for
+// all allocations in a special format, into trace event compatible JSON and
+// appends to |output|. Visible for testing.
+BASE_EXPORT void AppendHeapProfileTotalsAsTraceFormat(const std::string& line,
+ std::string* output);
+
+// Converts a single |line| of heap profiler data into trace event compatible
+// JSON and appends to |output|. Returns true if the line was valid and has a
+// non-zero number of current allocations. Visible for testing.
+BASE_EXPORT bool AppendHeapProfileLineAsTraceFormat(const std::string& line,
+ std::string* output);
+
+// Returns a pointer to a string given its hexadecimal address in |hex_address|.
+// Handles both 32-bit and 64-bit addresses. Returns "null" for null pointers
+// and "error" if |address| could not be parsed. Visible for testing.
+BASE_EXPORT const char* StringFromHexAddress(const std::string& hex_address);
+
+} // namespace debug
+} // namespace base
+
+// Make local variables with unique names based on the line number. Note that
+// the extra level of indirection is needed so that |__LINE__| is expanded
+// before token pasting.
+#define INTERNAL_TRACE_MEMORY_ID3(line) trace_memory_unique_##line
+#define INTERNAL_TRACE_MEMORY_ID2(line) INTERNAL_TRACE_MEMORY_ID3(line)
+#define INTERNAL_TRACE_MEMORY_ID INTERNAL_TRACE_MEMORY_ID2(__LINE__)
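+
+// For example, on line 42 INTERNAL_TRACE_MEMORY_ID expands to
+// trace_memory_unique_42.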
+
+// This is the core macro that adds a scope to each TRACE_EVENT location.
+// It generates a unique local variable name using the macros above.
+#if defined(TCMALLOC_TRACE_MEMORY_SUPPORTED)
+#define INTERNAL_TRACE_MEMORY(category, name) \
+ base::debug::ScopedTraceMemory INTERNAL_TRACE_MEMORY_ID(category, name);
+#else
+#define INTERNAL_TRACE_MEMORY(category, name)
+#endif  // defined(TCMALLOC_TRACE_MEMORY_SUPPORTED)
+
+// A special trace name that allows us to ignore memory allocations inside
+// the memory dump system itself. The allocations are recorded, but the
+// visualizer skips them. Must match the value in heap.js.
+#define TRACE_MEMORY_IGNORE "trace-memory-ignore"
+
+#endif // BASE_TRACE_EVENT_TRACE_EVENT_MEMORY_H_
diff --git a/base/trace_event/trace_event_memory_unittest.cc b/base/trace_event/trace_event_memory_unittest.cc
new file mode 100644
index 0000000..220c0e6
--- /dev/null
+++ b/base/trace_event/trace_event_memory_unittest.cc
@@ -0,0 +1,240 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/trace_event/trace_event_memory.h"
+
+#include <sstream>
+#include <string>
+
+#include "base/message_loop/message_loop.h"
+#include "base/trace_event/trace_event_impl.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+#if defined(TCMALLOC_TRACE_MEMORY_SUPPORTED)
+#include "third_party/tcmalloc/chromium/src/gperftools/heap-profiler.h"
+#endif
+
+namespace base {
+namespace debug {
+
+// Tests for the trace event memory tracking system. Exists as a class so it
+// can be a friend of TraceMemoryController.
+class TraceMemoryTest : public testing::Test {
+ public:
+ TraceMemoryTest() {}
+ ~TraceMemoryTest() override {}
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(TraceMemoryTest);
+};
+
+//////////////////////////////////////////////////////////////////////////////
+
+#if defined(TCMALLOC_TRACE_MEMORY_SUPPORTED)
+
+TEST_F(TraceMemoryTest, TraceMemoryController) {
+ MessageLoop message_loop;
+
+ // Start with no observers of the TraceLog.
+ EXPECT_EQ(0u, TraceLog::GetInstance()->GetObserverCountForTest());
+
+ // Creating a controller adds it to the TraceLog observer list.
+ scoped_ptr<TraceMemoryController> controller(
+ new TraceMemoryController(
+ message_loop.message_loop_proxy(),
+ ::HeapProfilerWithPseudoStackStart,
+ ::HeapProfilerStop,
+ ::GetHeapProfile));
+ EXPECT_EQ(1u, TraceLog::GetInstance()->GetObserverCountForTest());
+ EXPECT_TRUE(
+ TraceLog::GetInstance()->HasEnabledStateObserver(controller.get()));
+
+ // By default the observer isn't dumping memory profiles.
+ EXPECT_FALSE(controller->IsTimerRunningForTest());
+
+ // Simulate enabling tracing.
+ controller->StartProfiling();
+ message_loop.RunUntilIdle();
+ EXPECT_TRUE(controller->IsTimerRunningForTest());
+
+ // Simulate disabling tracing.
+ controller->StopProfiling();
+ message_loop.RunUntilIdle();
+ EXPECT_FALSE(controller->IsTimerRunningForTest());
+
+ // Deleting the observer removes it from the TraceLog observer list.
+ controller.reset();
+ EXPECT_EQ(0u, TraceLog::GetInstance()->GetObserverCountForTest());
+}
+
+TEST_F(TraceMemoryTest, ScopedTraceMemory) {
+ ScopedTraceMemory::InitForTest();
+
+ // Start with an empty stack.
+ EXPECT_EQ(0, ScopedTraceMemory::GetStackDepthForTest());
+
+ {
+ // Push an item.
+ ScopedTraceMemory scope1("cat1", "name1");
+ EXPECT_EQ(1, ScopedTraceMemory::GetStackDepthForTest());
+ EXPECT_EQ("cat1", ScopedTraceMemory::GetScopeDataForTest(0).category);
+ EXPECT_EQ("name1", ScopedTraceMemory::GetScopeDataForTest(0).name);
+
+ {
+ // One more item.
+ ScopedTraceMemory scope2("cat2", "name2");
+ EXPECT_EQ(2, ScopedTraceMemory::GetStackDepthForTest());
+ EXPECT_EQ("cat2", ScopedTraceMemory::GetScopeDataForTest(1).category);
+ EXPECT_EQ("name2", ScopedTraceMemory::GetScopeDataForTest(1).name);
+ }
+
+ // Ended scope 2.
+ EXPECT_EQ(1, ScopedTraceMemory::GetStackDepthForTest());
+ }
+
+ // Ended scope 1.
+ EXPECT_EQ(0, ScopedTraceMemory::GetStackDepthForTest());
+
+ ScopedTraceMemory::CleanupForTest();
+}
+
+void TestDeepScopeNesting(int current, int depth) {
+ EXPECT_EQ(current, ScopedTraceMemory::GetStackDepthForTest());
+ ScopedTraceMemory scope("category", "name");
+ if (current < depth)
+ TestDeepScopeNesting(current + 1, depth);
+ EXPECT_EQ(current + 1, ScopedTraceMemory::GetStackDepthForTest());
+}
+
+TEST_F(TraceMemoryTest, DeepScopeNesting) {
+ ScopedTraceMemory::InitForTest();
+
+ // Ensure really deep scopes don't crash.
+ TestDeepScopeNesting(0, 100);
+
+ ScopedTraceMemory::CleanupForTest();
+}
+
+#endif  // defined(TCMALLOC_TRACE_MEMORY_SUPPORTED)
+
+/////////////////////////////////////////////////////////////////////////////
+
+TEST_F(TraceMemoryTest, AppendHeapProfileTotalsAsTraceFormat) {
+ // Empty input gives empty output.
+ std::string empty_output;
+ AppendHeapProfileTotalsAsTraceFormat("", &empty_output);
+ EXPECT_EQ("", empty_output);
+
+ // Typical case.
+ const char input[] =
+ "heap profile: 357: 55227 [ 14653: 2624014] @ heapprofile";
+ const std::string kExpectedOutput =
+ "{\"current_allocs\": 357, \"current_bytes\": 55227, \"trace\": \"\"}";
+ std::string output;
+ AppendHeapProfileTotalsAsTraceFormat(input, &output);
+ EXPECT_EQ(kExpectedOutput, output);
+}
+
+TEST_F(TraceMemoryTest, AppendHeapProfileLineAsTraceFormat) {
+ // Empty input gives empty output.
+ std::string empty_output;
+ EXPECT_FALSE(AppendHeapProfileLineAsTraceFormat("", &empty_output));
+ EXPECT_EQ("", empty_output);
+
+ // Invalid input returns false.
+ std::string junk_output;
+ EXPECT_FALSE(AppendHeapProfileLineAsTraceFormat("junk", &junk_output));
+
+ // Input with normal category and name entries.
+ const char kCategory[] = "category";
+ const char kName[] = "name";
+ std::ostringstream input;
+ input << " 68: 4195 [ 1087: 98009] @ " << &kCategory << " "
+ << &kName;
+ const std::string kExpectedOutput =
+ ",\n"
+ "{"
+ "\"current_allocs\": 68, "
+ "\"current_bytes\": 4195, "
+ "\"trace\": \"name \""
+ "}";
+ std::string output;
+ EXPECT_TRUE(
+ AppendHeapProfileLineAsTraceFormat(input.str().c_str(), &output));
+ EXPECT_EQ(kExpectedOutput, output);
+
+  // Input with the category "toplevel".
+ // TODO(jamescook): Eliminate this special case and move the logic to the
+ // trace viewer code.
+ const char kTaskCategory[] = "toplevel";
+ const char kTaskName[] = "TaskName";
+ std::ostringstream input2;
+ input2 << " 68: 4195 [ 1087: 98009] @ " << &kTaskCategory << " "
+ << &kTaskName;
+ const std::string kExpectedOutput2 =
+ ",\n"
+ "{"
+ "\"current_allocs\": 68, "
+ "\"current_bytes\": 4195, "
+ "\"trace\": \"TaskName->PostTask \""
+ "}";
+ std::string output2;
+ EXPECT_TRUE(
+ AppendHeapProfileLineAsTraceFormat(input2.str().c_str(), &output2));
+ EXPECT_EQ(kExpectedOutput2, output2);
+
+ // Zero current allocations is skipped.
+ std::ostringstream zero_input;
+ zero_input << " 0: 0 [ 1087: 98009] @ " << &kCategory << " "
+ << &kName;
+ std::string zero_output;
+ EXPECT_FALSE(AppendHeapProfileLineAsTraceFormat(zero_input.str().c_str(),
+ &zero_output));
+ EXPECT_EQ("", zero_output);
+}
+
+TEST_F(TraceMemoryTest, AppendHeapProfileAsTraceFormat) {
+ // Empty input gives empty output.
+ std::string empty_output;
+ AppendHeapProfileAsTraceFormat("", &empty_output);
+ EXPECT_EQ("", empty_output);
+
+ // Typical case.
+ const char input[] =
+ "heap profile: 357: 55227 [ 14653: 2624014] @ heapprofile\n"
+ " 95: 40940 [ 649: 114260] @\n"
+ " 77: 32546 [ 742: 106234] @ 0x0 0x0\n"
+ " 0: 0 [ 132: 4236] @ 0x0\n"
+ "\n"
+ "MAPPED_LIBRARIES:\n"
+ "1be411fc1000-1be4139e4000 rw-p 00000000 00:00 0\n"
+ "1be4139e4000-1be4139e5000 ---p 00000000 00:00 0\n";
+ const std::string kExpectedOutput =
+ "[{"
+ "\"current_allocs\": 357, "
+ "\"current_bytes\": 55227, "
+ "\"trace\": \"\"},\n"
+ "{\"current_allocs\": 95, "
+ "\"current_bytes\": 40940, "
+ "\"trace\": \"\"},\n"
+ "{\"current_allocs\": 77, "
+ "\"current_bytes\": 32546, "
+ "\"trace\": \"null \""
+ "}]\n";
+ std::string output;
+ AppendHeapProfileAsTraceFormat(input, &output);
+ EXPECT_EQ(kExpectedOutput, output);
+}
+
+TEST_F(TraceMemoryTest, StringFromHexAddress) {
+ EXPECT_STREQ("null", StringFromHexAddress("0x0"));
+ EXPECT_STREQ("error", StringFromHexAddress("not an address"));
+ const char kHello[] = "hello";
+ std::ostringstream hex_address;
+ hex_address << &kHello;
+ EXPECT_STREQ(kHello, StringFromHexAddress(hex_address.str()));
+}
+
+} // namespace debug
+} // namespace base
diff --git a/base/trace_event/trace_event_synthetic_delay.cc b/base/trace_event/trace_event_synthetic_delay.cc
new file mode 100644
index 0000000..3651611
--- /dev/null
+++ b/base/trace_event/trace_event_synthetic_delay.cc
@@ -0,0 +1,233 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/singleton.h"
+#include "base/trace_event/trace_event_synthetic_delay.h"
+
+namespace {
+const int kMaxSyntheticDelays = 32;
+} // namespace
+
+namespace base {
+namespace debug {
+
+TraceEventSyntheticDelayClock::TraceEventSyntheticDelayClock() {}
+TraceEventSyntheticDelayClock::~TraceEventSyntheticDelayClock() {}
+
+class TraceEventSyntheticDelayRegistry : public TraceEventSyntheticDelayClock {
+ public:
+ static TraceEventSyntheticDelayRegistry* GetInstance();
+
+ TraceEventSyntheticDelay* GetOrCreateDelay(const char* name);
+ void ResetAllDelays();
+
+ // TraceEventSyntheticDelayClock implementation.
+ base::TimeTicks Now() override;
+
+ private:
+ TraceEventSyntheticDelayRegistry();
+
+ friend struct DefaultSingletonTraits<TraceEventSyntheticDelayRegistry>;
+
+ Lock lock_;
+ TraceEventSyntheticDelay delays_[kMaxSyntheticDelays];
+ TraceEventSyntheticDelay dummy_delay_;
+ base::subtle::Atomic32 delay_count_;
+
+ DISALLOW_COPY_AND_ASSIGN(TraceEventSyntheticDelayRegistry);
+};
+
+TraceEventSyntheticDelay::TraceEventSyntheticDelay()
+ : mode_(STATIC), begin_count_(0), trigger_count_(0), clock_(NULL) {}
+
+TraceEventSyntheticDelay::~TraceEventSyntheticDelay() {}
+
+TraceEventSyntheticDelay* TraceEventSyntheticDelay::Lookup(
+ const std::string& name) {
+ return TraceEventSyntheticDelayRegistry::GetInstance()->GetOrCreateDelay(
+ name.c_str());
+}
+
+void TraceEventSyntheticDelay::Initialize(
+ const std::string& name,
+ TraceEventSyntheticDelayClock* clock) {
+ name_ = name;
+ clock_ = clock;
+}
+
+void TraceEventSyntheticDelay::SetTargetDuration(
+ base::TimeDelta target_duration) {
+ AutoLock lock(lock_);
+ target_duration_ = target_duration;
+ trigger_count_ = 0;
+ begin_count_ = 0;
+}
+
+void TraceEventSyntheticDelay::SetMode(Mode mode) {
+ AutoLock lock(lock_);
+ mode_ = mode;
+}
+
+void TraceEventSyntheticDelay::SetClock(TraceEventSyntheticDelayClock* clock) {
+ AutoLock lock(lock_);
+ clock_ = clock;
+}
+
+void TraceEventSyntheticDelay::Begin() {
+ // Note that we check for a non-zero target duration without locking to keep
+ // things quick for the common case when delays are disabled. Since the delay
+ // calculation is done with a lock held, it will always be correct. The only
+ // downside of this is that we may fail to apply some delays when the target
+ // duration changes.
+ ANNOTATE_BENIGN_RACE(&target_duration_, "Synthetic delay duration");
+ if (!target_duration_.ToInternalValue())
+ return;
+
+ base::TimeTicks start_time = clock_->Now();
+ {
+ AutoLock lock(lock_);
+ if (++begin_count_ != 1)
+ return;
+ end_time_ = CalculateEndTimeLocked(start_time);
+ }
+}
+
+void TraceEventSyntheticDelay::BeginParallel(base::TimeTicks* out_end_time) {
+ // See note in Begin().
+ ANNOTATE_BENIGN_RACE(&target_duration_, "Synthetic delay duration");
+ if (!target_duration_.ToInternalValue()) {
+ *out_end_time = base::TimeTicks();
+ return;
+ }
+
+ base::TimeTicks start_time = clock_->Now();
+ {
+ AutoLock lock(lock_);
+ *out_end_time = CalculateEndTimeLocked(start_time);
+ }
+}
+
+void TraceEventSyntheticDelay::End() {
+ // See note in Begin().
+ ANNOTATE_BENIGN_RACE(&target_duration_, "Synthetic delay duration");
+ if (!target_duration_.ToInternalValue())
+ return;
+
+ base::TimeTicks end_time;
+ {
+ AutoLock lock(lock_);
+ if (!begin_count_ || --begin_count_ != 0)
+ return;
+ end_time = end_time_;
+ }
+ if (!end_time.is_null())
+ ApplyDelay(end_time);
+}
+
+void TraceEventSyntheticDelay::EndParallel(base::TimeTicks end_time) {
+ if (!end_time.is_null())
+ ApplyDelay(end_time);
+}
+
+base::TimeTicks TraceEventSyntheticDelay::CalculateEndTimeLocked(
+ base::TimeTicks start_time) {
+ if (mode_ == ONE_SHOT && trigger_count_++)
+ return base::TimeTicks();
+ else if (mode_ == ALTERNATING && trigger_count_++ % 2)
+ return base::TimeTicks();
+ return start_time + target_duration_;
+}
+
+void TraceEventSyntheticDelay::ApplyDelay(base::TimeTicks end_time) {
+ TRACE_EVENT0("synthetic_delay", name_.c_str());
+ while (clock_->Now() < end_time) {
+ // Busy loop.
+ }
+}
+
+TraceEventSyntheticDelayRegistry*
+TraceEventSyntheticDelayRegistry::GetInstance() {
+ return Singleton<
+ TraceEventSyntheticDelayRegistry,
+ LeakySingletonTraits<TraceEventSyntheticDelayRegistry> >::get();
+}
+
+TraceEventSyntheticDelayRegistry::TraceEventSyntheticDelayRegistry()
+ : delay_count_(0) {}
+
+TraceEventSyntheticDelay* TraceEventSyntheticDelayRegistry::GetOrCreateDelay(
+ const char* name) {
+ // Try to find an existing delay first without locking to make the common case
+ // fast.
+ int delay_count = base::subtle::Acquire_Load(&delay_count_);
+ for (int i = 0; i < delay_count; ++i) {
+ if (!strcmp(name, delays_[i].name_.c_str()))
+ return &delays_[i];
+ }
+
+ AutoLock lock(lock_);
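+  // Re-check under the lock: another thread may have registered the delay
+  // between the unlocked scan above and acquiring |lock_|.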
+ delay_count = base::subtle::Acquire_Load(&delay_count_);
+ for (int i = 0; i < delay_count; ++i) {
+ if (!strcmp(name, delays_[i].name_.c_str()))
+ return &delays_[i];
+ }
+
+ DCHECK(delay_count < kMaxSyntheticDelays)
+ << "must increase kMaxSyntheticDelays";
+ if (delay_count >= kMaxSyntheticDelays)
+ return &dummy_delay_;
+
+ delays_[delay_count].Initialize(std::string(name), this);
+ base::subtle::Release_Store(&delay_count_, delay_count + 1);
+ return &delays_[delay_count];
+}
+
+base::TimeTicks TraceEventSyntheticDelayRegistry::Now() {
+ return base::TimeTicks::Now();
+}
+
+void TraceEventSyntheticDelayRegistry::ResetAllDelays() {
+ AutoLock lock(lock_);
+ int delay_count = base::subtle::Acquire_Load(&delay_count_);
+ for (int i = 0; i < delay_count; ++i) {
+ delays_[i].SetTargetDuration(base::TimeDelta());
+ delays_[i].SetClock(this);
+ }
+}
+
+void ResetTraceEventSyntheticDelays() {
+ TraceEventSyntheticDelayRegistry::GetInstance()->ResetAllDelays();
+}
+
+} // namespace debug
+} // namespace base
+
+namespace trace_event_internal {
+
+ScopedSyntheticDelay::ScopedSyntheticDelay(const char* name,
+ base::subtle::AtomicWord* impl_ptr)
+ : delay_impl_(GetOrCreateDelay(name, impl_ptr)) {
+ delay_impl_->BeginParallel(&end_time_);
+}
+
+ScopedSyntheticDelay::~ScopedSyntheticDelay() {
+ delay_impl_->EndParallel(end_time_);
+}
+
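+// Looks up the delay for |name| once and caches the resulting pointer in
+// |impl_ptr| so later calls from the same macro site skip the registry
+// lookup.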
+base::debug::TraceEventSyntheticDelay* GetOrCreateDelay(
+ const char* name,
+ base::subtle::AtomicWord* impl_ptr) {
+ base::debug::TraceEventSyntheticDelay* delay_impl =
+ reinterpret_cast<base::debug::TraceEventSyntheticDelay*>(
+ base::subtle::Acquire_Load(impl_ptr));
+ if (!delay_impl) {
+ delay_impl = base::debug::TraceEventSyntheticDelayRegistry::GetInstance()
+ ->GetOrCreateDelay(name);
+ base::subtle::Release_Store(
+ impl_ptr, reinterpret_cast<base::subtle::AtomicWord>(delay_impl));
+ }
+ return delay_impl;
+}
+
+} // namespace trace_event_internal
diff --git a/base/trace_event/trace_event_synthetic_delay.h b/base/trace_event/trace_event_synthetic_delay.h
new file mode 100644
index 0000000..b52f3b0
--- /dev/null
+++ b/base/trace_event/trace_event_synthetic_delay.h
@@ -0,0 +1,166 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// The synthetic delay framework makes it possible to dynamically inject
+// arbitrary delays into different parts of the codebase. This can be used,
+// for instance, for testing various task scheduling algorithms.
+//
+// The delays are specified in terms of a target duration for a given block of
+// code. If the code executes faster than the duration, the thread is made to
+// sleep until the deadline is met.
+//
+// Code can be instrumented for delays with two sets of macros. First, for
+// delays that should apply within a scope, use the following macro:
+//
+// TRACE_EVENT_SYNTHETIC_DELAY("cc.LayerTreeHost.DrawAndSwap");
+//
+// For delaying operations that span multiple scopes, use:
+//
+// TRACE_EVENT_SYNTHETIC_DELAY_BEGIN("cc.Scheduler.BeginMainFrame");
+// ...
+// TRACE_EVENT_SYNTHETIC_DELAY_END("cc.Scheduler.BeginMainFrame");
+//
+// Here BEGIN establishes the start time for the delay and END executes the
+// delay based on the remaining time. If BEGIN is called multiple times in a
+// row, END should be called a corresponding number of times. Only the last
+// call to END will have an effect.
+//
+// Note that a single delay may begin on one thread and end on another. This
+// implies that a single delay cannot be applied in several threads at once.
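+//
+// Delays are configured at runtime through the TraceEventSyntheticDelay API
+// declared below, e.g. (illustrative sketch):
+//
+//   base::debug::TraceEventSyntheticDelay* delay =
+//       base::debug::TraceEventSyntheticDelay::Lookup("test.Delay");
+//   delay->SetMode(base::debug::TraceEventSyntheticDelay::ONE_SHOT);
+//   delay->SetTargetDuration(base::TimeDelta::FromMilliseconds(100));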
+
+#ifndef BASE_TRACE_EVENT_TRACE_EVENT_SYNTHETIC_DELAY_H_
+#define BASE_TRACE_EVENT_TRACE_EVENT_SYNTHETIC_DELAY_H_
+
+#include "base/atomicops.h"
+#include "base/synchronization/lock.h"
+#include "base/time/time.h"
+#include "base/trace_event/trace_event.h"
+
+// Apply a named delay in the current scope.
+#define TRACE_EVENT_SYNTHETIC_DELAY(name) \
+ static base::subtle::AtomicWord INTERNAL_TRACE_EVENT_UID(impl_ptr) = 0; \
+ trace_event_internal::ScopedSyntheticDelay INTERNAL_TRACE_EVENT_UID(delay)( \
+ name, &INTERNAL_TRACE_EVENT_UID(impl_ptr));
+
+// Begin a named delay, establishing its timing start point. May be called
+// multiple times as long as the calls to TRACE_EVENT_SYNTHETIC_DELAY_END are
+// balanced. Only the first call records the timing start point.
+#define TRACE_EVENT_SYNTHETIC_DELAY_BEGIN(name) \
+ do { \
+ static base::subtle::AtomicWord impl_ptr = 0; \
+ trace_event_internal::GetOrCreateDelay(name, &impl_ptr)->Begin(); \
+ } while (false)
+
+// End a named delay. The delay is applied only if this call matches the
+// first corresponding call to TRACE_EVENT_SYNTHETIC_DELAY_BEGIN with the
+// same delay.
+#define TRACE_EVENT_SYNTHETIC_DELAY_END(name) \
+ do { \
+ static base::subtle::AtomicWord impl_ptr = 0; \
+ trace_event_internal::GetOrCreateDelay(name, &impl_ptr)->End(); \
+ } while (false)
+
+template <typename Type>
+struct DefaultSingletonTraits;
+
+namespace base {
+namespace debug {
+
+// Time source for computing delay durations. Used for testing.
+class TRACE_EVENT_API_CLASS_EXPORT TraceEventSyntheticDelayClock {
+ public:
+ TraceEventSyntheticDelayClock();
+ virtual ~TraceEventSyntheticDelayClock();
+ virtual base::TimeTicks Now() = 0;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(TraceEventSyntheticDelayClock);
+};
+
+// Single delay point instance.
+class TRACE_EVENT_API_CLASS_EXPORT TraceEventSyntheticDelay {
+ public:
+ enum Mode {
+ STATIC, // Apply the configured delay every time.
+ ONE_SHOT, // Apply the configured delay just once.
+ ALTERNATING // Apply the configured delay every other time.
+ };
+
+ // Returns an existing named delay instance or creates a new one with |name|.
+ static TraceEventSyntheticDelay* Lookup(const std::string& name);
+
+ void SetTargetDuration(TimeDelta target_duration);
+ void SetMode(Mode mode);
+ void SetClock(TraceEventSyntheticDelayClock* clock);
+
+ // Begin the delay, establishing its timing start point. May be called
+ // multiple times as long as the calls to End() are balanced. Only the first
+ // call records the timing start point.
+ void Begin();
+
+ // End the delay. The delay is applied only if this call matches the first
+ // corresponding call to Begin() with the same delay.
+ void End();
+
+ // Begin a parallel instance of the delay. Several parallel instances may be
+ // active simultaneously and will complete independently. The computed end
+ // time for the delay is stored in |out_end_time|, which should later be
+ // passed to EndParallel().
+ void BeginParallel(base::TimeTicks* out_end_time);
+
+ // End a previously started parallel delay. |end_time| is the delay end point
+ // computed by BeginParallel().
+ void EndParallel(base::TimeTicks end_time);
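+  //
+  // Illustrative parallel usage (mirrors the BeginParallel unit test):
+  //   base::TimeTicks end_time;
+  //   delay->BeginParallel(&end_time);
+  //   /* overlapping work */
+  //   delay->EndParallel(end_time);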
+
+ private:
+ TraceEventSyntheticDelay();
+ ~TraceEventSyntheticDelay();
+ friend class TraceEventSyntheticDelayRegistry;
+
+ void Initialize(const std::string& name,
+ TraceEventSyntheticDelayClock* clock);
+ base::TimeTicks CalculateEndTimeLocked(base::TimeTicks start_time);
+ void ApplyDelay(base::TimeTicks end_time);
+
+ Lock lock_;
+ Mode mode_;
+ std::string name_;
+ int begin_count_;
+ int trigger_count_;
+ base::TimeTicks end_time_;
+ base::TimeDelta target_duration_;
+ TraceEventSyntheticDelayClock* clock_;
+
+ DISALLOW_COPY_AND_ASSIGN(TraceEventSyntheticDelay);
+};
+
+// Set the target durations of all registered synthetic delay points to zero.
+TRACE_EVENT_API_CLASS_EXPORT void ResetTraceEventSyntheticDelays();
+
+} // namespace debug
+} // namespace base
+
+namespace trace_event_internal {
+
+// Helper class for scoped delays. Do not use directly.
+class TRACE_EVENT_API_CLASS_EXPORT ScopedSyntheticDelay {
+ public:
+ explicit ScopedSyntheticDelay(const char* name,
+ base::subtle::AtomicWord* impl_ptr);
+ ~ScopedSyntheticDelay();
+
+ private:
+ base::debug::TraceEventSyntheticDelay* delay_impl_;
+ base::TimeTicks end_time_;
+
+ DISALLOW_COPY_AND_ASSIGN(ScopedSyntheticDelay);
+};
+
+// Helper for registering delays. Do not use directly.
+TRACE_EVENT_API_CLASS_EXPORT base::debug::TraceEventSyntheticDelay*
+ GetOrCreateDelay(const char* name, base::subtle::AtomicWord* impl_ptr);
+
+} // namespace trace_event_internal
+
+#endif  // BASE_TRACE_EVENT_TRACE_EVENT_SYNTHETIC_DELAY_H_
diff --git a/base/trace_event/trace_event_synthetic_delay_unittest.cc b/base/trace_event/trace_event_synthetic_delay_unittest.cc
new file mode 100644
index 0000000..84ac75c
--- /dev/null
+++ b/base/trace_event/trace_event_synthetic_delay_unittest.cc
@@ -0,0 +1,154 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/trace_event/trace_event_synthetic_delay.h"
+
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+namespace debug {
+namespace {
+
+const int kTargetDurationMs = 100;
+// Allow some leeway in timings to make it possible to run these tests with a
+// wall clock time source too.
+const int kShortDurationMs = 10;
+
+} // namespace
+
+class TraceEventSyntheticDelayTest : public testing::Test,
+ public TraceEventSyntheticDelayClock {
+ public:
+ TraceEventSyntheticDelayTest() {}
+ ~TraceEventSyntheticDelayTest() override { ResetTraceEventSyntheticDelays(); }
+
+ // TraceEventSyntheticDelayClock implementation.
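+  // Each query advances the mock clock by kShortDurationMs / 10 so that
+  // ApplyDelay()'s busy loop terminates without real waiting.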
+ base::TimeTicks Now() override {
+ AdvanceTime(base::TimeDelta::FromMilliseconds(kShortDurationMs / 10));
+ return now_;
+ }
+
+ TraceEventSyntheticDelay* ConfigureDelay(const char* name) {
+ TraceEventSyntheticDelay* delay = TraceEventSyntheticDelay::Lookup(name);
+ delay->SetClock(this);
+ delay->SetTargetDuration(
+ base::TimeDelta::FromMilliseconds(kTargetDurationMs));
+ return delay;
+ }
+
+ void AdvanceTime(base::TimeDelta delta) { now_ += delta; }
+
+ int64 TestFunction() {
+ base::TimeTicks start = Now();
+ { TRACE_EVENT_SYNTHETIC_DELAY("test.Delay"); }
+ return (Now() - start).InMilliseconds();
+ }
+
+ int64 AsyncTestFunctionBegin() {
+ base::TimeTicks start = Now();
+ { TRACE_EVENT_SYNTHETIC_DELAY_BEGIN("test.AsyncDelay"); }
+ return (Now() - start).InMilliseconds();
+ }
+
+ int64 AsyncTestFunctionEnd() {
+ base::TimeTicks start = Now();
+ { TRACE_EVENT_SYNTHETIC_DELAY_END("test.AsyncDelay"); }
+ return (Now() - start).InMilliseconds();
+ }
+
+ private:
+ base::TimeTicks now_;
+
+ DISALLOW_COPY_AND_ASSIGN(TraceEventSyntheticDelayTest);
+};
+
+TEST_F(TraceEventSyntheticDelayTest, StaticDelay) {
+ TraceEventSyntheticDelay* delay = ConfigureDelay("test.Delay");
+ delay->SetMode(TraceEventSyntheticDelay::STATIC);
+ EXPECT_GE(TestFunction(), kTargetDurationMs);
+}
+
+TEST_F(TraceEventSyntheticDelayTest, OneShotDelay) {
+ TraceEventSyntheticDelay* delay = ConfigureDelay("test.Delay");
+ delay->SetMode(TraceEventSyntheticDelay::ONE_SHOT);
+ EXPECT_GE(TestFunction(), kTargetDurationMs);
+ EXPECT_LT(TestFunction(), kShortDurationMs);
+
+ delay->SetTargetDuration(
+ base::TimeDelta::FromMilliseconds(kTargetDurationMs));
+ EXPECT_GE(TestFunction(), kTargetDurationMs);
+}
+
+TEST_F(TraceEventSyntheticDelayTest, AlternatingDelay) {
+ TraceEventSyntheticDelay* delay = ConfigureDelay("test.Delay");
+ delay->SetMode(TraceEventSyntheticDelay::ALTERNATING);
+ EXPECT_GE(TestFunction(), kTargetDurationMs);
+ EXPECT_LT(TestFunction(), kShortDurationMs);
+ EXPECT_GE(TestFunction(), kTargetDurationMs);
+ EXPECT_LT(TestFunction(), kShortDurationMs);
+}
+
+TEST_F(TraceEventSyntheticDelayTest, AsyncDelay) {
+ ConfigureDelay("test.AsyncDelay");
+ EXPECT_LT(AsyncTestFunctionBegin(), kShortDurationMs);
+ EXPECT_GE(AsyncTestFunctionEnd(), kTargetDurationMs / 2);
+}
+
+TEST_F(TraceEventSyntheticDelayTest, AsyncDelayExceeded) {
+ ConfigureDelay("test.AsyncDelay");
+ EXPECT_LT(AsyncTestFunctionBegin(), kShortDurationMs);
+ AdvanceTime(base::TimeDelta::FromMilliseconds(kTargetDurationMs));
+ EXPECT_LT(AsyncTestFunctionEnd(), kShortDurationMs);
+}
+
+TEST_F(TraceEventSyntheticDelayTest, AsyncDelayNoActivation) {
+ ConfigureDelay("test.AsyncDelay");
+ EXPECT_LT(AsyncTestFunctionEnd(), kShortDurationMs);
+}
+
+TEST_F(TraceEventSyntheticDelayTest, AsyncDelayNested) {
+ ConfigureDelay("test.AsyncDelay");
+ EXPECT_LT(AsyncTestFunctionBegin(), kShortDurationMs);
+ EXPECT_LT(AsyncTestFunctionBegin(), kShortDurationMs);
+ EXPECT_LT(AsyncTestFunctionEnd(), kShortDurationMs);
+ EXPECT_GE(AsyncTestFunctionEnd(), kTargetDurationMs / 2);
+}
+
+TEST_F(TraceEventSyntheticDelayTest, AsyncDelayUnbalanced) {
+ ConfigureDelay("test.AsyncDelay");
+ EXPECT_LT(AsyncTestFunctionBegin(), kShortDurationMs);
+ EXPECT_GE(AsyncTestFunctionEnd(), kTargetDurationMs / 2);
+ EXPECT_LT(AsyncTestFunctionEnd(), kShortDurationMs);
+
+ EXPECT_LT(AsyncTestFunctionBegin(), kShortDurationMs);
+ EXPECT_GE(AsyncTestFunctionEnd(), kTargetDurationMs / 2);
+}
+
+TEST_F(TraceEventSyntheticDelayTest, ResetDelays) {
+ ConfigureDelay("test.Delay");
+ ResetTraceEventSyntheticDelays();
+ EXPECT_LT(TestFunction(), kShortDurationMs);
+}
+
+TEST_F(TraceEventSyntheticDelayTest, BeginParallel) {
+ TraceEventSyntheticDelay* delay = ConfigureDelay("test.AsyncDelay");
+ base::TimeTicks end_times[2];
+ base::TimeTicks start_time = Now();
+
+ delay->BeginParallel(&end_times[0]);
+ EXPECT_FALSE(end_times[0].is_null());
+
+ delay->BeginParallel(&end_times[1]);
+ EXPECT_FALSE(end_times[1].is_null());
+
+ delay->EndParallel(end_times[0]);
+ EXPECT_GE((Now() - start_time).InMilliseconds(), kTargetDurationMs);
+
+ start_time = Now();
+ delay->EndParallel(end_times[1]);
+ EXPECT_LT((Now() - start_time).InMilliseconds(), kShortDurationMs);
+}
+
+} // namespace debug
+} // namespace base
diff --git a/base/trace_event/trace_event_system_stats_monitor.cc b/base/trace_event/trace_event_system_stats_monitor.cc
new file mode 100644
index 0000000..6fa0174
--- /dev/null
+++ b/base/trace_event/trace_event_system_stats_monitor.cc
@@ -0,0 +1,133 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/trace_event/trace_event_system_stats_monitor.h"
+
+#include "base/debug/leak_annotations.h"
+#include "base/json/json_writer.h"
+#include "base/lazy_instance.h"
+#include "base/logging.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/strings/string_number_conversions.h"
+#include "base/strings/string_util.h"
+#include "base/thread_task_runner_handle.h"
+#include "base/threading/thread_local_storage.h"
+#include "base/trace_event/trace_event.h"
+
+namespace base {
+namespace debug {
+
+namespace {
+
+/////////////////////////////////////////////////////////////////////////////
+// Holds profiled system stats until the tracing system needs to serialize
+// them.
+class SystemStatsHolder : public base::debug::ConvertableToTraceFormat {
+ public:
+ SystemStatsHolder() { }
+
+ // Fills system_metrics_ with profiled system memory and disk stats.
+ // Uses the previous stats to compute rates if this is not the first profile.
+ void GetSystemProfilingStats();
+
+ // base::debug::ConvertableToTraceFormat overrides:
+ void AppendAsTraceFormat(std::string* out) const override {
+ AppendSystemProfileAsTraceFormat(system_stats_, out);
+ }
+
+ private:
+ ~SystemStatsHolder() override {}
+
+ SystemMetrics system_stats_;
+
+ DISALLOW_COPY_AND_ASSIGN(SystemStatsHolder);
+};
+
+void SystemStatsHolder::GetSystemProfilingStats() {
+ system_stats_ = SystemMetrics::Sample();
+}
+
+} // namespace
+
+//////////////////////////////////////////////////////////////////////////////
+
+TraceEventSystemStatsMonitor::TraceEventSystemStatsMonitor(
+ scoped_refptr<SingleThreadTaskRunner> task_runner)
+ : task_runner_(task_runner),
+ weak_factory_(this) {
+ // Force the "system_stats" category to show up in the trace viewer.
+ TraceLog::GetCategoryGroupEnabled(TRACE_DISABLED_BY_DEFAULT("system_stats"));
+
+ // Allow this to be instantiated on unsupported platforms, but don't run.
+ TraceLog::GetInstance()->AddEnabledStateObserver(this);
+}
+
+TraceEventSystemStatsMonitor::~TraceEventSystemStatsMonitor() {
+ if (dump_timer_.IsRunning())
+ StopProfiling();
+ TraceLog::GetInstance()->RemoveEnabledStateObserver(this);
+}
+
+void TraceEventSystemStatsMonitor::OnTraceLogEnabled() {
+ // Check to see if system tracing is enabled.
+ bool enabled;
+
+ TRACE_EVENT_CATEGORY_GROUP_ENABLED(TRACE_DISABLED_BY_DEFAULT(
+ "system_stats"), &enabled);
+ if (!enabled)
+ return;
+ task_runner_->PostTask(
+ FROM_HERE,
+ base::Bind(&TraceEventSystemStatsMonitor::StartProfiling,
+ weak_factory_.GetWeakPtr()));
+}
+
+void TraceEventSystemStatsMonitor::OnTraceLogDisabled() {
+ task_runner_->PostTask(
+ FROM_HERE,
+ base::Bind(&TraceEventSystemStatsMonitor::StopProfiling,
+ weak_factory_.GetWeakPtr()));
+}
+
+void TraceEventSystemStatsMonitor::StartProfiling() {
+  // Guard against the tracing framework enabling profiling more than once.
+ if (dump_timer_.IsRunning())
+ return;
+
+  dump_timer_.Start(
+      FROM_HERE,
+      TimeDelta::FromMilliseconds(kSamplingIntervalMilliseconds),
+      base::Bind(&TraceEventSystemStatsMonitor::DumpSystemStats,
+                 weak_factory_.GetWeakPtr()));
+}
+
+// If system tracing is enabled, dumps a profile to the tracing system.
+void TraceEventSystemStatsMonitor::DumpSystemStats() {
+ scoped_refptr<SystemStatsHolder> dump_holder = new SystemStatsHolder();
+ dump_holder->GetSystemProfilingStats();
+
+ TRACE_EVENT_OBJECT_SNAPSHOT_WITH_ID(
+ TRACE_DISABLED_BY_DEFAULT("system_stats"),
+ "base::TraceEventSystemStatsMonitor::SystemStats",
+ this,
+ scoped_refptr<ConvertableToTraceFormat>(dump_holder));
+}
+
+void TraceEventSystemStatsMonitor::StopProfiling() {
+ dump_timer_.Stop();
+}
+
+bool TraceEventSystemStatsMonitor::IsTimerRunningForTest() const {
+ return dump_timer_.IsRunning();
+}
+
+void AppendSystemProfileAsTraceFormat(const SystemMetrics& system_metrics,
+ std::string* output) {
+ std::string tmp;
+ base::JSONWriter::Write(system_metrics.ToValue().get(), &tmp);
+ *output += tmp;
+}
+
+} // namespace debug
+} // namespace base
diff --git a/base/trace_event/trace_event_system_stats_monitor.h b/base/trace_event/trace_event_system_stats_monitor.h
new file mode 100644
index 0000000..08fbfea
--- /dev/null
+++ b/base/trace_event/trace_event_system_stats_monitor.h
@@ -0,0 +1,75 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TRACE_EVENT_TRACE_EVENT_SYSTEM_STATS_MONITOR_H_
+#define BASE_TRACE_EVENT_TRACE_EVENT_SYSTEM_STATS_MONITOR_H_
+
+#include "base/base_export.h"
+#include "base/gtest_prod_util.h"
+#include "base/memory/ref_counted.h"
+#include "base/memory/weak_ptr.h"
+#include "base/process/process_metrics.h"
+#include "base/timer/timer.h"
+#include "base/trace_event/trace_event_impl.h"
+
+namespace base {
+
+class SingleThreadTaskRunner;
+
+namespace debug {
+
+// Watches for chrome://tracing to be enabled or disabled. When tracing is
+// enabled, also enables system events profiling. This class is the preferred
+// way to turn system tracing on and off.
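+//
+// Typical setup (illustrative; the embedder chooses the task runner):
+//
+//   base::debug::TraceEventSystemStatsMonitor monitor(
+//       base::ThreadTaskRunnerHandle::Get());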
+class BASE_EXPORT TraceEventSystemStatsMonitor
+ : public TraceLog::EnabledStateObserver {
+ public:
+ // Length of time interval between stat profiles.
+ static const int kSamplingIntervalMilliseconds = 2000;
+
+ // |task_runner| must be the primary thread for the client
+ // process, e.g. the UI thread in a browser.
+ explicit TraceEventSystemStatsMonitor(
+ scoped_refptr<SingleThreadTaskRunner> task_runner);
+
+ virtual ~TraceEventSystemStatsMonitor();
+
+  // base::debug::TraceLog::EnabledStateObserver overrides:
+ void OnTraceLogEnabled() override;
+ void OnTraceLogDisabled() override;
+
+ // Retrieves system profiling at the current time.
+ void DumpSystemStats();
+
+ private:
+ FRIEND_TEST_ALL_PREFIXES(TraceSystemStatsMonitorTest,
+ TraceEventSystemStatsMonitor);
+
+ bool IsTimerRunningForTest() const;
+
+ void StartProfiling();
+
+ void StopProfiling();
+
+ // Ensures the observer starts and stops tracing on the primary thread.
+ scoped_refptr<SingleThreadTaskRunner> task_runner_;
+
+ // Timer to schedule system profile dumps.
+ RepeatingTimer<TraceEventSystemStatsMonitor> dump_timer_;
+
+ WeakPtrFactory<TraceEventSystemStatsMonitor> weak_factory_;
+
+ DISALLOW_COPY_AND_ASSIGN(TraceEventSystemStatsMonitor);
+};
+
+// Converts system memory profiling stats in |system_stats| to trace event
+// compatible JSON and appends to |output|. Visible for testing.
+BASE_EXPORT void AppendSystemProfileAsTraceFormat(
+    const SystemMetrics& system_stats,
+    std::string* output);
+
+} // namespace debug
+} // namespace base
+
+#endif // BASE_TRACE_EVENT_TRACE_EVENT_SYSTEM_STATS_MONITOR_H_
diff --git a/base/trace_event/trace_event_system_stats_monitor_unittest.cc b/base/trace_event/trace_event_system_stats_monitor_unittest.cc
new file mode 100644
index 0000000..143ac4a
--- /dev/null
+++ b/base/trace_event/trace_event_system_stats_monitor_unittest.cc
@@ -0,0 +1,66 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/trace_event/trace_event_system_stats_monitor.h"
+
+#include <sstream>
+#include <string>
+
+#include "base/message_loop/message_loop.h"
+#include "base/trace_event/trace_event_impl.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+namespace debug {
+
+#if !defined(OS_IOS)
+// Tests for the system stats monitor.
+// Exists as a class so it can be a friend of TraceEventSystemStatsMonitor.
+class TraceSystemStatsMonitorTest : public testing::Test {
+ public:
+ TraceSystemStatsMonitorTest() {}
+ ~TraceSystemStatsMonitorTest() override {}
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(TraceSystemStatsMonitorTest);
+};
+
+//////////////////////////////////////////////////////////////////////////////
+
+TEST_F(TraceSystemStatsMonitorTest, TraceEventSystemStatsMonitor) {
+ MessageLoop message_loop;
+
+ // Start with no observers of the TraceLog.
+ EXPECT_EQ(0u, TraceLog::GetInstance()->GetObserverCountForTest());
+
+ // Creating a system stats monitor adds it to the TraceLog observer list.
+ scoped_ptr<TraceEventSystemStatsMonitor> system_stats_monitor(
+ new TraceEventSystemStatsMonitor(
+ message_loop.message_loop_proxy()));
+ EXPECT_EQ(1u, TraceLog::GetInstance()->GetObserverCountForTest());
+ EXPECT_TRUE(
+ TraceLog::GetInstance()->HasEnabledStateObserver(
+ system_stats_monitor.get()));
+
+  // By default the observer isn't dumping system stats.
+ EXPECT_FALSE(system_stats_monitor->IsTimerRunningForTest());
+
+ // Simulate enabling tracing.
+ system_stats_monitor->StartProfiling();
+ message_loop.RunUntilIdle();
+ EXPECT_TRUE(system_stats_monitor->IsTimerRunningForTest());
+
+ // Simulate disabling tracing.
+ system_stats_monitor->StopProfiling();
+ message_loop.RunUntilIdle();
+ EXPECT_FALSE(system_stats_monitor->IsTimerRunningForTest());
+
+ // Deleting the observer removes it from the TraceLog observer list.
+ system_stats_monitor.reset();
+ EXPECT_EQ(0u, TraceLog::GetInstance()->GetObserverCountForTest());
+}
+#endif // !defined(OS_IOS)
+
+} // namespace debug
+} // namespace base
diff --git a/base/trace_event/trace_event_unittest.cc b/base/trace_event/trace_event_unittest.cc
new file mode 100644
index 0000000..47b5ab6
--- /dev/null
+++ b/base/trace_event/trace_event_unittest.cc
@@ -0,0 +1,3084 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <math.h>
+#include <cstdlib>
+
+#include "base/bind.h"
+#include "base/command_line.h"
+#include "base/json/json_reader.h"
+#include "base/json/json_writer.h"
+#include "base/memory/ref_counted_memory.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/memory/singleton.h"
+#include "base/process/process_handle.h"
+#include "base/strings/stringprintf.h"
+#include "base/synchronization/waitable_event.h"
+#include "base/threading/platform_thread.h"
+#include "base/threading/thread.h"
+#include "base/time/time.h"
+#include "base/trace_event/trace_event.h"
+#include "base/trace_event/trace_event_synthetic_delay.h"
+#include "base/values.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+namespace debug {
+
+namespace {
+
+enum CompareOp {
+ IS_EQUAL,
+ IS_NOT_EQUAL,
+};
+
+struct JsonKeyValue {
+ const char* key;
+ const char* value;
+ CompareOp op;
+};
+
+const int kThreadId = 42;
+const int kAsyncId = 5;
+const char kAsyncIdStr[] = "0x5";
+const int kAsyncId2 = 6;
+const char kAsyncId2Str[] = "0x6";
+
+class TraceEventTestFixture : public testing::Test {
+ public:
+ void OnTraceDataCollected(
+ WaitableEvent* flush_complete_event,
+ const scoped_refptr<base::RefCountedString>& events_str,
+ bool has_more_events);
+ void OnWatchEventMatched() {
+ ++event_watch_notification_;
+ }
+ DictionaryValue* FindMatchingTraceEntry(const JsonKeyValue* key_values);
+ DictionaryValue* FindNamePhase(const char* name, const char* phase);
+ DictionaryValue* FindNamePhaseKeyValue(const char* name,
+ const char* phase,
+ const char* key,
+ const char* value);
+ void DropTracedMetadataRecords();
+ bool FindMatchingValue(const char* key,
+ const char* value);
+ bool FindNonMatchingValue(const char* key,
+ const char* value);
+ void Clear() {
+ trace_parsed_.Clear();
+ json_output_.json_output.clear();
+ }
+
+ void BeginTrace() {
+ BeginSpecificTrace("*");
+ }
+
+ void BeginSpecificTrace(const std::string& filter) {
+ event_watch_notification_ = 0;
+ TraceLog::GetInstance()->SetEnabled(
+ CategoryFilter(filter),
+ TraceLog::RECORDING_MODE,
+ TraceOptions());
+ }
+
+ void EndTraceAndFlush() {
+ WaitableEvent flush_complete_event(false, false);
+ EndTraceAndFlushAsync(&flush_complete_event);
+ flush_complete_event.Wait();
+ }
+
+  // Used when testing thread-local buffers, which requires the thread
+  // initiating the flush to have a message loop.
+ void EndTraceAndFlushInThreadWithMessageLoop() {
+ WaitableEvent flush_complete_event(false, false);
+ Thread flush_thread("flush");
+ flush_thread.Start();
+ flush_thread.message_loop()->PostTask(FROM_HERE,
+ base::Bind(&TraceEventTestFixture::EndTraceAndFlushAsync,
+ base::Unretained(this),
+ &flush_complete_event));
+ flush_complete_event.Wait();
+ }
+
+ void EndTraceAndFlushAsync(WaitableEvent* flush_complete_event) {
+ TraceLog::GetInstance()->SetDisabled();
+ TraceLog::GetInstance()->Flush(
+ base::Bind(&TraceEventTestFixture::OnTraceDataCollected,
+ base::Unretained(static_cast<TraceEventTestFixture*>(this)),
+ base::Unretained(flush_complete_event)));
+ }
+
+ void FlushMonitoring() {
+ WaitableEvent flush_complete_event(false, false);
+ FlushMonitoring(&flush_complete_event);
+ flush_complete_event.Wait();
+ }
+
+ void FlushMonitoring(WaitableEvent* flush_complete_event) {
+ TraceLog::GetInstance()->FlushButLeaveBufferIntact(
+ base::Bind(&TraceEventTestFixture::OnTraceDataCollected,
+ base::Unretained(static_cast<TraceEventTestFixture*>(this)),
+ base::Unretained(flush_complete_event)));
+ }
+
+ void SetUp() override {
+ const char* name = PlatformThread::GetName();
+ old_thread_name_ = name ? strdup(name) : NULL;
+
+ TraceLog::DeleteForTesting();
+ TraceLog* tracelog = TraceLog::GetInstance();
+ ASSERT_TRUE(tracelog);
+ ASSERT_FALSE(tracelog->IsEnabled());
+ trace_buffer_.SetOutputCallback(json_output_.GetCallback());
+ event_watch_notification_ = 0;
+ }
+ void TearDown() override {
+ if (TraceLog::GetInstance())
+ EXPECT_FALSE(TraceLog::GetInstance()->IsEnabled());
+ PlatformThread::SetName(old_thread_name_ ? old_thread_name_ : "");
+ free(old_thread_name_);
+ old_thread_name_ = NULL;
+ // We want our singleton torn down after each test.
+ TraceLog::DeleteForTesting();
+ }
+
+ char* old_thread_name_;
+ ListValue trace_parsed_;
+ TraceResultBuffer trace_buffer_;
+ TraceResultBuffer::SimpleOutput json_output_;
+ int event_watch_notification_;
+
+ private:
+ // We want our singleton torn down after each test.
+ ShadowingAtExitManager at_exit_manager_;
+ Lock lock_;
+};
+
+void TraceEventTestFixture::OnTraceDataCollected(
+ WaitableEvent* flush_complete_event,
+ const scoped_refptr<base::RefCountedString>& events_str,
+ bool has_more_events) {
+ AutoLock lock(lock_);
+ json_output_.json_output.clear();
+ trace_buffer_.Start();
+ trace_buffer_.AddFragment(events_str->data());
+ trace_buffer_.Finish();
+
+ scoped_ptr<Value> root;
+ root.reset(base::JSONReader::Read(json_output_.json_output,
+ JSON_PARSE_RFC | JSON_DETACHABLE_CHILDREN));
+
+ if (!root.get()) {
+ LOG(ERROR) << json_output_.json_output;
+ }
+
+ ListValue* root_list = NULL;
+ ASSERT_TRUE(root.get());
+ ASSERT_TRUE(root->GetAsList(&root_list));
+
+ // Move items into our aggregate collection
+ while (root_list->GetSize()) {
+ scoped_ptr<Value> item;
+ root_list->Remove(0, &item);
+ trace_parsed_.Append(item.release());
+ }
+
+ if (!has_more_events)
+ flush_complete_event->Signal();
+}
+
+static bool CompareJsonValues(const std::string& lhs,
+ const std::string& rhs,
+ CompareOp op) {
+ switch (op) {
+ case IS_EQUAL:
+ return lhs == rhs;
+ case IS_NOT_EQUAL:
+ return lhs != rhs;
+ default:
+ CHECK(0);
+ }
+ return false;
+}
+
+static bool IsKeyValueInDict(const JsonKeyValue* key_value,
+ DictionaryValue* dict) {
+ Value* value = NULL;
+ std::string value_str;
+ if (dict->Get(key_value->key, &value) &&
+ value->GetAsString(&value_str) &&
+ CompareJsonValues(value_str, key_value->value, key_value->op))
+ return true;
+
+ // Recurse to test arguments
+ DictionaryValue* args_dict = NULL;
+ dict->GetDictionary("args", &args_dict);
+ if (args_dict)
+ return IsKeyValueInDict(key_value, args_dict);
+
+ return false;
+}
+
+static bool IsAllKeyValueInDict(const JsonKeyValue* key_values,
+ DictionaryValue* dict) {
+ // Scan all key_values, they must all be present and equal.
+ while (key_values && key_values->key) {
+ if (!IsKeyValueInDict(key_values, dict))
+ return false;
+ ++key_values;
+ }
+ return true;
+}
+
+DictionaryValue* TraceEventTestFixture::FindMatchingTraceEntry(
+ const JsonKeyValue* key_values) {
+ // Scan all items
+ size_t trace_parsed_count = trace_parsed_.GetSize();
+ for (size_t i = 0; i < trace_parsed_count; i++) {
+ Value* value = NULL;
+ trace_parsed_.Get(i, &value);
+ if (!value || value->GetType() != Value::TYPE_DICTIONARY)
+ continue;
+ DictionaryValue* dict = static_cast<DictionaryValue*>(value);
+
+ if (IsAllKeyValueInDict(key_values, dict))
+ return dict;
+ }
+ return NULL;
+}
+
+void TraceEventTestFixture::DropTracedMetadataRecords() {
+ scoped_ptr<ListValue> old_trace_parsed(trace_parsed_.DeepCopy());
+ size_t old_trace_parsed_size = old_trace_parsed->GetSize();
+ trace_parsed_.Clear();
+
+ for (size_t i = 0; i < old_trace_parsed_size; i++) {
+ Value* value = NULL;
+ old_trace_parsed->Get(i, &value);
+ if (!value || value->GetType() != Value::TYPE_DICTIONARY) {
+ trace_parsed_.Append(value->DeepCopy());
+ continue;
+ }
+ DictionaryValue* dict = static_cast<DictionaryValue*>(value);
+ std::string tmp;
+ if (dict->GetString("ph", &tmp) && tmp == "M")
+ continue;
+
+ trace_parsed_.Append(value->DeepCopy());
+ }
+}
+
+DictionaryValue* TraceEventTestFixture::FindNamePhase(const char* name,
+ const char* phase) {
+ JsonKeyValue key_values[] = {
+ {"name", name, IS_EQUAL},
+ {"ph", phase, IS_EQUAL},
+ {0, 0, IS_EQUAL}
+ };
+ return FindMatchingTraceEntry(key_values);
+}
+
+DictionaryValue* TraceEventTestFixture::FindNamePhaseKeyValue(
+ const char* name,
+ const char* phase,
+ const char* key,
+ const char* value) {
+ JsonKeyValue key_values[] = {
+ {"name", name, IS_EQUAL},
+ {"ph", phase, IS_EQUAL},
+ {key, value, IS_EQUAL},
+ {0, 0, IS_EQUAL}
+ };
+ return FindMatchingTraceEntry(key_values);
+}
+
+bool TraceEventTestFixture::FindMatchingValue(const char* key,
+ const char* value) {
+ JsonKeyValue key_values[] = {
+ {key, value, IS_EQUAL},
+ {0, 0, IS_EQUAL}
+ };
+ return FindMatchingTraceEntry(key_values);
+}
+
+bool TraceEventTestFixture::FindNonMatchingValue(const char* key,
+ const char* value) {
+ JsonKeyValue key_values[] = {
+ {key, value, IS_NOT_EQUAL},
+ {0, 0, IS_EQUAL}
+ };
+ return FindMatchingTraceEntry(key_values);
+}
+
+bool IsStringInDict(const char* string_to_match, const DictionaryValue* dict) {
+ for (DictionaryValue::Iterator it(*dict); !it.IsAtEnd(); it.Advance()) {
+ if (it.key().find(string_to_match) != std::string::npos)
+ return true;
+
+ std::string value_str;
+ it.value().GetAsString(&value_str);
+ if (value_str.find(string_to_match) != std::string::npos)
+ return true;
+ }
+
+ // Recurse to test arguments
+ const DictionaryValue* args_dict = NULL;
+ dict->GetDictionary("args", &args_dict);
+ if (args_dict)
+ return IsStringInDict(string_to_match, args_dict);
+
+ return false;
+}
+
+const DictionaryValue* FindTraceEntry(
+ const ListValue& trace_parsed,
+ const char* string_to_match,
+ const DictionaryValue* match_after_this_item = NULL) {
+ // Scan all items
+ size_t trace_parsed_count = trace_parsed.GetSize();
+ for (size_t i = 0; i < trace_parsed_count; i++) {
+ const Value* value = NULL;
+ trace_parsed.Get(i, &value);
+ if (match_after_this_item) {
+ if (value == match_after_this_item)
+ match_after_this_item = NULL;
+ continue;
+ }
+ if (!value || value->GetType() != Value::TYPE_DICTIONARY)
+ continue;
+ const DictionaryValue* dict = static_cast<const DictionaryValue*>(value);
+
+ if (IsStringInDict(string_to_match, dict))
+ return dict;
+ }
+ return NULL;
+}
+
+std::vector<const DictionaryValue*> FindTraceEntries(
+ const ListValue& trace_parsed,
+ const char* string_to_match) {
+ std::vector<const DictionaryValue*> hits;
+ size_t trace_parsed_count = trace_parsed.GetSize();
+ for (size_t i = 0; i < trace_parsed_count; i++) {
+ const Value* value = NULL;
+ trace_parsed.Get(i, &value);
+ if (!value || value->GetType() != Value::TYPE_DICTIONARY)
+ continue;
+ const DictionaryValue* dict = static_cast<const DictionaryValue*>(value);
+
+ if (IsStringInDict(string_to_match, dict))
+ hits.push_back(dict);
+ }
+ return hits;
+}
+
+const char kControlCharacters[] = "\001\002\003\n\r";
+
+void TraceWithAllMacroVariants(WaitableEvent* task_complete_event) {
+ {
+ TRACE_EVENT_BEGIN_ETW("TRACE_EVENT_BEGIN_ETW call", 0x1122, "extrastring1");
+ TRACE_EVENT_END_ETW("TRACE_EVENT_END_ETW call", 0x3344, "extrastring2");
+ TRACE_EVENT_INSTANT_ETW("TRACE_EVENT_INSTANT_ETW call",
+ 0x5566, "extrastring3");
+
+ TRACE_EVENT0("all", "TRACE_EVENT0 call");
+ TRACE_EVENT1("all", "TRACE_EVENT1 call", "name1", "value1");
+ TRACE_EVENT2("all", "TRACE_EVENT2 call",
+ "name1", "\"value1\"",
+ "name2", "value\\2");
+
+ TRACE_EVENT_INSTANT0("all", "TRACE_EVENT_INSTANT0 call",
+ TRACE_EVENT_SCOPE_GLOBAL);
+ TRACE_EVENT_INSTANT1("all", "TRACE_EVENT_INSTANT1 call",
+ TRACE_EVENT_SCOPE_PROCESS, "name1", "value1");
+ TRACE_EVENT_INSTANT2("all", "TRACE_EVENT_INSTANT2 call",
+ TRACE_EVENT_SCOPE_THREAD,
+ "name1", "value1",
+ "name2", "value2");
+
+ TRACE_EVENT_BEGIN0("all", "TRACE_EVENT_BEGIN0 call");
+ TRACE_EVENT_BEGIN1("all", "TRACE_EVENT_BEGIN1 call", "name1", "value1");
+ TRACE_EVENT_BEGIN2("all", "TRACE_EVENT_BEGIN2 call",
+ "name1", "value1",
+ "name2", "value2");
+
+ TRACE_EVENT_END0("all", "TRACE_EVENT_END0 call");
+ TRACE_EVENT_END1("all", "TRACE_EVENT_END1 call", "name1", "value1");
+ TRACE_EVENT_END2("all", "TRACE_EVENT_END2 call",
+ "name1", "value1",
+ "name2", "value2");
+
+ TRACE_EVENT_ASYNC_BEGIN0("all", "TRACE_EVENT_ASYNC_BEGIN0 call", kAsyncId);
+ TRACE_EVENT_ASYNC_BEGIN1("all", "TRACE_EVENT_ASYNC_BEGIN1 call", kAsyncId,
+ "name1", "value1");
+ TRACE_EVENT_ASYNC_BEGIN2("all", "TRACE_EVENT_ASYNC_BEGIN2 call", kAsyncId,
+ "name1", "value1",
+ "name2", "value2");
+
+ TRACE_EVENT_ASYNC_STEP_INTO0("all", "TRACE_EVENT_ASYNC_STEP_INTO0 call",
+ kAsyncId, "step_begin1");
+ TRACE_EVENT_ASYNC_STEP_INTO1("all", "TRACE_EVENT_ASYNC_STEP_INTO1 call",
+ kAsyncId, "step_begin2", "name1", "value1");
+
+ TRACE_EVENT_ASYNC_END0("all", "TRACE_EVENT_ASYNC_END0 call", kAsyncId);
+ TRACE_EVENT_ASYNC_END1("all", "TRACE_EVENT_ASYNC_END1 call", kAsyncId,
+ "name1", "value1");
+ TRACE_EVENT_ASYNC_END2("all", "TRACE_EVENT_ASYNC_END2 call", kAsyncId,
+ "name1", "value1",
+ "name2", "value2");
+
+ TRACE_EVENT_BEGIN_ETW("TRACE_EVENT_BEGIN_ETW0 call", kAsyncId, NULL);
+ TRACE_EVENT_BEGIN_ETW("TRACE_EVENT_BEGIN_ETW1 call", kAsyncId, "value");
+ TRACE_EVENT_END_ETW("TRACE_EVENT_END_ETW0 call", kAsyncId, NULL);
+ TRACE_EVENT_END_ETW("TRACE_EVENT_END_ETW1 call", kAsyncId, "value");
+ TRACE_EVENT_INSTANT_ETW("TRACE_EVENT_INSTANT_ETW0 call", kAsyncId, NULL);
+ TRACE_EVENT_INSTANT_ETW("TRACE_EVENT_INSTANT_ETW1 call", kAsyncId, "value");
+
+ TRACE_COUNTER1("all", "TRACE_COUNTER1 call", 31415);
+ TRACE_COUNTER2("all", "TRACE_COUNTER2 call",
+ "a", 30000,
+ "b", 1415);
+
+ TRACE_COUNTER_ID1("all", "TRACE_COUNTER_ID1 call", 0x319009, 31415);
+ TRACE_COUNTER_ID2("all", "TRACE_COUNTER_ID2 call", 0x319009,
+ "a", 30000, "b", 1415);
+
+ TRACE_EVENT_COPY_BEGIN_WITH_ID_TID_AND_TIMESTAMP0("all",
+ "TRACE_EVENT_COPY_BEGIN_WITH_ID_TID_AND_TIMESTAMP0 call",
+ kAsyncId, kThreadId, 12345);
+ TRACE_EVENT_COPY_END_WITH_ID_TID_AND_TIMESTAMP0("all",
+ "TRACE_EVENT_COPY_END_WITH_ID_TID_AND_TIMESTAMP0 call",
+ kAsyncId, kThreadId, 23456);
+
+ TRACE_EVENT_BEGIN_WITH_ID_TID_AND_TIMESTAMP0("all",
+ "TRACE_EVENT_BEGIN_WITH_ID_TID_AND_TIMESTAMP0 call",
+ kAsyncId2, kThreadId, 34567);
+ TRACE_EVENT_ASYNC_STEP_PAST0("all", "TRACE_EVENT_ASYNC_STEP_PAST0 call",
+ kAsyncId2, "step_end1");
+ TRACE_EVENT_ASYNC_STEP_PAST1("all", "TRACE_EVENT_ASYNC_STEP_PAST1 call",
+ kAsyncId2, "step_end2", "name1", "value1");
+
+ TRACE_EVENT_END_WITH_ID_TID_AND_TIMESTAMP0("all",
+ "TRACE_EVENT_END_WITH_ID_TID_AND_TIMESTAMP0 call",
+ kAsyncId2, kThreadId, 45678);
+
+ TRACE_EVENT_OBJECT_CREATED_WITH_ID("all", "tracked object 1", 0x42);
+ TRACE_EVENT_OBJECT_SNAPSHOT_WITH_ID(
+ "all", "tracked object 1", 0x42, "hello");
+ TRACE_EVENT_OBJECT_DELETED_WITH_ID("all", "tracked object 1", 0x42);
+
+ TraceScopedTrackableObject<int> trackable("all", "tracked object 2",
+ 0x2128506);
+ trackable.snapshot("world");
+
+ TRACE_EVENT1(kControlCharacters, kControlCharacters,
+ kControlCharacters, kControlCharacters);
+ } // Scope close causes TRACE_EVENT0 etc to send their END events.
+
+ if (task_complete_event)
+ task_complete_event->Signal();
+}
+
+void ValidateAllTraceMacrosCreatedData(const ListValue& trace_parsed) {
+ const DictionaryValue* item = NULL;
+
+#define EXPECT_FIND_(string) \
+ item = FindTraceEntry(trace_parsed, string); \
+ EXPECT_TRUE(item);
+#define EXPECT_NOT_FIND_(string) \
+ item = FindTraceEntry(trace_parsed, string); \
+ EXPECT_FALSE(item);
+#define EXPECT_SUB_FIND_(string) \
+ if (item) \
+ EXPECT_TRUE(IsStringInDict(string, item));
+
+ EXPECT_FIND_("ETW Trace Event");
+ EXPECT_FIND_("all");
+ EXPECT_FIND_("TRACE_EVENT_BEGIN_ETW call");
+ {
+ std::string str_val;
+ EXPECT_TRUE(item && item->GetString("args.id", &str_val));
+ EXPECT_STREQ("0x1122", str_val.c_str());
+ }
+ EXPECT_SUB_FIND_("extrastring1");
+ EXPECT_FIND_("TRACE_EVENT_END_ETW call");
+ EXPECT_FIND_("TRACE_EVENT_INSTANT_ETW call");
+ EXPECT_FIND_("TRACE_EVENT0 call");
+ {
+ std::string ph;
+ std::string ph_end;
+ EXPECT_TRUE((item = FindTraceEntry(trace_parsed, "TRACE_EVENT0 call")));
+ EXPECT_TRUE((item && item->GetString("ph", &ph)));
+ EXPECT_EQ("X", ph);
+ item = FindTraceEntry(trace_parsed, "TRACE_EVENT0 call", item);
+ EXPECT_FALSE(item);
+ }
+ EXPECT_FIND_("TRACE_EVENT1 call");
+ EXPECT_SUB_FIND_("name1");
+ EXPECT_SUB_FIND_("value1");
+ EXPECT_FIND_("TRACE_EVENT2 call");
+ EXPECT_SUB_FIND_("name1");
+ EXPECT_SUB_FIND_("\"value1\"");
+ EXPECT_SUB_FIND_("name2");
+ EXPECT_SUB_FIND_("value\\2");
+
+ EXPECT_FIND_("TRACE_EVENT_INSTANT0 call");
+ {
+ std::string scope;
+ EXPECT_TRUE((item && item->GetString("s", &scope)));
+ EXPECT_EQ("g", scope);
+ }
+ EXPECT_FIND_("TRACE_EVENT_INSTANT1 call");
+ {
+ std::string scope;
+ EXPECT_TRUE((item && item->GetString("s", &scope)));
+ EXPECT_EQ("p", scope);
+ }
+ EXPECT_SUB_FIND_("name1");
+ EXPECT_SUB_FIND_("value1");
+ EXPECT_FIND_("TRACE_EVENT_INSTANT2 call");
+ {
+ std::string scope;
+ EXPECT_TRUE((item && item->GetString("s", &scope)));
+ EXPECT_EQ("t", scope);
+ }
+ EXPECT_SUB_FIND_("name1");
+ EXPECT_SUB_FIND_("value1");
+ EXPECT_SUB_FIND_("name2");
+ EXPECT_SUB_FIND_("value2");
+
+ EXPECT_FIND_("TRACE_EVENT_BEGIN0 call");
+ EXPECT_FIND_("TRACE_EVENT_BEGIN1 call");
+ EXPECT_SUB_FIND_("name1");
+ EXPECT_SUB_FIND_("value1");
+ EXPECT_FIND_("TRACE_EVENT_BEGIN2 call");
+ EXPECT_SUB_FIND_("name1");
+ EXPECT_SUB_FIND_("value1");
+ EXPECT_SUB_FIND_("name2");
+ EXPECT_SUB_FIND_("value2");
+
+ EXPECT_FIND_("TRACE_EVENT_END0 call");
+ EXPECT_FIND_("TRACE_EVENT_END1 call");
+ EXPECT_SUB_FIND_("name1");
+ EXPECT_SUB_FIND_("value1");
+ EXPECT_FIND_("TRACE_EVENT_END2 call");
+ EXPECT_SUB_FIND_("name1");
+ EXPECT_SUB_FIND_("value1");
+ EXPECT_SUB_FIND_("name2");
+ EXPECT_SUB_FIND_("value2");
+
+ EXPECT_FIND_("TRACE_EVENT_ASYNC_BEGIN0 call");
+ EXPECT_SUB_FIND_("id");
+ EXPECT_SUB_FIND_(kAsyncIdStr);
+ EXPECT_FIND_("TRACE_EVENT_ASYNC_BEGIN1 call");
+ EXPECT_SUB_FIND_("id");
+ EXPECT_SUB_FIND_(kAsyncIdStr);
+ EXPECT_SUB_FIND_("name1");
+ EXPECT_SUB_FIND_("value1");
+ EXPECT_FIND_("TRACE_EVENT_ASYNC_BEGIN2 call");
+ EXPECT_SUB_FIND_("id");
+ EXPECT_SUB_FIND_(kAsyncIdStr);
+ EXPECT_SUB_FIND_("name1");
+ EXPECT_SUB_FIND_("value1");
+ EXPECT_SUB_FIND_("name2");
+ EXPECT_SUB_FIND_("value2");
+
+ EXPECT_FIND_("TRACE_EVENT_ASYNC_STEP_INTO0 call");
+ EXPECT_SUB_FIND_("id");
+ EXPECT_SUB_FIND_(kAsyncIdStr);
+ EXPECT_SUB_FIND_("step_begin1");
+ EXPECT_FIND_("TRACE_EVENT_ASYNC_STEP_INTO1 call");
+ EXPECT_SUB_FIND_("id");
+ EXPECT_SUB_FIND_(kAsyncIdStr);
+ EXPECT_SUB_FIND_("step_begin2");
+ EXPECT_SUB_FIND_("name1");
+ EXPECT_SUB_FIND_("value1");
+
+ EXPECT_FIND_("TRACE_EVENT_ASYNC_END0 call");
+ EXPECT_SUB_FIND_("id");
+ EXPECT_SUB_FIND_(kAsyncIdStr);
+ EXPECT_FIND_("TRACE_EVENT_ASYNC_END1 call");
+ EXPECT_SUB_FIND_("id");
+ EXPECT_SUB_FIND_(kAsyncIdStr);
+ EXPECT_SUB_FIND_("name1");
+ EXPECT_SUB_FIND_("value1");
+ EXPECT_FIND_("TRACE_EVENT_ASYNC_END2 call");
+ EXPECT_SUB_FIND_("id");
+ EXPECT_SUB_FIND_(kAsyncIdStr);
+ EXPECT_SUB_FIND_("name1");
+ EXPECT_SUB_FIND_("value1");
+ EXPECT_SUB_FIND_("name2");
+ EXPECT_SUB_FIND_("value2");
+
+ EXPECT_FIND_("TRACE_EVENT_BEGIN_ETW0 call");
+ EXPECT_SUB_FIND_("id");
+ EXPECT_SUB_FIND_(kAsyncIdStr);
+ EXPECT_SUB_FIND_("extra");
+ EXPECT_SUB_FIND_("NULL");
+ EXPECT_FIND_("TRACE_EVENT_BEGIN_ETW1 call");
+ EXPECT_SUB_FIND_("id");
+ EXPECT_SUB_FIND_(kAsyncIdStr);
+ EXPECT_SUB_FIND_("extra");
+ EXPECT_SUB_FIND_("value");
+ EXPECT_FIND_("TRACE_EVENT_END_ETW0 call");
+ EXPECT_SUB_FIND_("id");
+ EXPECT_SUB_FIND_(kAsyncIdStr);
+ EXPECT_SUB_FIND_("extra");
+ EXPECT_SUB_FIND_("NULL");
+ EXPECT_FIND_("TRACE_EVENT_END_ETW1 call");
+ EXPECT_SUB_FIND_("id");
+ EXPECT_SUB_FIND_(kAsyncIdStr);
+ EXPECT_SUB_FIND_("extra");
+ EXPECT_SUB_FIND_("value");
+ EXPECT_FIND_("TRACE_EVENT_INSTANT_ETW0 call");
+ EXPECT_SUB_FIND_("id");
+ EXPECT_SUB_FIND_(kAsyncIdStr);
+ EXPECT_SUB_FIND_("extra");
+ EXPECT_SUB_FIND_("NULL");
+ EXPECT_FIND_("TRACE_EVENT_INSTANT_ETW1 call");
+ EXPECT_SUB_FIND_("id");
+ EXPECT_SUB_FIND_(kAsyncIdStr);
+ EXPECT_SUB_FIND_("extra");
+ EXPECT_SUB_FIND_("value");
+
+ EXPECT_FIND_("TRACE_COUNTER1 call");
+ {
+ std::string ph;
+ EXPECT_TRUE((item && item->GetString("ph", &ph)));
+ EXPECT_EQ("C", ph);
+
+ int value;
+ EXPECT_TRUE((item && item->GetInteger("args.value", &value)));
+ EXPECT_EQ(31415, value);
+ }
+
+ EXPECT_FIND_("TRACE_COUNTER2 call");
+ {
+ std::string ph;
+ EXPECT_TRUE((item && item->GetString("ph", &ph)));
+ EXPECT_EQ("C", ph);
+
+ int value;
+ EXPECT_TRUE((item && item->GetInteger("args.a", &value)));
+ EXPECT_EQ(30000, value);
+
+ EXPECT_TRUE((item && item->GetInteger("args.b", &value)));
+ EXPECT_EQ(1415, value);
+ }
+
+ EXPECT_FIND_("TRACE_COUNTER_ID1 call");
+ {
+ std::string id;
+ EXPECT_TRUE((item && item->GetString("id", &id)));
+ EXPECT_EQ("0x319009", id);
+
+ std::string ph;
+ EXPECT_TRUE((item && item->GetString("ph", &ph)));
+ EXPECT_EQ("C", ph);
+
+ int value;
+ EXPECT_TRUE((item && item->GetInteger("args.value", &value)));
+ EXPECT_EQ(31415, value);
+ }
+
+ EXPECT_FIND_("TRACE_COUNTER_ID2 call");
+ {
+ std::string id;
+ EXPECT_TRUE((item && item->GetString("id", &id)));
+ EXPECT_EQ("0x319009", id);
+
+ std::string ph;
+ EXPECT_TRUE((item && item->GetString("ph", &ph)));
+ EXPECT_EQ("C", ph);
+
+ int value;
+ EXPECT_TRUE((item && item->GetInteger("args.a", &value)));
+ EXPECT_EQ(30000, value);
+
+ EXPECT_TRUE((item && item->GetInteger("args.b", &value)));
+ EXPECT_EQ(1415, value);
+ }
+
+ EXPECT_FIND_("TRACE_EVENT_COPY_BEGIN_WITH_ID_TID_AND_TIMESTAMP0 call");
+ {
+ int val;
+ EXPECT_TRUE((item && item->GetInteger("ts", &val)));
+ EXPECT_EQ(12345, val);
+ EXPECT_TRUE((item && item->GetInteger("tid", &val)));
+ EXPECT_EQ(kThreadId, val);
+ std::string id;
+ EXPECT_TRUE((item && item->GetString("id", &id)));
+ EXPECT_EQ(kAsyncIdStr, id);
+ }
+
+ EXPECT_FIND_("TRACE_EVENT_COPY_END_WITH_ID_TID_AND_TIMESTAMP0 call");
+ {
+ int val;
+ EXPECT_TRUE((item && item->GetInteger("ts", &val)));
+ EXPECT_EQ(23456, val);
+ EXPECT_TRUE((item && item->GetInteger("tid", &val)));
+ EXPECT_EQ(kThreadId, val);
+ std::string id;
+ EXPECT_TRUE((item && item->GetString("id", &id)));
+ EXPECT_EQ(kAsyncIdStr, id);
+ }
+
+ EXPECT_FIND_("TRACE_EVENT_BEGIN_WITH_ID_TID_AND_TIMESTAMP0 call");
+ {
+ int val;
+ EXPECT_TRUE((item && item->GetInteger("ts", &val)));
+ EXPECT_EQ(34567, val);
+ EXPECT_TRUE((item && item->GetInteger("tid", &val)));
+ EXPECT_EQ(kThreadId, val);
+ std::string id;
+ EXPECT_TRUE((item && item->GetString("id", &id)));
+ EXPECT_EQ(kAsyncId2Str, id);
+ }
+
+ EXPECT_FIND_("TRACE_EVENT_ASYNC_STEP_PAST0 call");
+ {
+ EXPECT_SUB_FIND_("id");
+ EXPECT_SUB_FIND_(kAsyncId2Str);
+ EXPECT_SUB_FIND_("step_end1");
+ EXPECT_FIND_("TRACE_EVENT_ASYNC_STEP_PAST1 call");
+ EXPECT_SUB_FIND_("id");
+ EXPECT_SUB_FIND_(kAsyncId2Str);
+ EXPECT_SUB_FIND_("step_end2");
+ EXPECT_SUB_FIND_("name1");
+ EXPECT_SUB_FIND_("value1");
+ }
+
+ EXPECT_FIND_("TRACE_EVENT_END_WITH_ID_TID_AND_TIMESTAMP0 call");
+ {
+ int val;
+ EXPECT_TRUE((item && item->GetInteger("ts", &val)));
+ EXPECT_EQ(45678, val);
+ EXPECT_TRUE((item && item->GetInteger("tid", &val)));
+ EXPECT_EQ(kThreadId, val);
+ std::string id;
+ EXPECT_TRUE((item && item->GetString("id", &id)));
+ EXPECT_EQ(kAsyncId2Str, id);
+ }
+
+ EXPECT_FIND_("tracked object 1");
+ {
+ std::string phase;
+ std::string id;
+ std::string snapshot;
+
+ EXPECT_TRUE((item && item->GetString("ph", &phase)));
+ EXPECT_EQ("N", phase);
+ EXPECT_TRUE((item && item->GetString("id", &id)));
+ EXPECT_EQ("0x42", id);
+
+ item = FindTraceEntry(trace_parsed, "tracked object 1", item);
+ EXPECT_TRUE(item);
+ EXPECT_TRUE(item && item->GetString("ph", &phase));
+ EXPECT_EQ("O", phase);
+ EXPECT_TRUE(item && item->GetString("id", &id));
+ EXPECT_EQ("0x42", id);
+ EXPECT_TRUE(item && item->GetString("args.snapshot", &snapshot));
+ EXPECT_EQ("hello", snapshot);
+
+ item = FindTraceEntry(trace_parsed, "tracked object 1", item);
+ EXPECT_TRUE(item);
+ EXPECT_TRUE(item && item->GetString("ph", &phase));
+ EXPECT_EQ("D", phase);
+ EXPECT_TRUE(item && item->GetString("id", &id));
+ EXPECT_EQ("0x42", id);
+ }
+
+ EXPECT_FIND_("tracked object 2");
+ {
+ std::string phase;
+ std::string id;
+ std::string snapshot;
+
+ EXPECT_TRUE(item && item->GetString("ph", &phase));
+ EXPECT_EQ("N", phase);
+ EXPECT_TRUE(item && item->GetString("id", &id));
+ EXPECT_EQ("0x2128506", id);
+
+ item = FindTraceEntry(trace_parsed, "tracked object 2", item);
+ EXPECT_TRUE(item);
+ EXPECT_TRUE(item && item->GetString("ph", &phase));
+ EXPECT_EQ("O", phase);
+ EXPECT_TRUE(item && item->GetString("id", &id));
+ EXPECT_EQ("0x2128506", id);
+ EXPECT_TRUE(item && item->GetString("args.snapshot", &snapshot));
+ EXPECT_EQ("world", snapshot);
+
+ item = FindTraceEntry(trace_parsed, "tracked object 2", item);
+ EXPECT_TRUE(item);
+ EXPECT_TRUE(item && item->GetString("ph", &phase));
+ EXPECT_EQ("D", phase);
+ EXPECT_TRUE(item && item->GetString("id", &id));
+ EXPECT_EQ("0x2128506", id);
+ }
+
+ EXPECT_FIND_(kControlCharacters);
+ EXPECT_SUB_FIND_(kControlCharacters);
+}
+
+void TraceManyInstantEvents(int thread_id, int num_events,
+ WaitableEvent* task_complete_event) {
+ for (int i = 0; i < num_events; i++) {
+ TRACE_EVENT_INSTANT2("all", "multi thread event",
+ TRACE_EVENT_SCOPE_THREAD,
+ "thread", thread_id,
+ "event", i);
+ }
+
+ if (task_complete_event)
+ task_complete_event->Signal();
+}
+
+void ValidateInstantEventPresentOnEveryThread(const ListValue& trace_parsed,
+ int num_threads,
+ int num_events) {
+ std::map<int, std::map<int, bool> > results;
+
+ size_t trace_parsed_count = trace_parsed.GetSize();
+ for (size_t i = 0; i < trace_parsed_count; i++) {
+ const Value* value = NULL;
+ trace_parsed.Get(i, &value);
+ if (!value || value->GetType() != Value::TYPE_DICTIONARY)
+ continue;
+ const DictionaryValue* dict = static_cast<const DictionaryValue*>(value);
+ std::string name;
+ dict->GetString("name", &name);
+ if (name != "multi thread event")
+ continue;
+
+ int thread = 0;
+ int event = 0;
+ EXPECT_TRUE(dict->GetInteger("args.thread", &thread));
+ EXPECT_TRUE(dict->GetInteger("args.event", &event));
+ results[thread][event] = true;
+ }
+
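+  // Sanity check: a (thread, event) pair that was never recorded must read
+  // false (std::map's operator[] default-constructs the bool).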
+ EXPECT_FALSE(results[-1][-1]);
+ for (int thread = 0; thread < num_threads; thread++) {
+ for (int event = 0; event < num_events; event++) {
+ EXPECT_TRUE(results[thread][event]);
+ }
+ }
+}
+
+} // namespace
+
+// Simple Test for emitting data and validating it was received.
+TEST_F(TraceEventTestFixture, DataCaptured) {
+ TraceLog::GetInstance()->SetEnabled(
+ CategoryFilter("*"),
+ TraceLog::RECORDING_MODE,
+ TraceOptions());
+
+ TraceWithAllMacroVariants(NULL);
+
+ EndTraceAndFlush();
+
+ ValidateAllTraceMacrosCreatedData(trace_parsed_);
+}
+
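+// gMock observer used to verify when TraceLog fires enable/disable
+// notifications.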
+class MockEnabledStateChangedObserver :
+ public TraceLog::EnabledStateObserver {
+ public:
+ MOCK_METHOD0(OnTraceLogEnabled, void());
+ MOCK_METHOD0(OnTraceLogDisabled, void());
+};
+
+TEST_F(TraceEventTestFixture, EnabledObserverFiresOnEnable) {
+ MockEnabledStateChangedObserver observer;
+ TraceLog::GetInstance()->AddEnabledStateObserver(&observer);
+
+ EXPECT_CALL(observer, OnTraceLogEnabled())
+ .Times(1);
+ TraceLog::GetInstance()->SetEnabled(
+ CategoryFilter("*"),
+ TraceLog::RECORDING_MODE,
+ TraceOptions());
+ testing::Mock::VerifyAndClear(&observer);
+ EXPECT_TRUE(TraceLog::GetInstance()->IsEnabled());
+
+ // Cleanup.
+ TraceLog::GetInstance()->RemoveEnabledStateObserver(&observer);
+ TraceLog::GetInstance()->SetDisabled();
+}
+
+TEST_F(TraceEventTestFixture, EnabledObserverDoesntFireOnSecondEnable) {
+ TraceLog::GetInstance()->SetEnabled(
+ CategoryFilter("*"),
+ TraceLog::RECORDING_MODE,
+ TraceOptions());
+
+ testing::StrictMock<MockEnabledStateChangedObserver> observer;
+ TraceLog::GetInstance()->AddEnabledStateObserver(&observer);
+
+ EXPECT_CALL(observer, OnTraceLogEnabled())
+ .Times(0);
+ EXPECT_CALL(observer, OnTraceLogDisabled())
+ .Times(0);
+ TraceLog::GetInstance()->SetEnabled(
+ CategoryFilter("*"),
+ TraceLog::RECORDING_MODE,
+ TraceOptions());
+ testing::Mock::VerifyAndClear(&observer);
+ EXPECT_TRUE(TraceLog::GetInstance()->IsEnabled());
+
+ // Cleanup.
+ TraceLog::GetInstance()->RemoveEnabledStateObserver(&observer);
+ TraceLog::GetInstance()->SetDisabled();
+ TraceLog::GetInstance()->SetDisabled();
+}
+
+TEST_F(TraceEventTestFixture, EnabledObserverFiresOnFirstDisable) {
+ CategoryFilter cf_inc_all("*");
+ TraceLog::GetInstance()->SetEnabled(
+ cf_inc_all,
+ TraceLog::RECORDING_MODE,
+ TraceOptions());
+ TraceLog::GetInstance()->SetEnabled(
+ cf_inc_all,
+ TraceLog::RECORDING_MODE,
+ TraceOptions());
+
+ testing::StrictMock<MockEnabledStateChangedObserver> observer;
+ TraceLog::GetInstance()->AddEnabledStateObserver(&observer);
+
+ EXPECT_CALL(observer, OnTraceLogEnabled())
+ .Times(0);
+ EXPECT_CALL(observer, OnTraceLogDisabled())
+ .Times(1);
+ TraceLog::GetInstance()->SetDisabled();
+ testing::Mock::VerifyAndClear(&observer);
+
+ // Cleanup.
+ TraceLog::GetInstance()->RemoveEnabledStateObserver(&observer);
+ TraceLog::GetInstance()->SetDisabled();
+}
+
+TEST_F(TraceEventTestFixture, EnabledObserverFiresOnDisable) {
+ TraceLog::GetInstance()->SetEnabled(
+ CategoryFilter("*"),
+ TraceLog::RECORDING_MODE,
+ TraceOptions());
+
+ MockEnabledStateChangedObserver observer;
+ TraceLog::GetInstance()->AddEnabledStateObserver(&observer);
+
+ EXPECT_CALL(observer, OnTraceLogDisabled())
+ .Times(1);
+ TraceLog::GetInstance()->SetDisabled();
+ testing::Mock::VerifyAndClear(&observer);
+
+ // Cleanup.
+ TraceLog::GetInstance()->RemoveEnabledStateObserver(&observer);
+}
+
+// Tests that the IsEnabled() state of TraceLog changes before callbacks.
+class AfterStateChangeEnabledStateObserver
+ : public TraceLog::EnabledStateObserver {
+ public:
+ AfterStateChangeEnabledStateObserver() {}
+ virtual ~AfterStateChangeEnabledStateObserver() {}
+
+ // TraceLog::EnabledStateObserver overrides:
+ void OnTraceLogEnabled() override {
+ EXPECT_TRUE(TraceLog::GetInstance()->IsEnabled());
+ }
+
+ void OnTraceLogDisabled() override {
+ EXPECT_FALSE(TraceLog::GetInstance()->IsEnabled());
+ }
+};
+
+TEST_F(TraceEventTestFixture, ObserversFireAfterStateChange) {
+ AfterStateChangeEnabledStateObserver observer;
+ TraceLog::GetInstance()->AddEnabledStateObserver(&observer);
+
+ TraceLog::GetInstance()->SetEnabled(
+ CategoryFilter("*"),
+ TraceLog::RECORDING_MODE,
+ TraceOptions());
+ EXPECT_TRUE(TraceLog::GetInstance()->IsEnabled());
+
+ TraceLog::GetInstance()->SetDisabled();
+ EXPECT_FALSE(TraceLog::GetInstance()->IsEnabled());
+
+ TraceLog::GetInstance()->RemoveEnabledStateObserver(&observer);
+}
+
+// Tests that a state observer can remove itself during a callback.
+class SelfRemovingEnabledStateObserver
+ : public TraceLog::EnabledStateObserver {
+ public:
+ SelfRemovingEnabledStateObserver() {}
+ virtual ~SelfRemovingEnabledStateObserver() {}
+
+ // TraceLog::EnabledStateObserver overrides:
+ void OnTraceLogEnabled() override {}
+
+ void OnTraceLogDisabled() override {
+ TraceLog::GetInstance()->RemoveEnabledStateObserver(this);
+ }
+};
+
+TEST_F(TraceEventTestFixture, SelfRemovingObserver) {
+ ASSERT_EQ(0u, TraceLog::GetInstance()->GetObserverCountForTest());
+
+ SelfRemovingEnabledStateObserver observer;
+ TraceLog::GetInstance()->AddEnabledStateObserver(&observer);
+ EXPECT_EQ(1u, TraceLog::GetInstance()->GetObserverCountForTest());
+
+ TraceLog::GetInstance()->SetEnabled(
+ CategoryFilter("*"),
+ TraceLog::RECORDING_MODE,
+ TraceOptions());
+ TraceLog::GetInstance()->SetDisabled();
+ // The observer removed itself on disable.
+ EXPECT_EQ(0u, TraceLog::GetInstance()->GetObserverCountForTest());
+}
+
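+// Queries TRACE_EVENT_IS_NEW_TRACE, which should report true only on the
+// first query after a new trace is started.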
+bool IsNewTrace() {
+ bool is_new_trace;
+ TRACE_EVENT_IS_NEW_TRACE(&is_new_trace);
+ return is_new_trace;
+}
+
+TEST_F(TraceEventTestFixture, NewTraceRecording) {
+ ASSERT_FALSE(IsNewTrace());
+ TraceLog::GetInstance()->SetEnabled(
+ CategoryFilter("*"),
+ TraceLog::RECORDING_MODE,
+ TraceOptions());
+  // The first call to IsNewTrace() should succeed, but the second should not.
+ ASSERT_TRUE(IsNewTrace());
+ ASSERT_FALSE(IsNewTrace());
+ EndTraceAndFlush();
+
+ // IsNewTrace() should definitely be false now.
+ ASSERT_FALSE(IsNewTrace());
+
+ // Start another trace. IsNewTrace() should become true again, briefly, as
+ // before.
+ TraceLog::GetInstance()->SetEnabled(
+ CategoryFilter("*"),
+ TraceLog::RECORDING_MODE,
+ TraceOptions());
+ ASSERT_TRUE(IsNewTrace());
+ ASSERT_FALSE(IsNewTrace());
+
+ // Cleanup.
+ EndTraceAndFlush();
+}
+
+// Test that categories work.
+TEST_F(TraceEventTestFixture, Categories) {
+  // Test that categories that are used can be retrieved whether tracing was
+  // enabled or disabled when the trace event was encountered.
+ TRACE_EVENT_INSTANT0("c1", "name", TRACE_EVENT_SCOPE_THREAD);
+ TRACE_EVENT_INSTANT0("c2", "name", TRACE_EVENT_SCOPE_THREAD);
+ BeginTrace();
+ TRACE_EVENT_INSTANT0("c3", "name", TRACE_EVENT_SCOPE_THREAD);
+ TRACE_EVENT_INSTANT0("c4", "name", TRACE_EVENT_SCOPE_THREAD);
+ // Category groups containing more than one category.
+ TRACE_EVENT_INSTANT0("c5,c6", "name", TRACE_EVENT_SCOPE_THREAD);
+ TRACE_EVENT_INSTANT0("c7,c8", "name", TRACE_EVENT_SCOPE_THREAD);
+ TRACE_EVENT_INSTANT0(TRACE_DISABLED_BY_DEFAULT("c9"), "name",
+ TRACE_EVENT_SCOPE_THREAD);
+
+ EndTraceAndFlush();
+ std::vector<std::string> cat_groups;
+ TraceLog::GetInstance()->GetKnownCategoryGroups(&cat_groups);
+ EXPECT_TRUE(std::find(cat_groups.begin(),
+ cat_groups.end(), "c1") != cat_groups.end());
+ EXPECT_TRUE(std::find(cat_groups.begin(),
+ cat_groups.end(), "c2") != cat_groups.end());
+ EXPECT_TRUE(std::find(cat_groups.begin(),
+ cat_groups.end(), "c3") != cat_groups.end());
+ EXPECT_TRUE(std::find(cat_groups.begin(),
+ cat_groups.end(), "c4") != cat_groups.end());
+ EXPECT_TRUE(std::find(cat_groups.begin(),
+ cat_groups.end(), "c5,c6") != cat_groups.end());
+ EXPECT_TRUE(std::find(cat_groups.begin(),
+ cat_groups.end(), "c7,c8") != cat_groups.end());
+ EXPECT_TRUE(std::find(cat_groups.begin(),
+ cat_groups.end(),
+ "disabled-by-default-c9") != cat_groups.end());
+ // Make sure metadata isn't returned.
+ EXPECT_TRUE(std::find(cat_groups.begin(),
+ cat_groups.end(), "__metadata") == cat_groups.end());
+
+ const std::vector<std::string> empty_categories;
+ std::vector<std::string> included_categories;
+ std::vector<std::string> excluded_categories;
+
+ // Test that category filtering works.
+
+ // Include nonexistent category -> no events
+ Clear();
+ included_categories.clear();
+ TraceLog::GetInstance()->SetEnabled(
+ CategoryFilter("not_found823564786"),
+ TraceLog::RECORDING_MODE,
+ TraceOptions());
+ TRACE_EVENT_INSTANT0("cat1", "name", TRACE_EVENT_SCOPE_THREAD);
+ TRACE_EVENT_INSTANT0("cat2", "name", TRACE_EVENT_SCOPE_THREAD);
+ EndTraceAndFlush();
+ DropTracedMetadataRecords();
+ EXPECT_TRUE(trace_parsed_.empty());
+
+  // Include existing category -> only events of that category
+ Clear();
+ included_categories.clear();
+ TraceLog::GetInstance()->SetEnabled(
+ CategoryFilter("inc"),
+ TraceLog::RECORDING_MODE,
+ TraceOptions());
+ TRACE_EVENT_INSTANT0("inc", "name", TRACE_EVENT_SCOPE_THREAD);
+ TRACE_EVENT_INSTANT0("inc2", "name", TRACE_EVENT_SCOPE_THREAD);
+ EndTraceAndFlush();
+ DropTracedMetadataRecords();
+ EXPECT_TRUE(FindMatchingValue("cat", "inc"));
+ EXPECT_FALSE(FindNonMatchingValue("cat", "inc"));
+
+  // Include existing wildcard -> all categories matching the wildcard
+ Clear();
+ included_categories.clear();
+ TraceLog::GetInstance()->SetEnabled(
+ CategoryFilter("inc_wildcard_*,inc_wildchar_?_end"),
+ TraceLog::RECORDING_MODE,
+ TraceOptions());
+ TRACE_EVENT_INSTANT0("inc_wildcard_abc", "included",
+ TRACE_EVENT_SCOPE_THREAD);
+ TRACE_EVENT_INSTANT0("inc_wildcard_", "included", TRACE_EVENT_SCOPE_THREAD);
+ TRACE_EVENT_INSTANT0("inc_wildchar_x_end", "included",
+ TRACE_EVENT_SCOPE_THREAD);
+ TRACE_EVENT_INSTANT0("inc_wildchar_bla_end", "not_inc",
+ TRACE_EVENT_SCOPE_THREAD);
+ TRACE_EVENT_INSTANT0("cat1", "not_inc", TRACE_EVENT_SCOPE_THREAD);
+ TRACE_EVENT_INSTANT0("cat2", "not_inc", TRACE_EVENT_SCOPE_THREAD);
+ TRACE_EVENT_INSTANT0("inc_wildcard_category,other_category", "included",
+ TRACE_EVENT_SCOPE_THREAD);
+ TRACE_EVENT_INSTANT0(
+ "non_included_category,inc_wildcard_category", "included",
+ TRACE_EVENT_SCOPE_THREAD);
+ EndTraceAndFlush();
+ EXPECT_TRUE(FindMatchingValue("cat", "inc_wildcard_abc"));
+ EXPECT_TRUE(FindMatchingValue("cat", "inc_wildcard_"));
+ EXPECT_TRUE(FindMatchingValue("cat", "inc_wildchar_x_end"));
+ EXPECT_FALSE(FindMatchingValue("name", "not_inc"));
+ EXPECT_TRUE(FindMatchingValue("cat", "inc_wildcard_category,other_category"));
+ EXPECT_TRUE(FindMatchingValue("cat",
+ "non_included_category,inc_wildcard_category"));
+
+ included_categories.clear();
+
+ // Exclude nonexistent category -> all events
+ Clear();
+ TraceLog::GetInstance()->SetEnabled(
+ CategoryFilter("-not_found823564786"),
+ TraceLog::RECORDING_MODE,
+ TraceOptions());
+ TRACE_EVENT_INSTANT0("cat1", "name", TRACE_EVENT_SCOPE_THREAD);
+ TRACE_EVENT_INSTANT0("cat2", "name", TRACE_EVENT_SCOPE_THREAD);
+ TRACE_EVENT_INSTANT0("category1,category2", "name", TRACE_EVENT_SCOPE_THREAD);
+ EndTraceAndFlush();
+ EXPECT_TRUE(FindMatchingValue("cat", "cat1"));
+ EXPECT_TRUE(FindMatchingValue("cat", "cat2"));
+ EXPECT_TRUE(FindMatchingValue("cat", "category1,category2"));
+
+  // Exclude existing category -> only events of other categories
+ Clear();
+ TraceLog::GetInstance()->SetEnabled(
+ CategoryFilter("-inc"),
+ TraceLog::RECORDING_MODE,
+ TraceOptions());
+ TRACE_EVENT_INSTANT0("inc", "name", TRACE_EVENT_SCOPE_THREAD);
+ TRACE_EVENT_INSTANT0("inc2", "name", TRACE_EVENT_SCOPE_THREAD);
+ TRACE_EVENT_INSTANT0("inc2,inc", "name", TRACE_EVENT_SCOPE_THREAD);
+ TRACE_EVENT_INSTANT0("inc,inc2", "name", TRACE_EVENT_SCOPE_THREAD);
+ EndTraceAndFlush();
+ EXPECT_TRUE(FindMatchingValue("cat", "inc2"));
+ EXPECT_FALSE(FindMatchingValue("cat", "inc"));
+ EXPECT_FALSE(FindMatchingValue("cat", "inc2,inc"));
+ EXPECT_FALSE(FindMatchingValue("cat", "inc,inc2"));
+
+  // Exclude existing wildcard -> all categories not matching the wildcard
+ Clear();
+ TraceLog::GetInstance()->SetEnabled(
+ CategoryFilter("-inc_wildcard_*,-inc_wildchar_?_end"),
+ TraceLog::RECORDING_MODE,
+ TraceOptions());
+ TRACE_EVENT_INSTANT0("inc_wildcard_abc", "not_inc",
+ TRACE_EVENT_SCOPE_THREAD);
+ TRACE_EVENT_INSTANT0("inc_wildcard_", "not_inc",
+ TRACE_EVENT_SCOPE_THREAD);
+ TRACE_EVENT_INSTANT0("inc_wildchar_x_end", "not_inc",
+ TRACE_EVENT_SCOPE_THREAD);
+ TRACE_EVENT_INSTANT0("inc_wildchar_bla_end", "included",
+ TRACE_EVENT_SCOPE_THREAD);
+ TRACE_EVENT_INSTANT0("cat1", "included", TRACE_EVENT_SCOPE_THREAD);
+ TRACE_EVENT_INSTANT0("cat2", "included", TRACE_EVENT_SCOPE_THREAD);
+ EndTraceAndFlush();
+ EXPECT_TRUE(FindMatchingValue("cat", "inc_wildchar_bla_end"));
+ EXPECT_TRUE(FindMatchingValue("cat", "cat1"));
+ EXPECT_TRUE(FindMatchingValue("cat", "cat2"));
+ EXPECT_FALSE(FindMatchingValue("name", "not_inc"));
+}
+
+// Test EVENT_WATCH_NOTIFICATION
+TEST_F(TraceEventTestFixture, EventWatchNotification) {
+ // Basic one occurrence.
+ BeginTrace();
+ TraceLog::WatchEventCallback callback =
+ base::Bind(&TraceEventTestFixture::OnWatchEventMatched,
+ base::Unretained(this));
+ TraceLog::GetInstance()->SetWatchEvent("cat", "event", callback);
+ TRACE_EVENT_INSTANT0("cat", "event", TRACE_EVENT_SCOPE_THREAD);
+ EndTraceAndFlush();
+ EXPECT_EQ(event_watch_notification_, 1);
+
+ // Auto-reset after end trace.
+ BeginTrace();
+ TraceLog::GetInstance()->SetWatchEvent("cat", "event", callback);
+ EndTraceAndFlush();
+ BeginTrace();
+ TRACE_EVENT_INSTANT0("cat", "event", TRACE_EVENT_SCOPE_THREAD);
+ EndTraceAndFlush();
+ EXPECT_EQ(event_watch_notification_, 0);
+
+  // Multiple occurrences.
+ BeginTrace();
+ int num_occurrences = 5;
+ TraceLog::GetInstance()->SetWatchEvent("cat", "event", callback);
+ for (int i = 0; i < num_occurrences; ++i)
+ TRACE_EVENT_INSTANT0("cat", "event", TRACE_EVENT_SCOPE_THREAD);
+ EndTraceAndFlush();
+ EXPECT_EQ(event_watch_notification_, num_occurrences);
+
+ // Wrong category.
+ BeginTrace();
+ TraceLog::GetInstance()->SetWatchEvent("cat", "event", callback);
+ TRACE_EVENT_INSTANT0("wrong_cat", "event", TRACE_EVENT_SCOPE_THREAD);
+ EndTraceAndFlush();
+ EXPECT_EQ(event_watch_notification_, 0);
+
+ // Wrong name.
+ BeginTrace();
+ TraceLog::GetInstance()->SetWatchEvent("cat", "event", callback);
+ TRACE_EVENT_INSTANT0("cat", "wrong_event", TRACE_EVENT_SCOPE_THREAD);
+ EndTraceAndFlush();
+ EXPECT_EQ(event_watch_notification_, 0);
+
+ // Canceled.
+ BeginTrace();
+ TraceLog::GetInstance()->SetWatchEvent("cat", "event", callback);
+ TraceLog::GetInstance()->CancelWatchEvent();
+ TRACE_EVENT_INSTANT0("cat", "event", TRACE_EVENT_SCOPE_THREAD);
+ EndTraceAndFlush();
+ EXPECT_EQ(event_watch_notification_, 0);
+}
+
+// Test ASYNC_BEGIN/END events
+TEST_F(TraceEventTestFixture, AsyncBeginEndEvents) {
+ BeginTrace();
+
+ unsigned long long id = 0xfeedbeeffeedbeefull;
+ TRACE_EVENT_ASYNC_BEGIN0("cat", "name1", id);
+ TRACE_EVENT_ASYNC_STEP_INTO0("cat", "name1", id, "step1");
+ TRACE_EVENT_ASYNC_END0("cat", "name1", id);
+ TRACE_EVENT_BEGIN0("cat", "name2");
+ TRACE_EVENT_ASYNC_BEGIN0("cat", "name3", 0);
+ TRACE_EVENT_ASYNC_STEP_PAST0("cat", "name3", 0, "step2");
+
+ EndTraceAndFlush();
+
+ EXPECT_TRUE(FindNamePhase("name1", "S"));
+ EXPECT_TRUE(FindNamePhase("name1", "T"));
+ EXPECT_TRUE(FindNamePhase("name1", "F"));
+
+ std::string id_str;
+ StringAppendF(&id_str, "0x%llx", id);
+
+ EXPECT_TRUE(FindNamePhaseKeyValue("name1", "S", "id", id_str.c_str()));
+ EXPECT_TRUE(FindNamePhaseKeyValue("name1", "T", "id", id_str.c_str()));
+ EXPECT_TRUE(FindNamePhaseKeyValue("name1", "F", "id", id_str.c_str()));
+ EXPECT_TRUE(FindNamePhaseKeyValue("name3", "S", "id", "0x0"));
+ EXPECT_TRUE(FindNamePhaseKeyValue("name3", "p", "id", "0x0"));
+
+  // BEGIN events should not have an id.
+ EXPECT_FALSE(FindNamePhaseKeyValue("name2", "B", "id", "0"));
+}
+
+// Test that pointer-based async IDs are mangled with the process ID.
+TEST_F(TraceEventTestFixture, AsyncBeginEndPointerMangling) {
+ void* ptr = this;
+
+ TraceLog::GetInstance()->SetProcessID(100);
+ BeginTrace();
+ TRACE_EVENT_ASYNC_BEGIN0("cat", "name1", ptr);
+ TRACE_EVENT_ASYNC_BEGIN0("cat", "name2", ptr);
+ EndTraceAndFlush();
+
+ TraceLog::GetInstance()->SetProcessID(200);
+ BeginTrace();
+ TRACE_EVENT_ASYNC_END0("cat", "name1", ptr);
+ EndTraceAndFlush();
+
+ DictionaryValue* async_begin = FindNamePhase("name1", "S");
+ DictionaryValue* async_begin2 = FindNamePhase("name2", "S");
+ DictionaryValue* async_end = FindNamePhase("name1", "F");
+ EXPECT_TRUE(async_begin);
+ EXPECT_TRUE(async_begin2);
+ EXPECT_TRUE(async_end);
+
+ Value* value = NULL;
+ std::string async_begin_id_str;
+ std::string async_begin2_id_str;
+ std::string async_end_id_str;
+ ASSERT_TRUE(async_begin->Get("id", &value));
+ ASSERT_TRUE(value->GetAsString(&async_begin_id_str));
+ ASSERT_TRUE(async_begin2->Get("id", &value));
+ ASSERT_TRUE(value->GetAsString(&async_begin2_id_str));
+ ASSERT_TRUE(async_end->Get("id", &value));
+ ASSERT_TRUE(value->GetAsString(&async_end_id_str));
+
+ EXPECT_STREQ(async_begin_id_str.c_str(), async_begin2_id_str.c_str());
+ EXPECT_STRNE(async_begin_id_str.c_str(), async_end_id_str.c_str());
+}
+
+// Test that static strings are not copied.
+TEST_F(TraceEventTestFixture, StaticStringVsString) {
+ TraceLog* tracer = TraceLog::GetInstance();
+ // Make sure old events are flushed:
+ EXPECT_EQ(0u, tracer->GetStatus().event_count);
+ const unsigned char* category_group_enabled =
+ TRACE_EVENT_API_GET_CATEGORY_GROUP_ENABLED("cat");
+
+ {
+ BeginTrace();
+ // Test that string arguments are copied.
+ TraceEventHandle handle1 =
+ trace_event_internal::AddTraceEvent(
+ TRACE_EVENT_PHASE_INSTANT, category_group_enabled, "name1", 0, 0,
+ "arg1", std::string("argval"), "arg2", std::string("argval"));
+ // Test that static TRACE_STR_COPY string arguments are copied.
+ TraceEventHandle handle2 =
+ trace_event_internal::AddTraceEvent(
+ TRACE_EVENT_PHASE_INSTANT, category_group_enabled, "name2", 0, 0,
+ "arg1", TRACE_STR_COPY("argval"),
+ "arg2", TRACE_STR_COPY("argval"));
+ EXPECT_GT(tracer->GetStatus().event_count, 1u);
+ const TraceEvent* event1 = tracer->GetEventByHandle(handle1);
+ const TraceEvent* event2 = tracer->GetEventByHandle(handle2);
+ ASSERT_TRUE(event1);
+ ASSERT_TRUE(event2);
+ EXPECT_STREQ("name1", event1->name());
+ EXPECT_STREQ("name2", event2->name());
+ EXPECT_TRUE(event1->parameter_copy_storage() != NULL);
+ EXPECT_TRUE(event2->parameter_copy_storage() != NULL);
+ EXPECT_GT(event1->parameter_copy_storage()->size(), 0u);
+ EXPECT_GT(event2->parameter_copy_storage()->size(), 0u);
+ EndTraceAndFlush();
+ }
+
+ {
+ BeginTrace();
+ // Test that static literal string arguments are not copied.
+ TraceEventHandle handle1 =
+ trace_event_internal::AddTraceEvent(
+ TRACE_EVENT_PHASE_INSTANT, category_group_enabled, "name1", 0, 0,
+ "arg1", "argval", "arg2", "argval");
+ // Test that static TRACE_STR_COPY NULL string arguments are not copied.
+ const char* str1 = NULL;
+ const char* str2 = NULL;
+ TraceEventHandle handle2 =
+ trace_event_internal::AddTraceEvent(
+ TRACE_EVENT_PHASE_INSTANT, category_group_enabled, "name2", 0, 0,
+ "arg1", TRACE_STR_COPY(str1),
+ "arg2", TRACE_STR_COPY(str2));
+ EXPECT_GT(tracer->GetStatus().event_count, 1u);
+ const TraceEvent* event1 = tracer->GetEventByHandle(handle1);
+ const TraceEvent* event2 = tracer->GetEventByHandle(handle2);
+ ASSERT_TRUE(event1);
+ ASSERT_TRUE(event2);
+ EXPECT_STREQ("name1", event1->name());
+ EXPECT_STREQ("name2", event2->name());
+ EXPECT_TRUE(event1->parameter_copy_storage() == NULL);
+ EXPECT_TRUE(event2->parameter_copy_storage() == NULL);
+ EndTraceAndFlush();
+ }
+}
+
+// Test that data sent from other threads is gathered
+TEST_F(TraceEventTestFixture, DataCapturedOnThread) {
+ BeginTrace();
+
+ Thread thread("1");
+ WaitableEvent task_complete_event(false, false);
+ thread.Start();
+
+ thread.message_loop()->PostTask(
+ FROM_HERE, base::Bind(&TraceWithAllMacroVariants, &task_complete_event));
+ task_complete_event.Wait();
+ thread.Stop();
+
+ EndTraceAndFlush();
+ ValidateAllTraceMacrosCreatedData(trace_parsed_);
+}
+
+// Test that data sent from multiple threads is gathered
+TEST_F(TraceEventTestFixture, DataCapturedManyThreads) {
+ BeginTrace();
+
+ const int num_threads = 4;
+ const int num_events = 4000;
+ Thread* threads[num_threads];
+ WaitableEvent* task_complete_events[num_threads];
+ for (int i = 0; i < num_threads; i++) {
+ threads[i] = new Thread(StringPrintf("Thread %d", i));
+ task_complete_events[i] = new WaitableEvent(false, false);
+ threads[i]->Start();
+ threads[i]->message_loop()->PostTask(
+ FROM_HERE, base::Bind(&TraceManyInstantEvents,
+ i, num_events, task_complete_events[i]));
+ }
+
+ for (int i = 0; i < num_threads; i++) {
+ task_complete_events[i]->Wait();
+ }
+
+ // Let half of the threads end before flush.
+ for (int i = 0; i < num_threads / 2; i++) {
+ threads[i]->Stop();
+ delete threads[i];
+ delete task_complete_events[i];
+ }
+
+ EndTraceAndFlushInThreadWithMessageLoop();
+ ValidateInstantEventPresentOnEveryThread(trace_parsed_,
+ num_threads, num_events);
+
+ // Let the other half of the threads end after flush.
+ for (int i = num_threads / 2; i < num_threads; i++) {
+ threads[i]->Stop();
+ delete threads[i];
+ delete task_complete_events[i];
+ }
+}
+
+// Test that thread and process names show up in the trace
+TEST_F(TraceEventTestFixture, ThreadNames) {
+ // Create threads before we enable tracing to make sure
+  // that TraceLog still captures them.
+ const int kNumThreads = 4;
+ const int kNumEvents = 10;
+ Thread* threads[kNumThreads];
+ PlatformThreadId thread_ids[kNumThreads];
+ for (int i = 0; i < kNumThreads; i++)
+ threads[i] = new Thread(StringPrintf("Thread %d", i));
+
+ // Enable tracing.
+ BeginTrace();
+
+ // Now run some trace code on these threads.
+ WaitableEvent* task_complete_events[kNumThreads];
+ for (int i = 0; i < kNumThreads; i++) {
+ task_complete_events[i] = new WaitableEvent(false, false);
+ threads[i]->Start();
+ thread_ids[i] = threads[i]->thread_id();
+ threads[i]->message_loop()->PostTask(
+ FROM_HERE, base::Bind(&TraceManyInstantEvents,
+ i, kNumEvents, task_complete_events[i]));
+ }
+ for (int i = 0; i < kNumThreads; i++) {
+ task_complete_events[i]->Wait();
+ }
+
+ // Shut things down.
+ for (int i = 0; i < kNumThreads; i++) {
+ threads[i]->Stop();
+ delete threads[i];
+ delete task_complete_events[i];
+ }
+
+ EndTraceAndFlush();
+
+ std::string tmp;
+ int tmp_int;
+ const DictionaryValue* item;
+
+ // Make sure we get thread name metadata.
+  // Note that the test suite may have created many other threads, so there
+  // will be thread names for threads we didn't create.
+ std::vector<const DictionaryValue*> items =
+ FindTraceEntries(trace_parsed_, "thread_name");
+ for (int i = 0; i < static_cast<int>(items.size()); i++) {
+ item = items[i];
+ ASSERT_TRUE(item);
+ EXPECT_TRUE(item->GetInteger("tid", &tmp_int));
+
+    // See if this thread is one of the threads we just created.
+    for (int j = 0; j < kNumThreads; j++) {
+      if (static_cast<int>(thread_ids[j]) != tmp_int)
+ continue;
+
+ std::string expected_name = StringPrintf("Thread %d", j);
+ EXPECT_TRUE(item->GetString("ph", &tmp) && tmp == "M");
+ EXPECT_TRUE(item->GetInteger("pid", &tmp_int) &&
+ tmp_int == static_cast<int>(base::GetCurrentProcId()));
+ // If the thread name changes or the tid gets reused, the name will be
+ // a comma-separated list of thread names, so look for a substring.
+ EXPECT_TRUE(item->GetString("args.name", &tmp) &&
+ tmp.find(expected_name) != std::string::npos);
+ }
+ }
+}
+
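+// Test that successive thread name changes are merged into a single
+// comma-separated thread_name metadata entry.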
+TEST_F(TraceEventTestFixture, ThreadNameChanges) {
+ BeginTrace();
+
+ PlatformThread::SetName("");
+ TRACE_EVENT_INSTANT0("drink", "water", TRACE_EVENT_SCOPE_THREAD);
+
+ PlatformThread::SetName("cafe");
+ TRACE_EVENT_INSTANT0("drink", "coffee", TRACE_EVENT_SCOPE_THREAD);
+
+ PlatformThread::SetName("shop");
+  // No event here, so this name won't appear in the combined name.
+
+ PlatformThread::SetName("pub");
+ TRACE_EVENT_INSTANT0("drink", "beer", TRACE_EVENT_SCOPE_THREAD);
+ TRACE_EVENT_INSTANT0("drink", "wine", TRACE_EVENT_SCOPE_THREAD);
+
+ PlatformThread::SetName(" bar");
+ TRACE_EVENT_INSTANT0("drink", "whisky", TRACE_EVENT_SCOPE_THREAD);
+
+ EndTraceAndFlush();
+
+ std::vector<const DictionaryValue*> items =
+ FindTraceEntries(trace_parsed_, "thread_name");
+ EXPECT_EQ(1u, items.size());
+ ASSERT_GT(items.size(), 0u);
+ const DictionaryValue* item = items[0];
+ ASSERT_TRUE(item);
+ int tid;
+ EXPECT_TRUE(item->GetInteger("tid", &tid));
+ EXPECT_EQ(PlatformThread::CurrentId(), static_cast<PlatformThreadId>(tid));
+
+ std::string expected_name = "cafe,pub, bar";
+ std::string tmp;
+ EXPECT_TRUE(item->GetString("args.name", &tmp));
+ EXPECT_EQ(expected_name, tmp);
+}
+
+// Test that the disabled trace categories are included/excluded from the
+// trace output correctly.
+TEST_F(TraceEventTestFixture, DisabledCategories) {
+ BeginTrace();
+ TRACE_EVENT_INSTANT0(TRACE_DISABLED_BY_DEFAULT("cc"), "first",
+ TRACE_EVENT_SCOPE_THREAD);
+ TRACE_EVENT_INSTANT0("included", "first", TRACE_EVENT_SCOPE_THREAD);
+ EndTraceAndFlush();
+ {
+ const DictionaryValue* item = NULL;
+ ListValue& trace_parsed = trace_parsed_;
+ EXPECT_NOT_FIND_("disabled-by-default-cc");
+ EXPECT_FIND_("included");
+ }
+ Clear();
+
+ BeginSpecificTrace("disabled-by-default-cc");
+ TRACE_EVENT_INSTANT0(TRACE_DISABLED_BY_DEFAULT("cc"), "second",
+ TRACE_EVENT_SCOPE_THREAD);
+ TRACE_EVENT_INSTANT0("other_included", "second", TRACE_EVENT_SCOPE_THREAD);
+ EndTraceAndFlush();
+
+ {
+ const DictionaryValue* item = NULL;
+ ListValue& trace_parsed = trace_parsed_;
+ EXPECT_FIND_("disabled-by-default-cc");
+ EXPECT_FIND_("other_included");
+ }
+
+ Clear();
+
+ BeginSpecificTrace("other_included");
+ TRACE_EVENT_INSTANT0(TRACE_DISABLED_BY_DEFAULT("cc") ",other_included",
+ "first", TRACE_EVENT_SCOPE_THREAD);
+ TRACE_EVENT_INSTANT0("other_included," TRACE_DISABLED_BY_DEFAULT("cc"),
+ "second", TRACE_EVENT_SCOPE_THREAD);
+ EndTraceAndFlush();
+
+ {
+ const DictionaryValue* item = NULL;
+ ListValue& trace_parsed = trace_parsed_;
+ EXPECT_FIND_("disabled-by-default-cc,other_included");
+ EXPECT_FIND_("other_included,disabled-by-default-cc");
+ }
+}
+
+TEST_F(TraceEventTestFixture, NormallyNoDeepCopy) {
+  // Test that the TRACE_EVENT macros do not deep-copy their string. If they
+  // did, it might indicate a performance regression; moreover, it would make
+  // the DEEP_COPY overloads redundant.
+ std::string name_string("event name");
+
+ BeginTrace();
+ TRACE_EVENT_INSTANT0("category", name_string.c_str(),
+ TRACE_EVENT_SCOPE_THREAD);
+
+ // Modify the string in place (a wholesale reassignment may leave the old
+ // string intact on the heap).
+ name_string[0] = '@';
+
+ EndTraceAndFlush();
+
+ EXPECT_FALSE(FindTraceEntry(trace_parsed_, "event name"));
+ EXPECT_TRUE(FindTraceEntry(trace_parsed_, name_string.c_str()));
+}
+
+TEST_F(TraceEventTestFixture, DeepCopy) {
+ static const char kOriginalName1[] = "name1";
+ static const char kOriginalName2[] = "name2";
+ static const char kOriginalName3[] = "name3";
+ std::string name1(kOriginalName1);
+ std::string name2(kOriginalName2);
+ std::string name3(kOriginalName3);
+ std::string arg1("arg1");
+ std::string arg2("arg2");
+ std::string val1("val1");
+ std::string val2("val2");
+
+ BeginTrace();
+ TRACE_EVENT_COPY_INSTANT0("category", name1.c_str(),
+ TRACE_EVENT_SCOPE_THREAD);
+ TRACE_EVENT_COPY_BEGIN1("category", name2.c_str(),
+ arg1.c_str(), 5);
+ TRACE_EVENT_COPY_END2("category", name3.c_str(),
+ arg1.c_str(), val1,
+ arg2.c_str(), val2);
+
+ // As per NormallyNoDeepCopy, modify the strings in place.
+ name1[0] = name2[0] = name3[0] = arg1[0] = arg2[0] = val1[0] = val2[0] = '@';
+
+ EndTraceAndFlush();
+
+ EXPECT_FALSE(FindTraceEntry(trace_parsed_, name1.c_str()));
+ EXPECT_FALSE(FindTraceEntry(trace_parsed_, name2.c_str()));
+ EXPECT_FALSE(FindTraceEntry(trace_parsed_, name3.c_str()));
+
+ const DictionaryValue* entry1 = FindTraceEntry(trace_parsed_, kOriginalName1);
+ const DictionaryValue* entry2 = FindTraceEntry(trace_parsed_, kOriginalName2);
+ const DictionaryValue* entry3 = FindTraceEntry(trace_parsed_, kOriginalName3);
+ ASSERT_TRUE(entry1);
+ ASSERT_TRUE(entry2);
+ ASSERT_TRUE(entry3);
+
+ int i;
+ EXPECT_FALSE(entry2->GetInteger("args.@rg1", &i));
+ EXPECT_TRUE(entry2->GetInteger("args.arg1", &i));
+ EXPECT_EQ(5, i);
+
+ std::string s;
+ EXPECT_TRUE(entry3->GetString("args.arg1", &s));
+ EXPECT_EQ("val1", s);
+ EXPECT_TRUE(entry3->GetString("args.arg2", &s));
+ EXPECT_EQ("val2", s);
+}
+
+// Test that TraceResultBuffer outputs the correct result whether it is added
+// in chunks or added all at once.
+TEST_F(TraceEventTestFixture, TraceResultBuffer) {
+ Clear();
+
+ trace_buffer_.Start();
+ trace_buffer_.AddFragment("bla1");
+ trace_buffer_.AddFragment("bla2");
+ trace_buffer_.AddFragment("bla3,bla4");
+ trace_buffer_.Finish();
+ EXPECT_STREQ(json_output_.json_output.c_str(), "[bla1,bla2,bla3,bla4]");
+
+ Clear();
+
+ trace_buffer_.Start();
+ trace_buffer_.AddFragment("bla1,bla2,bla3,bla4");
+ trace_buffer_.Finish();
+ EXPECT_STREQ(json_output_.json_output.c_str(), "[bla1,bla2,bla3,bla4]");
+}
+
+// Test that trace_event parameters are not evaluated if the tracing
+// system is disabled.
+TEST_F(TraceEventTestFixture, TracingIsLazy) {
+ BeginTrace();
+
+ int a = 0;
+ TRACE_EVENT_INSTANT1("category", "test", TRACE_EVENT_SCOPE_THREAD, "a", a++);
+ EXPECT_EQ(1, a);
+
+ TraceLog::GetInstance()->SetDisabled();
+
+ TRACE_EVENT_INSTANT1("category", "test", TRACE_EVENT_SCOPE_THREAD, "a", a++);
+ EXPECT_EQ(1, a);
+
+ EndTraceAndFlush();
+}
+
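+// Test TraceLog enable/disable state transitions, including nested and
+// redundant calls.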
+TEST_F(TraceEventTestFixture, TraceEnableDisable) {
+ TraceLog* trace_log = TraceLog::GetInstance();
+ CategoryFilter cf_inc_all("*");
+ trace_log->SetEnabled(cf_inc_all,
+ TraceLog::RECORDING_MODE,
+ TraceOptions());
+ EXPECT_TRUE(trace_log->IsEnabled());
+ trace_log->SetDisabled();
+ EXPECT_FALSE(trace_log->IsEnabled());
+
+ trace_log->SetEnabled(cf_inc_all,
+ TraceLog::RECORDING_MODE,
+ TraceOptions());
+ EXPECT_TRUE(trace_log->IsEnabled());
+ const std::vector<std::string> empty;
+ trace_log->SetEnabled(CategoryFilter(),
+ TraceLog::RECORDING_MODE,
+ TraceOptions());
+ EXPECT_TRUE(trace_log->IsEnabled());
+ trace_log->SetDisabled();
+ EXPECT_FALSE(trace_log->IsEnabled());
+ trace_log->SetDisabled();
+ EXPECT_FALSE(trace_log->IsEnabled());
+}
+
+TEST_F(TraceEventTestFixture, TraceCategoriesAfterNestedEnable) {
+ TraceLog* trace_log = TraceLog::GetInstance();
+ trace_log->SetEnabled(CategoryFilter("foo,bar"),
+ TraceLog::RECORDING_MODE,
+ TraceOptions());
+ EXPECT_TRUE(*trace_log->GetCategoryGroupEnabled("foo"));
+ EXPECT_TRUE(*trace_log->GetCategoryGroupEnabled("bar"));
+ EXPECT_FALSE(*trace_log->GetCategoryGroupEnabled("baz"));
+ trace_log->SetEnabled(CategoryFilter("foo2"),
+ TraceLog::RECORDING_MODE,
+ TraceOptions());
+ EXPECT_TRUE(*trace_log->GetCategoryGroupEnabled("foo2"));
+ EXPECT_FALSE(*trace_log->GetCategoryGroupEnabled("baz"));
+ // The "" becomes the default catergory set when applied.
+ trace_log->SetEnabled(CategoryFilter(),
+ TraceLog::RECORDING_MODE,
+ TraceOptions());
+ EXPECT_TRUE(*trace_log->GetCategoryGroupEnabled("foo"));
+ EXPECT_TRUE(*trace_log->GetCategoryGroupEnabled("baz"));
+ EXPECT_STREQ("-*Debug,-*Test",
+ trace_log->GetCurrentCategoryFilter().ToString().c_str());
+ trace_log->SetDisabled();
+ trace_log->SetDisabled();
+ trace_log->SetDisabled();
+ EXPECT_FALSE(*trace_log->GetCategoryGroupEnabled("foo"));
+ EXPECT_FALSE(*trace_log->GetCategoryGroupEnabled("baz"));
+
+ trace_log->SetEnabled(CategoryFilter("-foo,-bar"),
+ TraceLog::RECORDING_MODE,
+ TraceOptions());
+ EXPECT_FALSE(*trace_log->GetCategoryGroupEnabled("foo"));
+ EXPECT_TRUE(*trace_log->GetCategoryGroupEnabled("baz"));
+ trace_log->SetEnabled(CategoryFilter("moo"),
+ TraceLog::RECORDING_MODE,
+ TraceOptions());
+ EXPECT_TRUE(*trace_log->GetCategoryGroupEnabled("baz"));
+ EXPECT_TRUE(*trace_log->GetCategoryGroupEnabled("moo"));
+ EXPECT_FALSE(*trace_log->GetCategoryGroupEnabled("foo"));
+ EXPECT_STREQ("-foo,-bar",
+ trace_log->GetCurrentCategoryFilter().ToString().c_str());
+ trace_log->SetDisabled();
+ trace_log->SetDisabled();
+
+  // Make sure disabled-by-default categories aren't cleared by a second
+  // filter.
+ trace_log->SetEnabled(CategoryFilter("disabled-by-default-cc,foo"),
+ TraceLog::RECORDING_MODE,
+ TraceOptions());
+ EXPECT_FALSE(*trace_log->GetCategoryGroupEnabled("bar"));
+ trace_log->SetEnabled(CategoryFilter("disabled-by-default-gpu"),
+ TraceLog::RECORDING_MODE,
+ TraceOptions());
+ EXPECT_TRUE(*trace_log->GetCategoryGroupEnabled("disabled-by-default-cc"));
+ EXPECT_TRUE(*trace_log->GetCategoryGroupEnabled("disabled-by-default-gpu"));
+ EXPECT_TRUE(*trace_log->GetCategoryGroupEnabled("bar"));
+ EXPECT_STREQ("disabled-by-default-cc,disabled-by-default-gpu",
+ trace_log->GetCurrentCategoryFilter().ToString().c_str());
+ trace_log->SetDisabled();
+ trace_log->SetDisabled();
+}
+
+TEST_F(TraceEventTestFixture, TraceSampling) {
+ TraceOptions trace_options(RECORD_UNTIL_FULL);
+ trace_options.enable_sampling = true;
+ TraceLog::GetInstance()->SetEnabled(CategoryFilter("*"),
+ TraceLog::RECORDING_MODE,
+ trace_options);
+
+ TRACE_EVENT_SET_SAMPLING_STATE_FOR_BUCKET(1, "cc", "Stuff");
+ TraceLog::GetInstance()->WaitSamplingEventForTesting();
+ TRACE_EVENT_SET_SAMPLING_STATE_FOR_BUCKET(1, "cc", "Things");
+ TraceLog::GetInstance()->WaitSamplingEventForTesting();
+
+ EndTraceAndFlush();
+
+  // Make sure each sampling state was hit at least once.
+ EXPECT_TRUE(FindNamePhase("Stuff", "P"));
+ EXPECT_TRUE(FindNamePhase("Things", "P"));
+}
+
+TEST_F(TraceEventTestFixture, TraceSamplingScope) {
+ TraceOptions trace_options(RECORD_UNTIL_FULL);
+ trace_options.enable_sampling = true;
+ TraceLog::GetInstance()->SetEnabled(CategoryFilter("*"),
+ TraceLog::RECORDING_MODE,
+ trace_options);
+
+ TRACE_EVENT_SCOPED_SAMPLING_STATE("AAA", "name");
+ TraceLog::GetInstance()->WaitSamplingEventForTesting();
+ {
+ EXPECT_STREQ(TRACE_EVENT_GET_SAMPLING_STATE(), "AAA");
+ TRACE_EVENT_SCOPED_SAMPLING_STATE("BBB", "name");
+ TraceLog::GetInstance()->WaitSamplingEventForTesting();
+ EXPECT_STREQ(TRACE_EVENT_GET_SAMPLING_STATE(), "BBB");
+ }
+ TraceLog::GetInstance()->WaitSamplingEventForTesting();
+ {
+ EXPECT_STREQ(TRACE_EVENT_GET_SAMPLING_STATE(), "AAA");
+ TRACE_EVENT_SCOPED_SAMPLING_STATE("CCC", "name");
+ TraceLog::GetInstance()->WaitSamplingEventForTesting();
+ EXPECT_STREQ(TRACE_EVENT_GET_SAMPLING_STATE(), "CCC");
+ }
+ TraceLog::GetInstance()->WaitSamplingEventForTesting();
+ {
+ EXPECT_STREQ(TRACE_EVENT_GET_SAMPLING_STATE(), "AAA");
+ TRACE_EVENT_SET_SAMPLING_STATE("DDD", "name");
+ TraceLog::GetInstance()->WaitSamplingEventForTesting();
+ EXPECT_STREQ(TRACE_EVENT_GET_SAMPLING_STATE(), "DDD");
+ }
+ TraceLog::GetInstance()->WaitSamplingEventForTesting();
+ EXPECT_STREQ(TRACE_EVENT_GET_SAMPLING_STATE(), "DDD");
+
+ EndTraceAndFlush();
+}
+
+TEST_F(TraceEventTestFixture, TraceContinuousSampling) {
+ TraceOptions trace_options(RECORD_UNTIL_FULL);
+ trace_options.enable_sampling = true;
+
+ TraceLog::GetInstance()->SetEnabled(CategoryFilter("*"),
+ TraceLog::MONITORING_MODE,
+ trace_options);
+
+ TRACE_EVENT_SET_SAMPLING_STATE_FOR_BUCKET(1, "category", "AAA");
+ TraceLog::GetInstance()->WaitSamplingEventForTesting();
+ TRACE_EVENT_SET_SAMPLING_STATE_FOR_BUCKET(1, "category", "BBB");
+ TraceLog::GetInstance()->WaitSamplingEventForTesting();
+
+ FlushMonitoring();
+
+ // Make sure we can get the profiled data.
+ EXPECT_TRUE(FindNamePhase("AAA", "P"));
+ EXPECT_TRUE(FindNamePhase("BBB", "P"));
+
+ Clear();
+ TraceLog::GetInstance()->WaitSamplingEventForTesting();
+
+ TRACE_EVENT_SET_SAMPLING_STATE_FOR_BUCKET(1, "category", "CCC");
+ TraceLog::GetInstance()->WaitSamplingEventForTesting();
+ TRACE_EVENT_SET_SAMPLING_STATE_FOR_BUCKET(1, "category", "DDD");
+ TraceLog::GetInstance()->WaitSamplingEventForTesting();
+
+ FlushMonitoring();
+
+ // Make sure the profiled data is accumulated.
+ EXPECT_TRUE(FindNamePhase("AAA", "P"));
+ EXPECT_TRUE(FindNamePhase("BBB", "P"));
+ EXPECT_TRUE(FindNamePhase("CCC", "P"));
+ EXPECT_TRUE(FindNamePhase("DDD", "P"));
+
+ Clear();
+
+ TraceLog::GetInstance()->SetDisabled();
+
+ // Make sure disabling the continuous sampling thread clears
+ // the profiled data.
+ EXPECT_FALSE(FindNamePhase("AAA", "P"));
+ EXPECT_FALSE(FindNamePhase("BBB", "P"));
+ EXPECT_FALSE(FindNamePhase("CCC", "P"));
+ EXPECT_FALSE(FindNamePhase("DDD", "P"));
+
+ Clear();
+}
+
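+// Minimal ConvertableToTraceFormat implementation that serializes itself as
+// a fixed JSON dictionary.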
+class MyData : public ConvertableToTraceFormat {
+ public:
+ MyData() {}
+
+ void AppendAsTraceFormat(std::string* out) const override {
+ out->append("{\"foo\":1}");
+ }
+
+ private:
+ ~MyData() override {}
+ DISALLOW_COPY_AND_ASSIGN(MyData);
+};
+
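+// Test that ConvertableToTraceFormat arguments are serialized via
+// AppendAsTraceFormat, both alone and mixed with plain arguments.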
+TEST_F(TraceEventTestFixture, ConvertableTypes) {
+ TraceLog::GetInstance()->SetEnabled(
+ CategoryFilter("*"),
+ TraceLog::RECORDING_MODE,
+ TraceOptions());
+
+ scoped_refptr<ConvertableToTraceFormat> data(new MyData());
+ scoped_refptr<ConvertableToTraceFormat> data1(new MyData());
+ scoped_refptr<ConvertableToTraceFormat> data2(new MyData());
+ TRACE_EVENT1("foo", "bar", "data", data);
+ TRACE_EVENT2("foo", "baz",
+ "data1", data1,
+ "data2", data2);
+
+ scoped_refptr<ConvertableToTraceFormat> convertData1(new MyData());
+ scoped_refptr<ConvertableToTraceFormat> convertData2(new MyData());
+ TRACE_EVENT2(
+ "foo",
+ "string_first",
+ "str",
+ "string value 1",
+ "convert",
+ convertData1);
+ TRACE_EVENT2(
+ "foo",
+ "string_second",
+ "convert",
+ convertData2,
+ "str",
+ "string value 2");
+ EndTraceAndFlush();
+
+ // One arg version.
+ DictionaryValue* dict = FindNamePhase("bar", "X");
+ ASSERT_TRUE(dict);
+
+ const DictionaryValue* args_dict = NULL;
+ dict->GetDictionary("args", &args_dict);
+ ASSERT_TRUE(args_dict);
+
+ const Value* value = NULL;
+ const DictionaryValue* convertable_dict = NULL;
+ EXPECT_TRUE(args_dict->Get("data", &value));
+ ASSERT_TRUE(value->GetAsDictionary(&convertable_dict));
+
+ int foo_val;
+ EXPECT_TRUE(convertable_dict->GetInteger("foo", &foo_val));
+ EXPECT_EQ(1, foo_val);
+
+ // Two arg version.
+ dict = FindNamePhase("baz", "X");
+ ASSERT_TRUE(dict);
+
+ args_dict = NULL;
+ dict->GetDictionary("args", &args_dict);
+ ASSERT_TRUE(args_dict);
+
+ value = NULL;
+ convertable_dict = NULL;
+ EXPECT_TRUE(args_dict->Get("data1", &value));
+ ASSERT_TRUE(value->GetAsDictionary(&convertable_dict));
+
+ value = NULL;
+ convertable_dict = NULL;
+ EXPECT_TRUE(args_dict->Get("data2", &value));
+ ASSERT_TRUE(value->GetAsDictionary(&convertable_dict));
+
+ // Convertable with other types.
+ dict = FindNamePhase("string_first", "X");
+ ASSERT_TRUE(dict);
+
+ args_dict = NULL;
+ dict->GetDictionary("args", &args_dict);
+ ASSERT_TRUE(args_dict);
+
+ std::string str_value;
+ EXPECT_TRUE(args_dict->GetString("str", &str_value));
+ EXPECT_STREQ("string value 1", str_value.c_str());
+
+ value = NULL;
+ convertable_dict = NULL;
+ foo_val = 0;
+ EXPECT_TRUE(args_dict->Get("convert", &value));
+ ASSERT_TRUE(value->GetAsDictionary(&convertable_dict));
+ EXPECT_TRUE(convertable_dict->GetInteger("foo", &foo_val));
+ EXPECT_EQ(1, foo_val);
+
+ dict = FindNamePhase("string_second", "X");
+ ASSERT_TRUE(dict);
+
+ args_dict = NULL;
+ dict->GetDictionary("args", &args_dict);
+ ASSERT_TRUE(args_dict);
+
+ EXPECT_TRUE(args_dict->GetString("str", &str_value));
+ EXPECT_STREQ("string value 2", str_value.c_str());
+
+ value = NULL;
+ convertable_dict = NULL;
+ foo_val = 0;
+ EXPECT_TRUE(args_dict->Get("convert", &value));
+ ASSERT_TRUE(value->GetAsDictionary(&convertable_dict));
+ EXPECT_TRUE(convertable_dict->GetInteger("foo", &foo_val));
+ EXPECT_EQ(1, foo_val);
+}
+
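+// Test JSON serialization of primitive argument types: ints, floats,
+// non-finite doubles, pointers, bools, and time values.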
+TEST_F(TraceEventTestFixture, PrimitiveArgs) {
+ TraceLog::GetInstance()->SetEnabled(
+ CategoryFilter("*"),
+ TraceLog::RECORDING_MODE,
+ TraceOptions());
+
+ TRACE_EVENT1("foo", "event1", "int_one", 1);
+ TRACE_EVENT1("foo", "event2", "int_neg_ten", -10);
+ TRACE_EVENT1("foo", "event3", "float_one", 1.0f);
+ TRACE_EVENT1("foo", "event4", "float_half", .5f);
+ TRACE_EVENT1("foo", "event5", "float_neghalf", -.5f);
+ TRACE_EVENT1("foo", "event6", "float_infinity",
+ std::numeric_limits<float>::infinity());
+ TRACE_EVENT1("foo", "event6b", "float_neg_infinity",
+ -std::numeric_limits<float>::infinity());
+ TRACE_EVENT1("foo", "event7", "double_nan",
+ std::numeric_limits<double>::quiet_NaN());
+ void* p = 0;
+ TRACE_EVENT1("foo", "event8", "pointer_null", p);
+ p = reinterpret_cast<void*>(0xbadf00d);
+ TRACE_EVENT1("foo", "event9", "pointer_badf00d", p);
+ TRACE_EVENT1("foo", "event10", "bool_true", true);
+ TRACE_EVENT1("foo", "event11", "bool_false", false);
+ TRACE_EVENT1("foo", "event12", "time_null",
+ base::Time());
+ TRACE_EVENT1("foo", "event13", "time_one",
+ base::Time::FromInternalValue(1));
+ TRACE_EVENT1("foo", "event14", "timeticks_null",
+ base::TimeTicks());
+ TRACE_EVENT1("foo", "event15", "timeticks_one",
+ base::TimeTicks::FromInternalValue(1));
+ EndTraceAndFlush();
+
+ const DictionaryValue* args_dict = NULL;
+ DictionaryValue* dict = NULL;
+ const Value* value = NULL;
+ std::string str_value;
+ int int_value;
+ double double_value;
+ bool bool_value;
+
+ dict = FindNamePhase("event1", "X");
+ ASSERT_TRUE(dict);
+ dict->GetDictionary("args", &args_dict);
+ ASSERT_TRUE(args_dict);
+ EXPECT_TRUE(args_dict->GetInteger("int_one", &int_value));
+ EXPECT_EQ(1, int_value);
+
+ dict = FindNamePhase("event2", "X");
+ ASSERT_TRUE(dict);
+ dict->GetDictionary("args", &args_dict);
+ ASSERT_TRUE(args_dict);
+ EXPECT_TRUE(args_dict->GetInteger("int_neg_ten", &int_value));
+ EXPECT_EQ(-10, int_value);
+
+  // 1f must be serialized to JSON as "1.0" to be a double, not an int.
+ dict = FindNamePhase("event3", "X");
+ ASSERT_TRUE(dict);
+ dict->GetDictionary("args", &args_dict);
+ ASSERT_TRUE(args_dict);
+ EXPECT_TRUE(args_dict->Get("float_one", &value));
+ EXPECT_TRUE(value->IsType(Value::TYPE_DOUBLE));
+ EXPECT_TRUE(value->GetAsDouble(&double_value));
+ EXPECT_EQ(1, double_value);
+
+  // .5f must be serialized to JSON as "0.5".
+ dict = FindNamePhase("event4", "X");
+ ASSERT_TRUE(dict);
+ dict->GetDictionary("args", &args_dict);
+ ASSERT_TRUE(args_dict);
+ EXPECT_TRUE(args_dict->Get("float_half", &value));
+ EXPECT_TRUE(value->IsType(Value::TYPE_DOUBLE));
+ EXPECT_TRUE(value->GetAsDouble(&double_value));
+ EXPECT_EQ(0.5, double_value);
+
+  // -.5f must be serialized to JSON as "-0.5".
+ dict = FindNamePhase("event5", "X");
+ ASSERT_TRUE(dict);
+ dict->GetDictionary("args", &args_dict);
+ ASSERT_TRUE(args_dict);
+ EXPECT_TRUE(args_dict->Get("float_neghalf", &value));
+ EXPECT_TRUE(value->IsType(Value::TYPE_DOUBLE));
+ EXPECT_TRUE(value->GetAsDouble(&double_value));
+ EXPECT_EQ(-0.5, double_value);
+
+ // Infinity is serialized to JSON as a string.
+ dict = FindNamePhase("event6", "X");
+ ASSERT_TRUE(dict);
+ dict->GetDictionary("args", &args_dict);
+ ASSERT_TRUE(args_dict);
+ EXPECT_TRUE(args_dict->GetString("float_infinity", &str_value));
+ EXPECT_STREQ("Infinity", str_value.c_str());
+ dict = FindNamePhase("event6b", "X");
+ ASSERT_TRUE(dict);
+ dict->GetDictionary("args", &args_dict);
+ ASSERT_TRUE(args_dict);
+ EXPECT_TRUE(args_dict->GetString("float_neg_infinity", &str_value));
+ EXPECT_STREQ("-Infinity", str_value.c_str());
+
+ // NaN is serialized to JSON as a string.
+ dict = FindNamePhase("event7", "X");
+ ASSERT_TRUE(dict);
+ dict->GetDictionary("args", &args_dict);
+ ASSERT_TRUE(args_dict);
+ EXPECT_TRUE(args_dict->GetString("double_nan", &str_value));
+ EXPECT_STREQ("NaN", str_value.c_str());
+
+ // NULL pointers should be serialized as "0x0".
+ dict = FindNamePhase("event8", "X");
+ ASSERT_TRUE(dict);
+ dict->GetDictionary("args", &args_dict);
+ ASSERT_TRUE(args_dict);
+ EXPECT_TRUE(args_dict->GetString("pointer_null", &str_value));
+ EXPECT_STREQ("0x0", str_value.c_str());
+
+  // Other pointers should be serialized as a hex string.
+ dict = FindNamePhase("event9", "X");
+ ASSERT_TRUE(dict);
+ dict->GetDictionary("args", &args_dict);
+ ASSERT_TRUE(args_dict);
+ EXPECT_TRUE(args_dict->GetString("pointer_badf00d", &str_value));
+ EXPECT_STREQ("0xbadf00d", str_value.c_str());
+
+ dict = FindNamePhase("event10", "X");
+ ASSERT_TRUE(dict);
+ dict->GetDictionary("args", &args_dict);
+ ASSERT_TRUE(args_dict);
+ EXPECT_TRUE(args_dict->GetBoolean("bool_true", &bool_value));
+ EXPECT_TRUE(bool_value);
+
+ dict = FindNamePhase("event11", "X");
+ ASSERT_TRUE(dict);
+ dict->GetDictionary("args", &args_dict);
+ ASSERT_TRUE(args_dict);
+ EXPECT_TRUE(args_dict->GetBoolean("bool_false", &bool_value));
+ EXPECT_FALSE(bool_value);
+
+ dict = FindNamePhase("event12", "X");
+ ASSERT_TRUE(dict);
+ dict->GetDictionary("args", &args_dict);
+ ASSERT_TRUE(args_dict);
+ EXPECT_TRUE(args_dict->GetInteger("time_null", &int_value));
+ EXPECT_EQ(0, int_value);
+
+ dict = FindNamePhase("event13", "X");
+ ASSERT_TRUE(dict);
+ dict->GetDictionary("args", &args_dict);
+ ASSERT_TRUE(args_dict);
+ EXPECT_TRUE(args_dict->GetInteger("time_one", &int_value));
+ EXPECT_EQ(1, int_value);
+
+ dict = FindNamePhase("event14", "X");
+ ASSERT_TRUE(dict);
+ dict->GetDictionary("args", &args_dict);
+ ASSERT_TRUE(args_dict);
+ EXPECT_TRUE(args_dict->GetInteger("timeticks_null", &int_value));
+ EXPECT_EQ(0, int_value);
+
+ dict = FindNamePhase("event15", "X");
+ ASSERT_TRUE(dict);
+ dict->GetDictionary("args", &args_dict);
+ ASSERT_TRUE(args_dict);
+ EXPECT_TRUE(args_dict->GetInteger("timeticks_one", &int_value));
+ EXPECT_EQ(1, int_value);
+}
+
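+// Fixture that installs a static trace-event callback and records the phase,
+// category, name and timestamp of every event it receives.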
+class TraceEventCallbackTest : public TraceEventTestFixture {
+ public:
+ void SetUp() override {
+ TraceEventTestFixture::SetUp();
+ ASSERT_EQ(NULL, s_instance);
+ s_instance = this;
+ }
+ void TearDown() override {
+ TraceLog::GetInstance()->SetDisabled();
+ ASSERT_TRUE(!!s_instance);
+ s_instance = NULL;
+ TraceEventTestFixture::TearDown();
+ }
+
+ protected:
+ // For TraceEventCallbackAndRecordingX tests.
+ void VerifyCallbackAndRecordedEvents(size_t expected_callback_count,
+ size_t expected_recorded_count) {
+ // Callback events.
+ EXPECT_EQ(expected_callback_count, collected_events_names_.size());
+ for (size_t i = 0; i < collected_events_names_.size(); ++i) {
+ EXPECT_EQ("callback", collected_events_categories_[i]);
+ EXPECT_EQ("yes", collected_events_names_[i]);
+ }
+
+ // Recorded events.
+ EXPECT_EQ(expected_recorded_count, trace_parsed_.GetSize());
+ EXPECT_TRUE(FindTraceEntry(trace_parsed_, "recording"));
+ EXPECT_FALSE(FindTraceEntry(trace_parsed_, "callback"));
+ EXPECT_TRUE(FindTraceEntry(trace_parsed_, "yes"));
+ EXPECT_FALSE(FindTraceEntry(trace_parsed_, "no"));
+ }
+
+ void VerifyCollectedEvent(size_t i,
+ unsigned phase,
+ const std::string& category,
+ const std::string& name) {
+ EXPECT_EQ(phase, collected_events_phases_[i]);
+ EXPECT_EQ(category, collected_events_categories_[i]);
+ EXPECT_EQ(name, collected_events_names_[i]);
+ }
+
+ std::vector<std::string> collected_events_categories_;
+ std::vector<std::string> collected_events_names_;
+ std::vector<unsigned char> collected_events_phases_;
+ std::vector<TimeTicks> collected_events_timestamps_;
+
+ static TraceEventCallbackTest* s_instance;
+ static void Callback(TimeTicks timestamp,
+ char phase,
+ const unsigned char* category_group_enabled,
+ const char* name,
+ unsigned long long id,
+ int num_args,
+ const char* const arg_names[],
+ const unsigned char arg_types[],
+ const unsigned long long arg_values[],
+ unsigned char flags) {
+ s_instance->collected_events_phases_.push_back(phase);
+ s_instance->collected_events_categories_.push_back(
+ TraceLog::GetCategoryGroupName(category_group_enabled));
+ s_instance->collected_events_names_.push_back(name);
+ s_instance->collected_events_timestamps_.push_back(timestamp);
+ }
+};
+
+TraceEventCallbackTest* TraceEventCallbackTest::s_instance;
+
+TEST_F(TraceEventCallbackTest, TraceEventCallback) {
+ TRACE_EVENT_INSTANT0("all", "before enable", TRACE_EVENT_SCOPE_THREAD);
+ TraceLog::GetInstance()->SetEventCallbackEnabled(
+ CategoryFilter("*"), Callback);
+ TRACE_EVENT_INSTANT0("all", "event1", TRACE_EVENT_SCOPE_GLOBAL);
+ TRACE_EVENT_INSTANT0("all", "event2", TRACE_EVENT_SCOPE_GLOBAL);
+ {
+ TRACE_EVENT0("all", "duration");
+ TRACE_EVENT_INSTANT0("all", "event3", TRACE_EVENT_SCOPE_GLOBAL);
+ }
+ TraceLog::GetInstance()->SetEventCallbackDisabled();
+ TRACE_EVENT_INSTANT0("all", "after callback removed",
+ TRACE_EVENT_SCOPE_GLOBAL);
+ ASSERT_EQ(5u, collected_events_names_.size());
+ EXPECT_EQ("event1", collected_events_names_[0]);
+ EXPECT_EQ(TRACE_EVENT_PHASE_INSTANT, collected_events_phases_[0]);
+ EXPECT_EQ("event2", collected_events_names_[1]);
+ EXPECT_EQ(TRACE_EVENT_PHASE_INSTANT, collected_events_phases_[1]);
+ EXPECT_EQ("duration", collected_events_names_[2]);
+ EXPECT_EQ(TRACE_EVENT_PHASE_BEGIN, collected_events_phases_[2]);
+ EXPECT_EQ("event3", collected_events_names_[3]);
+ EXPECT_EQ(TRACE_EVENT_PHASE_INSTANT, collected_events_phases_[3]);
+ EXPECT_EQ("duration", collected_events_names_[4]);
+ EXPECT_EQ(TRACE_EVENT_PHASE_END, collected_events_phases_[4]);
+ for (size_t i = 1; i < collected_events_timestamps_.size(); i++) {
+ EXPECT_LE(collected_events_timestamps_[i - 1],
+ collected_events_timestamps_[i]);
+ }
+}
+
+TEST_F(TraceEventCallbackTest, TraceEventCallbackWhileFull) {
+ TraceLog::GetInstance()->SetEnabled(
+ CategoryFilter("*"),
+ TraceLog::RECORDING_MODE,
+ TraceOptions());
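+  // Fill the recording buffer to capacity before enabling the callback.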
+ do {
+ TRACE_EVENT_INSTANT0("all", "badger badger", TRACE_EVENT_SCOPE_GLOBAL);
+ } while (!TraceLog::GetInstance()->BufferIsFull());
+ TraceLog::GetInstance()->SetEventCallbackEnabled(CategoryFilter("*"),
+ Callback);
+ TRACE_EVENT_INSTANT0("all", "a snake", TRACE_EVENT_SCOPE_GLOBAL);
+ TraceLog::GetInstance()->SetEventCallbackDisabled();
+ ASSERT_EQ(1u, collected_events_names_.size());
+ EXPECT_EQ("a snake", collected_events_names_[0]);
+}
+
+// 1: Enable callback, enable recording, disable callback, disable recording.
+TEST_F(TraceEventCallbackTest, TraceEventCallbackAndRecording1) {
+ TRACE_EVENT_INSTANT0("recording", "no", TRACE_EVENT_SCOPE_GLOBAL);
+ TRACE_EVENT_INSTANT0("callback", "no", TRACE_EVENT_SCOPE_GLOBAL);
+ TraceLog::GetInstance()->SetEventCallbackEnabled(CategoryFilter("callback"),
+ Callback);
+ TRACE_EVENT_INSTANT0("recording", "no", TRACE_EVENT_SCOPE_GLOBAL);
+ TRACE_EVENT_INSTANT0("callback", "yes", TRACE_EVENT_SCOPE_GLOBAL);
+ TraceLog::GetInstance()->SetEnabled(
+ CategoryFilter("recording"),
+ TraceLog::RECORDING_MODE,
+ TraceOptions());
+ TRACE_EVENT_INSTANT0("recording", "yes", TRACE_EVENT_SCOPE_GLOBAL);
+ TRACE_EVENT_INSTANT0("callback", "yes", TRACE_EVENT_SCOPE_GLOBAL);
+ TraceLog::GetInstance()->SetEventCallbackDisabled();
+ TRACE_EVENT_INSTANT0("recording", "yes", TRACE_EVENT_SCOPE_GLOBAL);
+ TRACE_EVENT_INSTANT0("callback", "no", TRACE_EVENT_SCOPE_GLOBAL);
+ EndTraceAndFlush();
+ TRACE_EVENT_INSTANT0("recording", "no", TRACE_EVENT_SCOPE_GLOBAL);
+ TRACE_EVENT_INSTANT0("callback", "no", TRACE_EVENT_SCOPE_GLOBAL);
+
+ DropTracedMetadataRecords();
+ VerifyCallbackAndRecordedEvents(2, 2);
+}
+
+// 2: Enable callback, enable recording, disable recording, disable callback.
+TEST_F(TraceEventCallbackTest, TraceEventCallbackAndRecording2) {
+ TRACE_EVENT_INSTANT0("recording", "no", TRACE_EVENT_SCOPE_GLOBAL);
+ TRACE_EVENT_INSTANT0("callback", "no", TRACE_EVENT_SCOPE_GLOBAL);
+ TraceLog::GetInstance()->SetEventCallbackEnabled(CategoryFilter("callback"),
+ Callback);
+ TRACE_EVENT_INSTANT0("recording", "no", TRACE_EVENT_SCOPE_GLOBAL);
+ TRACE_EVENT_INSTANT0("callback", "yes", TRACE_EVENT_SCOPE_GLOBAL);
+ TraceLog::GetInstance()->SetEnabled(
+ CategoryFilter("recording"),
+ TraceLog::RECORDING_MODE,
+ TraceOptions());
+ TRACE_EVENT_INSTANT0("recording", "yes", TRACE_EVENT_SCOPE_GLOBAL);
+ TRACE_EVENT_INSTANT0("callback", "yes", TRACE_EVENT_SCOPE_GLOBAL);
+ EndTraceAndFlush();
+ TRACE_EVENT_INSTANT0("recording", "no", TRACE_EVENT_SCOPE_GLOBAL);
+ TRACE_EVENT_INSTANT0("callback", "yes", TRACE_EVENT_SCOPE_GLOBAL);
+ TraceLog::GetInstance()->SetEventCallbackDisabled();
+ TRACE_EVENT_INSTANT0("recording", "no", TRACE_EVENT_SCOPE_GLOBAL);
+ TRACE_EVENT_INSTANT0("callback", "no", TRACE_EVENT_SCOPE_GLOBAL);
+
+ DropTracedMetadataRecords();
+ VerifyCallbackAndRecordedEvents(3, 1);
+}
+
+// 3: Enable recording, enable callback, disable callback, disable recording.
+TEST_F(TraceEventCallbackTest, TraceEventCallbackAndRecording3) {
+ TRACE_EVENT_INSTANT0("recording", "no", TRACE_EVENT_SCOPE_GLOBAL);
+ TRACE_EVENT_INSTANT0("callback", "no", TRACE_EVENT_SCOPE_GLOBAL);
+ TraceLog::GetInstance()->SetEnabled(
+ CategoryFilter("recording"),
+ TraceLog::RECORDING_MODE,
+ TraceOptions());
+ TRACE_EVENT_INSTANT0("recording", "yes", TRACE_EVENT_SCOPE_GLOBAL);
+ TRACE_EVENT_INSTANT0("callback", "no", TRACE_EVENT_SCOPE_GLOBAL);
+ TraceLog::GetInstance()->SetEventCallbackEnabled(CategoryFilter("callback"),
+ Callback);
+ TRACE_EVENT_INSTANT0("recording", "yes", TRACE_EVENT_SCOPE_GLOBAL);
+ TRACE_EVENT_INSTANT0("callback", "yes", TRACE_EVENT_SCOPE_GLOBAL);
+ TraceLog::GetInstance()->SetEventCallbackDisabled();
+ TRACE_EVENT_INSTANT0("recording", "yes", TRACE_EVENT_SCOPE_GLOBAL);
+ TRACE_EVENT_INSTANT0("callback", "no", TRACE_EVENT_SCOPE_GLOBAL);
+ EndTraceAndFlush();
+ TRACE_EVENT_INSTANT0("recording", "no", TRACE_EVENT_SCOPE_GLOBAL);
+ TRACE_EVENT_INSTANT0("callback", "no", TRACE_EVENT_SCOPE_GLOBAL);
+
+ DropTracedMetadataRecords();
+ VerifyCallbackAndRecordedEvents(1, 3);
+}
+
+// 4: Enable recording, enable callback, disable recording, disable callback.
+TEST_F(TraceEventCallbackTest, TraceEventCallbackAndRecording4) {
+ TRACE_EVENT_INSTANT0("recording", "no", TRACE_EVENT_SCOPE_GLOBAL);
+ TRACE_EVENT_INSTANT0("callback", "no", TRACE_EVENT_SCOPE_GLOBAL);
+ TraceLog::GetInstance()->SetEnabled(
+ CategoryFilter("recording"),
+ TraceLog::RECORDING_MODE,
+ TraceOptions());
+ TRACE_EVENT_INSTANT0("recording", "yes", TRACE_EVENT_SCOPE_GLOBAL);
+ TRACE_EVENT_INSTANT0("callback", "no", TRACE_EVENT_SCOPE_GLOBAL);
+ TraceLog::GetInstance()->SetEventCallbackEnabled(CategoryFilter("callback"),
+ Callback);
+ TRACE_EVENT_INSTANT0("recording", "yes", TRACE_EVENT_SCOPE_GLOBAL);
+ TRACE_EVENT_INSTANT0("callback", "yes", TRACE_EVENT_SCOPE_GLOBAL);
+ EndTraceAndFlush();
+ TRACE_EVENT_INSTANT0("recording", "no", TRACE_EVENT_SCOPE_GLOBAL);
+ TRACE_EVENT_INSTANT0("callback", "yes", TRACE_EVENT_SCOPE_GLOBAL);
+ TraceLog::GetInstance()->SetEventCallbackDisabled();
+ TRACE_EVENT_INSTANT0("recording", "no", TRACE_EVENT_SCOPE_GLOBAL);
+ TRACE_EVENT_INSTANT0("callback", "no", TRACE_EVENT_SCOPE_GLOBAL);
+
+ DropTracedMetadataRecords();
+ VerifyCallbackAndRecordedEvents(2, 2);
+}
+
+TEST_F(TraceEventCallbackTest, TraceEventCallbackAndRecordingDuration) {
+ TraceLog::GetInstance()->SetEventCallbackEnabled(CategoryFilter("*"),
+ Callback);
+ {
+ TRACE_EVENT0("callback", "duration1");
+ TraceLog::GetInstance()->SetEnabled(
+ CategoryFilter("*"),
+ TraceLog::RECORDING_MODE,
+ TraceOptions());
+ TRACE_EVENT0("callback", "duration2");
+ EndTraceAndFlush();
+ TRACE_EVENT0("callback", "duration3");
+ }
+ TraceLog::GetInstance()->SetEventCallbackDisabled();
+
+ ASSERT_EQ(6u, collected_events_names_.size());
+ VerifyCollectedEvent(0, TRACE_EVENT_PHASE_BEGIN, "callback", "duration1");
+ VerifyCollectedEvent(1, TRACE_EVENT_PHASE_BEGIN, "callback", "duration2");
+ VerifyCollectedEvent(2, TRACE_EVENT_PHASE_BEGIN, "callback", "duration3");
+ VerifyCollectedEvent(3, TRACE_EVENT_PHASE_END, "callback", "duration3");
+ VerifyCollectedEvent(4, TRACE_EVENT_PHASE_END, "callback", "duration2");
+ VerifyCollectedEvent(5, TRACE_EVENT_PHASE_END, "callback", "duration1");
+}
+
+TEST_F(TraceEventTestFixture, TraceBufferVectorReportFull) {
+ TraceLog* trace_log = TraceLog::GetInstance();
+ trace_log->SetEnabled(CategoryFilter("*"),
+ TraceLog::RECORDING_MODE,
+ TraceOptions());
+ trace_log->logged_events_.reset(
+ trace_log->CreateTraceBufferVectorOfSize(100));
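+  // Fill the fixed-size vector buffer until it reports itself full.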
+ do {
+ TRACE_EVENT_BEGIN_WITH_ID_TID_AND_TIMESTAMP0(
+ "all", "with_timestamp", 0, 0,
+ TimeTicks::NowFromSystemTraceTime().ToInternalValue());
+ TRACE_EVENT_END_WITH_ID_TID_AND_TIMESTAMP0(
+ "all", "with_timestamp", 0, 0,
+ TimeTicks::NowFromSystemTraceTime().ToInternalValue());
+ } while (!trace_log->BufferIsFull());
+
+ EndTraceAndFlush();
+
+ const DictionaryValue* trace_full_metadata = NULL;
+
+ trace_full_metadata = FindTraceEntry(trace_parsed_,
+ "overflowed_at_ts");
+ std::string phase;
+ double buffer_limit_reached_timestamp = 0;
+
+ EXPECT_TRUE(trace_full_metadata);
+ EXPECT_TRUE(trace_full_metadata->GetString("ph", &phase));
+ EXPECT_EQ("M", phase);
+ EXPECT_TRUE(trace_full_metadata->GetDouble(
+ "args.overflowed_at_ts", &buffer_limit_reached_timestamp));
+ EXPECT_DOUBLE_EQ(
+ static_cast<double>(
+ trace_log->buffer_limit_reached_timestamp_.ToInternalValue()),
+ buffer_limit_reached_timestamp);
+
+ // Test that buffer_limit_reached_timestamp's value is between the timestamp
+ // of the last trace event and current time.
+ DropTracedMetadataRecords();
+ const DictionaryValue* last_trace_event = NULL;
+ double last_trace_event_timestamp = 0;
+ EXPECT_TRUE(trace_parsed_.GetDictionary(trace_parsed_.GetSize() - 1,
+ &last_trace_event));
+ EXPECT_TRUE(last_trace_event->GetDouble("ts", &last_trace_event_timestamp));
+ EXPECT_LE(last_trace_event_timestamp, buffer_limit_reached_timestamp);
+ EXPECT_LE(buffer_limit_reached_timestamp,
+ trace_log->OffsetNow().ToInternalValue());
+}
+
+TEST_F(TraceEventTestFixture, TraceBufferRingBufferGetReturnChunk) {
+ TraceLog::GetInstance()->SetEnabled(CategoryFilter("*"),
+ TraceLog::RECORDING_MODE,
+ TraceOptions(RECORD_CONTINUOUSLY));
+ TraceBuffer* buffer = TraceLog::GetInstance()->trace_buffer();
+ size_t capacity = buffer->Capacity();
+ size_t num_chunks = capacity / TraceBufferChunk::kTraceBufferChunkSize;
+ uint32 last_seq = 0;
+ size_t chunk_index;
+ EXPECT_EQ(0u, buffer->Size());
+
+ scoped_ptr<TraceBufferChunk*[]> chunks(new TraceBufferChunk*[num_chunks]);
+ for (size_t i = 0; i < num_chunks; ++i) {
+ chunks[i] = buffer->GetChunk(&chunk_index).release();
+ EXPECT_TRUE(chunks[i]);
+ EXPECT_EQ(i, chunk_index);
+ EXPECT_GT(chunks[i]->seq(), last_seq);
+ EXPECT_EQ((i + 1) * TraceBufferChunk::kTraceBufferChunkSize,
+ buffer->Size());
+ last_seq = chunks[i]->seq();
+ }
+
+ // Ring buffer is never full.
+ EXPECT_FALSE(buffer->IsFull());
+
+ // Return all chunks in original order.
+ for (size_t i = 0; i < num_chunks; ++i)
+ buffer->ReturnChunk(i, scoped_ptr<TraceBufferChunk>(chunks[i]));
+
+ // Should recycle the chunks in the returned order.
+ for (size_t i = 0; i < num_chunks; ++i) {
+ chunks[i] = buffer->GetChunk(&chunk_index).release();
+ EXPECT_TRUE(chunks[i]);
+ EXPECT_EQ(i, chunk_index);
+ EXPECT_GT(chunks[i]->seq(), last_seq);
+ last_seq = chunks[i]->seq();
+ }
+
+ // Return all chunks in reverse order.
+ for (size_t i = 0; i < num_chunks; ++i) {
+ buffer->ReturnChunk(
+ num_chunks - i - 1,
+ scoped_ptr<TraceBufferChunk>(chunks[num_chunks - i - 1]));
+ }
+
+ // Should recycle the chunks in the returned order.
+ for (size_t i = 0; i < num_chunks; ++i) {
+ chunks[i] = buffer->GetChunk(&chunk_index).release();
+ EXPECT_TRUE(chunks[i]);
+ EXPECT_EQ(num_chunks - i - 1, chunk_index);
+ EXPECT_GT(chunks[i]->seq(), last_seq);
+ last_seq = chunks[i]->seq();
+ }
+
+ for (size_t i = 0; i < num_chunks; ++i)
+ buffer->ReturnChunk(i, scoped_ptr<TraceBufferChunk>(chunks[i]));
+
+ TraceLog::GetInstance()->SetDisabled();
+}
+
+TEST_F(TraceEventTestFixture, TraceBufferRingBufferHalfIteration) {
+ TraceLog::GetInstance()->SetEnabled(CategoryFilter("*"),
+ TraceLog::RECORDING_MODE,
+ TraceOptions(RECORD_CONTINUOUSLY));
+ TraceBuffer* buffer = TraceLog::GetInstance()->trace_buffer();
+ size_t capacity = buffer->Capacity();
+ size_t num_chunks = capacity / TraceBufferChunk::kTraceBufferChunkSize;
+ size_t chunk_index;
+ EXPECT_EQ(0u, buffer->Size());
+ EXPECT_FALSE(buffer->NextChunk());
+
+ size_t half_chunks = num_chunks / 2;
+ scoped_ptr<TraceBufferChunk*[]> chunks(new TraceBufferChunk*[half_chunks]);
+
+ for (size_t i = 0; i < half_chunks; ++i) {
+ chunks[i] = buffer->GetChunk(&chunk_index).release();
+ EXPECT_TRUE(chunks[i]);
+ EXPECT_EQ(i, chunk_index);
+ }
+ for (size_t i = 0; i < half_chunks; ++i)
+ buffer->ReturnChunk(i, scoped_ptr<TraceBufferChunk>(chunks[i]));
+
+ for (size_t i = 0; i < half_chunks; ++i)
+ EXPECT_EQ(chunks[i], buffer->NextChunk());
+ EXPECT_FALSE(buffer->NextChunk());
+ TraceLog::GetInstance()->SetDisabled();
+}
+
+TEST_F(TraceEventTestFixture, TraceBufferRingBufferFullIteration) {
+ TraceLog::GetInstance()->SetEnabled(CategoryFilter("*"),
+ TraceLog::RECORDING_MODE,
+ TraceOptions(RECORD_CONTINUOUSLY));
+ TraceBuffer* buffer = TraceLog::GetInstance()->trace_buffer();
+ size_t capacity = buffer->Capacity();
+ size_t num_chunks = capacity / TraceBufferChunk::kTraceBufferChunkSize;
+ size_t chunk_index;
+ EXPECT_EQ(0u, buffer->Size());
+ EXPECT_FALSE(buffer->NextChunk());
+
+ scoped_ptr<TraceBufferChunk*[]> chunks(new TraceBufferChunk*[num_chunks]);
+
+ for (size_t i = 0; i < num_chunks; ++i) {
+ chunks[i] = buffer->GetChunk(&chunk_index).release();
+ EXPECT_TRUE(chunks[i]);
+ EXPECT_EQ(i, chunk_index);
+ }
+ for (size_t i = 0; i < num_chunks; ++i)
+ buffer->ReturnChunk(i, scoped_ptr<TraceBufferChunk>(chunks[i]));
+
+ for (size_t i = 0; i < num_chunks; ++i)
+    EXPECT_EQ(chunks[i], buffer->NextChunk());
+ EXPECT_FALSE(buffer->NextChunk());
+ TraceLog::GetInstance()->SetDisabled();
+}
+
+TEST_F(TraceEventTestFixture, TraceRecordAsMuchAsPossibleMode) {
+ TraceLog::GetInstance()->SetEnabled(CategoryFilter("*"),
+ TraceLog::RECORDING_MODE,
+ TraceOptions(RECORD_AS_MUCH_AS_POSSIBLE));
+ TraceBuffer* buffer = TraceLog::GetInstance()->trace_buffer();
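+  // RECORD_AS_MUCH_AS_POSSIBLE trades memory for capacity: it allocates a
+  // far larger vector buffer than the default; the capacity checked below is
+  // measured in event slots, not bytes.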
+ EXPECT_EQ(512000000UL, buffer->Capacity());
+ TraceLog::GetInstance()->SetDisabled();
+}
+
+// Test the category filter.
+TEST_F(TraceEventTestFixture, CategoryFilter) {
+ // Using the default filter.
+ CategoryFilter default_cf = CategoryFilter(
+ CategoryFilter::kDefaultCategoryFilterString);
+ std::string category_filter_str = default_cf.ToString();
+ EXPECT_STREQ("-*Debug,-*Test", category_filter_str.c_str());
+ EXPECT_TRUE(default_cf.IsCategoryGroupEnabled("not-excluded-category"));
+ EXPECT_FALSE(
+ default_cf.IsCategoryGroupEnabled("disabled-by-default-category"));
+ EXPECT_FALSE(default_cf.IsCategoryGroupEnabled("Category1,CategoryDebug"));
+ EXPECT_FALSE(default_cf.IsCategoryGroupEnabled("CategoryDebug,Category1"));
+ EXPECT_FALSE(default_cf.IsCategoryGroupEnabled("CategoryTest,Category2"));
+
+  // Make sure that an empty string falls back to the default filter.
+ default_cf = CategoryFilter();
+ category_filter_str = default_cf.ToString();
+ EXPECT_STREQ("-*Debug,-*Test", category_filter_str.c_str());
+ EXPECT_TRUE(default_cf.IsCategoryGroupEnabled("not-excluded-category"));
+ EXPECT_FALSE(default_cf.IsCategoryGroupEnabled("Category1,CategoryDebug"));
+ EXPECT_FALSE(default_cf.IsCategoryGroupEnabled("CategoryDebug,Category1"));
+ EXPECT_FALSE(default_cf.IsCategoryGroupEnabled("CategoryTest,Category2"));
+
+ // Using an arbitrary non-empty filter.
+ CategoryFilter cf("included,-excluded,inc_pattern*,-exc_pattern*");
+ category_filter_str = cf.ToString();
+ EXPECT_STREQ("included,inc_pattern*,-excluded,-exc_pattern*",
+ category_filter_str.c_str());
+ EXPECT_TRUE(cf.IsCategoryGroupEnabled("included"));
+ EXPECT_TRUE(cf.IsCategoryGroupEnabled("inc_pattern_category"));
+ EXPECT_FALSE(cf.IsCategoryGroupEnabled("exc_pattern_category"));
+ EXPECT_FALSE(cf.IsCategoryGroupEnabled("excluded"));
+ EXPECT_FALSE(cf.IsCategoryGroupEnabled("not-excluded-nor-included"));
+ EXPECT_FALSE(cf.IsCategoryGroupEnabled("Category1,CategoryDebug"));
+ EXPECT_FALSE(cf.IsCategoryGroupEnabled("CategoryDebug,Category1"));
+ EXPECT_FALSE(cf.IsCategoryGroupEnabled("CategoryTest,Category2"));
+
+ cf.Merge(default_cf);
+ category_filter_str = cf.ToString();
+ EXPECT_STREQ("-excluded,-exc_pattern*,-*Debug,-*Test",
+ category_filter_str.c_str());
+ cf.Clear();
+
+ CategoryFilter reconstructed_cf(category_filter_str);
+ category_filter_str = reconstructed_cf.ToString();
+ EXPECT_STREQ("-excluded,-exc_pattern*,-*Debug,-*Test",
+ category_filter_str.c_str());
+
+ // One included category.
+ CategoryFilter one_inc_cf("only_inc_cat");
+ category_filter_str = one_inc_cf.ToString();
+ EXPECT_STREQ("only_inc_cat", category_filter_str.c_str());
+
+ // One excluded category.
+ CategoryFilter one_exc_cf("-only_exc_cat");
+ category_filter_str = one_exc_cf.ToString();
+ EXPECT_STREQ("-only_exc_cat", category_filter_str.c_str());
+
+  // Enabling a disabled-by-default category does not force all other
+  // categories to be explicitly included in order to be traced.
+ CategoryFilter disabled_cat("disabled-by-default-cc,-excluded");
+ EXPECT_STREQ("disabled-by-default-cc,-excluded",
+ disabled_cat.ToString().c_str());
+ EXPECT_TRUE(disabled_cat.IsCategoryGroupEnabled("disabled-by-default-cc"));
+ EXPECT_TRUE(disabled_cat.IsCategoryGroupEnabled("some_other_group"));
+ EXPECT_FALSE(disabled_cat.IsCategoryGroupEnabled("excluded"));
+
+  // Enabling a disabled-by-default category alongside an explicitly included
+  // category means only the included categories are traced.
+ CategoryFilter disabled_inc_cat("disabled-by-default-cc,included");
+ EXPECT_STREQ("included,disabled-by-default-cc",
+ disabled_inc_cat.ToString().c_str());
+ EXPECT_TRUE(
+ disabled_inc_cat.IsCategoryGroupEnabled("disabled-by-default-cc"));
+ EXPECT_TRUE(disabled_inc_cat.IsCategoryGroupEnabled("included"));
+ EXPECT_FALSE(disabled_inc_cat.IsCategoryGroupEnabled("other_included"));
+
+  // Test that IsEmptyOrContainsLeadingOrTrailingWhitespace actually catches
+  // categories that are explicitly forbidden.
+  // This method is called in a DCHECK to assert that we don't have these
+  // types of strings as categories.
+ EXPECT_TRUE(CategoryFilter::IsEmptyOrContainsLeadingOrTrailingWhitespace(
+ " bad_category "));
+ EXPECT_TRUE(CategoryFilter::IsEmptyOrContainsLeadingOrTrailingWhitespace(
+ " bad_category"));
+ EXPECT_TRUE(CategoryFilter::IsEmptyOrContainsLeadingOrTrailingWhitespace(
+ "bad_category "));
+ EXPECT_TRUE(CategoryFilter::IsEmptyOrContainsLeadingOrTrailingWhitespace(
+ " bad_category"));
+ EXPECT_TRUE(CategoryFilter::IsEmptyOrContainsLeadingOrTrailingWhitespace(
+ "bad_category "));
+ EXPECT_TRUE(CategoryFilter::IsEmptyOrContainsLeadingOrTrailingWhitespace(
+ " bad_category "));
+ EXPECT_TRUE(CategoryFilter::IsEmptyOrContainsLeadingOrTrailingWhitespace(
+ ""));
+ EXPECT_FALSE(CategoryFilter::IsEmptyOrContainsLeadingOrTrailingWhitespace(
+ "good_category"));
+}
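+
+// Illustrative sketch (not part of the tests): the filter syntax exercised
+// above is what callers pass when enabling tracing; the category names here
+// are made up.
+//
+//   CategoryFilter filter("included,-excluded,inc_pattern*");
+//   TraceLog::GetInstance()->SetEnabled(filter,
+//                                       TraceLog::RECORDING_MODE,
+//                                       TraceOptions());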
+
+void BlockUntilStopped(WaitableEvent* task_start_event,
+ WaitableEvent* task_stop_event) {
+ task_start_event->Signal();
+ task_stop_event->Wait();
+}
+
+TEST_F(TraceEventTestFixture, SetCurrentThreadBlocksMessageLoopBeforeTracing) {
+ BeginTrace();
+
+ Thread thread("1");
+ WaitableEvent task_complete_event(false, false);
+ thread.Start();
+ thread.message_loop()->PostTask(
+ FROM_HERE, Bind(&TraceLog::SetCurrentThreadBlocksMessageLoop,
+ Unretained(TraceLog::GetInstance())));
+
+ thread.message_loop()->PostTask(
+ FROM_HERE, Bind(&TraceWithAllMacroVariants, &task_complete_event));
+ task_complete_event.Wait();
+
+ WaitableEvent task_start_event(false, false);
+ WaitableEvent task_stop_event(false, false);
+ thread.message_loop()->PostTask(
+ FROM_HERE, Bind(&BlockUntilStopped, &task_start_event, &task_stop_event));
+ task_start_event.Wait();
+
+ EndTraceAndFlush();
+ ValidateAllTraceMacrosCreatedData(trace_parsed_);
+
+ task_stop_event.Signal();
+ thread.Stop();
+}
+
+TEST_F(TraceEventTestFixture, ConvertTraceOptionsToInternalOptions) {
+ TraceLog* trace_log = TraceLog::GetInstance();
+ TraceOptions options(RECORD_UNTIL_FULL);
+ EXPECT_EQ(TraceLog::kInternalRecordUntilFull,
+ trace_log->GetInternalOptionsFromTraceOptions(options));
+
+ options.record_mode = RECORD_CONTINUOUSLY;
+ EXPECT_EQ(TraceLog::kInternalRecordContinuously,
+ trace_log->GetInternalOptionsFromTraceOptions(options));
+
+ options.record_mode = ECHO_TO_CONSOLE;
+ EXPECT_EQ(TraceLog::kInternalEchoToConsole,
+ trace_log->GetInternalOptionsFromTraceOptions(options));
+
+ options.enable_sampling = true;
+
+ options.record_mode = RECORD_UNTIL_FULL;
+ EXPECT_EQ(
+ TraceLog::kInternalRecordUntilFull | TraceLog::kInternalEnableSampling,
+ trace_log->GetInternalOptionsFromTraceOptions(options));
+
+ options.record_mode = RECORD_CONTINUOUSLY;
+ EXPECT_EQ(
+ TraceLog::kInternalRecordContinuously | TraceLog::kInternalEnableSampling,
+ trace_log->GetInternalOptionsFromTraceOptions(options));
+
+ options.record_mode = ECHO_TO_CONSOLE;
+ EXPECT_EQ(
+ TraceLog::kInternalEchoToConsole | TraceLog::kInternalEnableSampling,
+ trace_log->GetInternalOptionsFromTraceOptions(options));
+
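+  // enable_systrace has no corresponding internal option bit, so setting it
+  // must leave the converted value unchanged.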
+ options.enable_systrace = true;
+ EXPECT_EQ(
+ TraceLog::kInternalEchoToConsole | TraceLog::kInternalEnableSampling,
+ trace_log->GetInternalOptionsFromTraceOptions(options));
+}
+
+void SetBlockingFlagAndBlockUntilStopped(WaitableEvent* task_start_event,
+ WaitableEvent* task_stop_event) {
+ TraceLog::GetInstance()->SetCurrentThreadBlocksMessageLoop();
+ BlockUntilStopped(task_start_event, task_stop_event);
+}
+
+TEST_F(TraceEventTestFixture, SetCurrentThreadBlocksMessageLoopAfterTracing) {
+ BeginTrace();
+
+ Thread thread("1");
+ WaitableEvent task_complete_event(false, false);
+ thread.Start();
+
+ thread.message_loop()->PostTask(
+ FROM_HERE, Bind(&TraceWithAllMacroVariants, &task_complete_event));
+ task_complete_event.Wait();
+
+ WaitableEvent task_start_event(false, false);
+ WaitableEvent task_stop_event(false, false);
+ thread.message_loop()->PostTask(
+ FROM_HERE, Bind(&SetBlockingFlagAndBlockUntilStopped,
+ &task_start_event, &task_stop_event));
+ task_start_event.Wait();
+
+ EndTraceAndFlush();
+ ValidateAllTraceMacrosCreatedData(trace_parsed_);
+
+ task_stop_event.Signal();
+ thread.Stop();
+}
+
+TEST_F(TraceEventTestFixture, ThreadOnceBlocking) {
+ BeginTrace();
+
+ Thread thread("1");
+ WaitableEvent task_complete_event(false, false);
+ thread.Start();
+
+ thread.message_loop()->PostTask(
+ FROM_HERE, Bind(&TraceWithAllMacroVariants, &task_complete_event));
+ task_complete_event.Wait();
+ task_complete_event.Reset();
+
+ WaitableEvent task_start_event(false, false);
+ WaitableEvent task_stop_event(false, false);
+ thread.message_loop()->PostTask(
+ FROM_HERE, Bind(&BlockUntilStopped, &task_start_event, &task_stop_event));
+ task_start_event.Wait();
+
+  // The thread will time out in this flush.
+ EndTraceAndFlushInThreadWithMessageLoop();
+ Clear();
+
+ // Let the thread's message loop continue to spin.
+ task_stop_event.Signal();
+
+ // The following sequence ensures that the FlushCurrentThread task has been
+ // executed in the thread before continuing.
+ task_start_event.Reset();
+ task_stop_event.Reset();
+ thread.message_loop()->PostTask(
+ FROM_HERE, Bind(&BlockUntilStopped, &task_start_event, &task_stop_event));
+ task_start_event.Wait();
+ task_stop_event.Signal();
+ Clear();
+
+ // TraceLog should discover the generation mismatch and recover the thread
+ // local buffer for the thread without any error.
+ BeginTrace();
+ thread.message_loop()->PostTask(
+ FROM_HERE, Bind(&TraceWithAllMacroVariants, &task_complete_event));
+ task_complete_event.Wait();
+ task_complete_event.Reset();
+ EndTraceAndFlushInThreadWithMessageLoop();
+ ValidateAllTraceMacrosCreatedData(trace_parsed_);
+}
+
+std::string* g_log_buffer = NULL;
+bool MockLogMessageHandler(int, const char*, int, size_t,
+ const std::string& str) {
+ if (!g_log_buffer)
+ g_log_buffer = new std::string();
+ g_log_buffer->append(str);
+ return false;
+}
+
+TEST_F(TraceEventTestFixture, EchoToConsole) {
+ logging::LogMessageHandlerFunction old_log_message_handler =
+ logging::GetLogMessageHandler();
+ logging::SetLogMessageHandler(MockLogMessageHandler);
+
+ TraceLog::GetInstance()->SetEnabled(
+ CategoryFilter("*"),
+ TraceLog::RECORDING_MODE,
+ TraceOptions(ECHO_TO_CONSOLE));
+ TRACE_EVENT_BEGIN0("a", "begin_end");
+ {
+ TRACE_EVENT0("b", "duration");
+ TRACE_EVENT0("b1", "duration1");
+ }
+ TRACE_EVENT_INSTANT0("c", "instant", TRACE_EVENT_SCOPE_GLOBAL);
+ TRACE_EVENT_END0("a", "begin_end");
+
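+  // ECHO_TO_CONSOLE prints one line per event, prefixed with "| " per
+  // nesting level; the \x1b bytes below are the terminal color escapes
+  // emitted around each line, and the " (" suffix on closing scopes
+  // introduces the elapsed duration.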
+ EXPECT_NE(std::string::npos, g_log_buffer->find("begin_end[a]\x1b"));
+ EXPECT_NE(std::string::npos, g_log_buffer->find("| duration[b]\x1b"));
+ EXPECT_NE(std::string::npos, g_log_buffer->find("| | duration1[b1]\x1b"));
+ EXPECT_NE(std::string::npos, g_log_buffer->find("| | duration1[b1] ("));
+ EXPECT_NE(std::string::npos, g_log_buffer->find("| duration[b] ("));
+ EXPECT_NE(std::string::npos, g_log_buffer->find("| instant[c]\x1b"));
+ EXPECT_NE(std::string::npos, g_log_buffer->find("begin_end[a] ("));
+
+ EndTraceAndFlush();
+ delete g_log_buffer;
+ logging::SetLogMessageHandler(old_log_message_handler);
+ g_log_buffer = NULL;
+}
+
+bool LogMessageHandlerWithTraceEvent(int, const char*, int, size_t,
+ const std::string&) {
+ TRACE_EVENT0("log", "trace_event");
+ return false;
+}
+
+TEST_F(TraceEventTestFixture, EchoToConsoleTraceEventRecursion) {
+ logging::LogMessageHandlerFunction old_log_message_handler =
+ logging::GetLogMessageHandler();
+ logging::SetLogMessageHandler(LogMessageHandlerWithTraceEvent);
+
+ TraceLog::GetInstance()->SetEnabled(
+ CategoryFilter("*"),
+ TraceLog::RECORDING_MODE,
+ TraceOptions(ECHO_TO_CONSOLE));
+ {
+ // This should not cause deadlock or infinite recursion.
+ TRACE_EVENT0("b", "duration");
+ }
+
+ EndTraceAndFlush();
+ logging::SetLogMessageHandler(old_log_message_handler);
+}
+
+TEST_F(TraceEventTestFixture, TimeOffset) {
+ BeginTrace();
+ // Let TraceLog timer start from 0.
+ TimeDelta time_offset = TimeTicks::NowFromSystemTraceTime() - TimeTicks();
+ TraceLog::GetInstance()->SetTimeOffset(time_offset);
+
+ {
+ TRACE_EVENT0("all", "duration1");
+ TRACE_EVENT0("all", "duration2");
+ }
+ TRACE_EVENT_BEGIN_WITH_ID_TID_AND_TIMESTAMP0(
+ "all", "with_timestamp", 0, 0,
+ TimeTicks::NowFromSystemTraceTime().ToInternalValue());
+ TRACE_EVENT_END_WITH_ID_TID_AND_TIMESTAMP0(
+ "all", "with_timestamp", 0, 0,
+ TimeTicks::NowFromSystemTraceTime().ToInternalValue());
+
+ EndTraceAndFlush();
+ DropTracedMetadataRecords();
+
+ double end_time = static_cast<double>(
+ (TimeTicks::NowFromSystemTraceTime() - time_offset).ToInternalValue());
+ double last_timestamp = 0;
+ for (size_t i = 0; i < trace_parsed_.GetSize(); ++i) {
+ const DictionaryValue* item;
+ EXPECT_TRUE(trace_parsed_.GetDictionary(i, &item));
+ double timestamp;
+ EXPECT_TRUE(item->GetDouble("ts", &timestamp));
+ EXPECT_GE(timestamp, last_timestamp);
+ EXPECT_LE(timestamp, end_time);
+ last_timestamp = timestamp;
+ }
+}
+
+TEST_F(TraceEventTestFixture, ConfigureSyntheticDelays) {
+ BeginSpecificTrace("DELAY(test.Delay;0.05)");
+
+ base::TimeTicks start = base::TimeTicks::Now();
+ {
+ TRACE_EVENT_SYNTHETIC_DELAY("test.Delay");
+ }
+ base::TimeDelta duration = base::TimeTicks::Now() - start;
+ EXPECT_GE(duration.InMilliseconds(), 50);
+
+ EndTraceAndFlush();
+}
+
+TEST_F(TraceEventTestFixture, BadSyntheticDelayConfigurations) {
+ const char* const configs[] = {
+ "",
+ "DELAY(",
+ "DELAY(;",
+ "DELAY(;)",
+ "DELAY(test.Delay)",
+ "DELAY(test.Delay;)"
+ };
+ for (size_t i = 0; i < arraysize(configs); i++) {
+ BeginSpecificTrace(configs[i]);
+ EndTraceAndFlush();
+ CategoryFilter filter = TraceLog::GetInstance()->GetCurrentCategoryFilter();
+ EXPECT_EQ(0u, filter.GetSyntheticDelayValues().size());
+ }
+}
+
+TEST_F(TraceEventTestFixture, SyntheticDelayConfigurationMerging) {
+ CategoryFilter filter1("DELAY(test.Delay1;16)");
+ CategoryFilter filter2("DELAY(test.Delay2;32)");
+ filter1.Merge(filter2);
+ EXPECT_EQ(2u, filter1.GetSyntheticDelayValues().size());
+}
+
+TEST_F(TraceEventTestFixture, SyntheticDelayConfigurationToString) {
+ const char config[] = "DELAY(test.Delay;16;oneshot)";
+ CategoryFilter filter(config);
+ EXPECT_EQ(config, filter.ToString());
+}
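+
+// Taken together, the synthetic delay tests exercise the config syntax
+// "DELAY(<delay name>;<duration>[;<mode>])". Above, 0.05 produced a delay of
+// at least 50 ms, so the duration is in seconds; "oneshot" is one accepted
+// mode.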
+
+TEST(TraceOptionsTest, TraceOptionsFromString) {
+ TraceOptions options;
+ EXPECT_TRUE(options.SetFromString("record-until-full"));
+ EXPECT_EQ(RECORD_UNTIL_FULL, options.record_mode);
+ EXPECT_FALSE(options.enable_sampling);
+ EXPECT_FALSE(options.enable_systrace);
+
+ EXPECT_TRUE(options.SetFromString("record-continuously"));
+ EXPECT_EQ(RECORD_CONTINUOUSLY, options.record_mode);
+ EXPECT_FALSE(options.enable_sampling);
+ EXPECT_FALSE(options.enable_systrace);
+
+ EXPECT_TRUE(options.SetFromString("trace-to-console"));
+ EXPECT_EQ(ECHO_TO_CONSOLE, options.record_mode);
+ EXPECT_FALSE(options.enable_sampling);
+ EXPECT_FALSE(options.enable_systrace);
+
+ EXPECT_TRUE(options.SetFromString("record-as-much-as-possible"));
+ EXPECT_EQ(RECORD_AS_MUCH_AS_POSSIBLE, options.record_mode);
+ EXPECT_FALSE(options.enable_sampling);
+ EXPECT_FALSE(options.enable_systrace);
+
+ EXPECT_TRUE(options.SetFromString("record-until-full, enable-sampling"));
+ EXPECT_EQ(RECORD_UNTIL_FULL, options.record_mode);
+ EXPECT_TRUE(options.enable_sampling);
+ EXPECT_FALSE(options.enable_systrace);
+
+ EXPECT_TRUE(options.SetFromString("enable-systrace,record-continuously"));
+ EXPECT_EQ(RECORD_CONTINUOUSLY, options.record_mode);
+ EXPECT_FALSE(options.enable_sampling);
+ EXPECT_TRUE(options.enable_systrace);
+
+ EXPECT_TRUE(options.SetFromString(
+ "enable-systrace, trace-to-console,enable-sampling"));
+ EXPECT_EQ(ECHO_TO_CONSOLE, options.record_mode);
+ EXPECT_TRUE(options.enable_sampling);
+ EXPECT_TRUE(options.enable_systrace);
+
+ EXPECT_TRUE(options.SetFromString(
+ "record-continuously,record-until-full,trace-to-console"));
+ EXPECT_EQ(ECHO_TO_CONSOLE, options.record_mode);
+ EXPECT_FALSE(options.enable_systrace);
+ EXPECT_FALSE(options.enable_sampling);
+
+ EXPECT_TRUE(options.SetFromString(""));
+ EXPECT_EQ(RECORD_UNTIL_FULL, options.record_mode);
+ EXPECT_FALSE(options.enable_systrace);
+ EXPECT_FALSE(options.enable_sampling);
+
+ EXPECT_FALSE(options.SetFromString("foo-bar-baz"));
+}
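+
+// Illustrative sketch (not part of the tests): SetFromString is how
+// TraceOptions would typically be built from user-supplied text; the
+// fallback policy and |switch_value| below are made up for the example.
+//
+//   TraceOptions options;
+//   if (!options.SetFromString(switch_value))
+//     options = TraceOptions(RECORD_UNTIL_FULL);  // Reject malformed input.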
+
+TEST(TraceOptionsTest, TraceOptionsToString) {
+  // Test that we can initialize TraceOptions from the string returned by
+  // TraceOptions::ToString() and get back an equivalent TraceOptions.
+ TraceRecordMode modes[] = {RECORD_UNTIL_FULL,
+ RECORD_CONTINUOUSLY,
+ ECHO_TO_CONSOLE,
+ RECORD_AS_MUCH_AS_POSSIBLE};
+ bool enable_sampling_options[] = {true, false};
+ bool enable_systrace_options[] = {true, false};
+
+  for (size_t i = 0; i < arraysize(modes); ++i) {
+    for (size_t j = 0; j < arraysize(enable_sampling_options); ++j) {
+      for (size_t k = 0; k < arraysize(enable_systrace_options); ++k) {
+ TraceOptions original_option = TraceOptions(modes[i]);
+ original_option.enable_sampling = enable_sampling_options[j];
+ original_option.enable_systrace = enable_systrace_options[k];
+ TraceOptions new_options;
+ EXPECT_TRUE(new_options.SetFromString(original_option.ToString()));
+ EXPECT_EQ(original_option.record_mode, new_options.record_mode);
+ EXPECT_EQ(original_option.enable_sampling, new_options.enable_sampling);
+ EXPECT_EQ(original_option.enable_systrace, new_options.enable_systrace);
+ }
+ }
+ }
+}
+
+
+} // namespace debug
+} // namespace base
diff --git a/base/trace_event/trace_event_win.cc b/base/trace_event/trace_event_win.cc
new file mode 100644
index 0000000..d2c3dc8
--- /dev/null
+++ b/base/trace_event/trace_event_win.cc
@@ -0,0 +1,124 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/trace_event/trace_event_win.h"
+
+#include "base/logging.h"
+#include "base/memory/singleton.h"
+#include <initguid.h> // NOLINT
+
+namespace base {
+namespace debug {
+
+using base::win::EtwEventType;
+using base::win::EtwMofEvent;
+
+// {3DADA31D-19EF-4dc1-B345-037927193422}
+const GUID kChromeTraceProviderName = {
+ 0x3dada31d, 0x19ef, 0x4dc1, 0xb3, 0x45, 0x3, 0x79, 0x27, 0x19, 0x34, 0x22 };
+
+// {B967AE67-BB22-49d7-9406-55D91EE1D560}
+const GUID kTraceEventClass32 = {
+ 0xb967ae67, 0xbb22, 0x49d7, 0x94, 0x6, 0x55, 0xd9, 0x1e, 0xe1, 0xd5, 0x60 };
+
+// {97BE602D-2930-4ac3-8046-B6763B631DFE}
+const GUID kTraceEventClass64 = {
+ 0x97be602d, 0x2930, 0x4ac3, 0x80, 0x46, 0xb6, 0x76, 0x3b, 0x63, 0x1d, 0xfe};
+
+
+TraceEventETWProvider::TraceEventETWProvider() :
+ EtwTraceProvider(kChromeTraceProviderName) {
+ Register();
+}
+
+TraceEventETWProvider* TraceEventETWProvider::GetInstance() {
+ return Singleton<TraceEventETWProvider,
+ StaticMemorySingletonTraits<TraceEventETWProvider> >::get();
+}
+
+bool TraceEventETWProvider::StartTracing() {
+ return true;
+}
+
+void TraceEventETWProvider::TraceEvent(const char* name,
+ size_t name_len,
+ char type,
+ const void* id,
+ const char* extra,
+ size_t extra_len) {
+ // Make sure we don't touch NULL.
+ if (name == NULL)
+ name = "";
+ if (extra == NULL)
+ extra = "";
+
+ EtwEventType etw_type = 0;
+ switch (type) {
+ case TRACE_EVENT_PHASE_BEGIN:
+ etw_type = kTraceEventTypeBegin;
+ break;
+ case TRACE_EVENT_PHASE_END:
+ etw_type = kTraceEventTypeEnd;
+ break;
+
+ case TRACE_EVENT_PHASE_INSTANT:
+ etw_type = kTraceEventTypeInstant;
+ break;
+
+ default:
+ NOTREACHED() << "Unknown event type";
+ etw_type = kTraceEventTypeInstant;
+ break;
+ }
+
+ EtwMofEvent<5> event(kTraceEventClass32,
+ etw_type,
+ TRACE_LEVEL_INFORMATION);
+ event.SetField(0, name_len + 1, name);
+ event.SetField(1, sizeof(id), &id);
+ event.SetField(2, extra_len + 1, extra);
+
+ // These variables are declared here so that they are not out of scope when
+ // the event is logged.
+ DWORD depth;
+ void* backtrace[32];
+
+ // See whether we're to capture a backtrace.
+ if (enable_flags() & CAPTURE_STACK_TRACE) {
+ depth = CaptureStackBackTrace(0,
+ arraysize(backtrace),
+ backtrace,
+ NULL);
+ event.SetField(3, sizeof(depth), &depth);
+ event.SetField(4, sizeof(backtrace[0]) * depth, backtrace);
+ }
+
+ // Trace the event.
+ Log(event.get());
+}
+
+void TraceEventETWProvider::Trace(const char* name,
+ size_t name_len,
+ char type,
+ const void* id,
+ const char* extra,
+ size_t extra_len) {
+ TraceEventETWProvider* provider = TraceEventETWProvider::GetInstance();
+ if (provider && provider->IsTracing()) {
+ // Compute the name & extra lengths if not supplied already.
+ if (name_len == kUseStrlen)
+ name_len = (name == NULL) ? 0 : strlen(name);
+ if (extra_len == kUseStrlen)
+ extra_len = (extra == NULL) ? 0 : strlen(extra);
+
+ provider->TraceEvent(name, name_len, type, id, extra, extra_len);
+ }
+}
+
+void TraceEventETWProvider::Resurrect() {
+ StaticMemorySingletonTraits<TraceEventETWProvider>::Resurrect();
+}
+
+} // namespace debug
+} // namespace base
diff --git a/base/trace_event/trace_event_win.h b/base/trace_event/trace_event_win.h
new file mode 100644
index 0000000..e447c35
--- /dev/null
+++ b/base/trace_event/trace_event_win.h
@@ -0,0 +1,125 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file contains the Windows-specific declarations for trace_event.h.
+#ifndef BASE_TRACE_EVENT_TRACE_EVENT_WIN_H_
+#define BASE_TRACE_EVENT_TRACE_EVENT_WIN_H_
+
+#include <string>
+
+#include "base/base_export.h"
+#include "base/trace_event/trace_event.h"
+#include "base/win/event_trace_provider.h"
+
+// Fwd.
+template <typename Type>
+struct StaticMemorySingletonTraits;
+
+namespace base {
+namespace debug {
+
+// This EtwTraceProvider subclass implements ETW logging
+// for the trace event macros on Windows.
+class BASE_EXPORT TraceEventETWProvider : public base::win::EtwTraceProvider {
+ public:
+ static const size_t kUseStrlen = static_cast<size_t>(-1);
+
+ // Start logging trace events.
+ // This is a noop in this implementation.
+ static bool StartTracing();
+
+  // Traces begin/end/instant events; this is the bottleneck implementation
+  // that all the other overloads defer to.
+ // Allowing the use of std::string for name or extra is a convenience,
+ // whereas passing name or extra as a const char* avoids the construction
+ // of temporary std::string instances.
+ // If kUseStrlen is passed for name_len or extra_len, the strlen of the string
+ // will be used for length.
+ static void Trace(const char* name,
+ size_t name_len,
+ char type,
+ const void* id,
+ const char* extra,
+ size_t extra_len);
+
+ // Allows passing extra as a std::string for convenience.
+ static void Trace(const char* name,
+ char type,
+ const void* id,
+ const std::string& extra) {
+ return Trace(name, kUseStrlen, type, id, extra.c_str(), extra.length());
+ }
+
+ // Allows passing extra as a const char* to avoid constructing temporary
+ // std::string instances where not needed.
+ static void Trace(const char* name,
+ char type,
+ const void* id,
+ const char* extra) {
+ return Trace(name, kUseStrlen, type, id, extra, kUseStrlen);
+ }
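+
+  // Illustrative usage (mirrors the unit tests; |obj| is a hypothetical
+  // object used as the event id):
+  //   TraceEventETWProvider::Trace("event_name", TRACE_EVENT_PHASE_BEGIN,
+  //                                &obj, "extra_data");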
+
+ // Retrieves the singleton.
+ // Note that this may return NULL post-AtExit processing.
+ static TraceEventETWProvider* GetInstance();
+
+ // Returns true iff tracing is turned on.
+ bool IsTracing() {
+ return enable_level() >= TRACE_LEVEL_INFORMATION;
+ }
+
+ // Emit a trace of type |type| containing |name|, |id|, and |extra|.
+ // Note: |name| and |extra| must be NULL, or a zero-terminated string of
+ // length |name_len| or |extra_len| respectively.
+ // Note: if name_len or extra_len are kUseStrlen, the length of the
+ // corresponding string will be used.
+ void TraceEvent(const char* name,
+ size_t name_len,
+ char type,
+ const void* id,
+ const char* extra,
+ size_t extra_len);
+
+  // Exposed for unit testing only; allows resurrecting our
+  // singleton instance post-AtExit processing.
+ static void Resurrect();
+
+ private:
+ // Ensure only the provider can construct us.
+ friend struct StaticMemorySingletonTraits<TraceEventETWProvider>;
+ TraceEventETWProvider();
+
+ DISALLOW_COPY_AND_ASSIGN(TraceEventETWProvider);
+};
+
+// The ETW trace provider GUID.
+BASE_EXPORT extern const GUID kChromeTraceProviderName;
+
+// The ETW event class GUID for 32 bit events.
+BASE_EXPORT extern const GUID kTraceEventClass32;
+
+// The ETW event class GUID for 64 bit events.
+BASE_EXPORT extern const GUID kTraceEventClass64;
+
+// The ETW event types. IDs 0x00-0x09 are reserved, so we start at 0x10.
+const base::win::EtwEventType kTraceEventTypeBegin = 0x10;
+const base::win::EtwEventType kTraceEventTypeEnd = 0x11;
+const base::win::EtwEventType kTraceEventTypeInstant = 0x12;
+
+// If this flag is set in the enable flags, a stack trace is captured and
+// logged with each trace event.
+enum TraceEventETWFlags {
+ CAPTURE_STACK_TRACE = 0x0001,
+};
+
+// The event format consists of:
+// The "name" string as a zero-terminated ASCII string.
+// The id pointer in the machine bitness.
+// The "extra" string as a zero-terminated ASCII string.
+// Optionally the stack trace, consisting of a DWORD "depth", followed
+// by an array of void* (machine bitness) of length "depth".
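+//
+// For example, a 32-bit event logged with name "ping" and extra "pong" and
+// no captured stack would carry the payload (sketch):
+//   "ping\0" (5 bytes) | id (4-byte pointer) | "pong\0" (5 bytes)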
+
+} // namespace debug
+} // namespace base
+
+#endif // BASE_TRACE_EVENT_TRACE_EVENT_WIN_H_
diff --git a/base/trace_event/trace_event_win_unittest.cc b/base/trace_event/trace_event_win_unittest.cc
new file mode 100644
index 0000000..a411301
--- /dev/null
+++ b/base/trace_event/trace_event_win_unittest.cc
@@ -0,0 +1,319 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/trace_event/trace_event.h"
+
+#include <strstream>
+
+#include "base/at_exit.h"
+#include "base/basictypes.h"
+#include "base/files/file_util.h"
+#include "base/trace_event/trace_event.h"
+#include "base/trace_event/trace_event_win.h"
+#include "base/win/event_trace_consumer.h"
+#include "base/win/event_trace_controller.h"
+#include "base/win/event_trace_provider.h"
+#include "base/win/windows_version.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include <initguid.h> // NOLINT - must be last include.
+
+namespace base {
+namespace debug {
+
+namespace {
+
+using testing::_;
+using testing::AnyNumber;
+using testing::InSequence;
+using testing::Ge;
+using testing::Le;
+using testing::NotNull;
+
+using base::win::EtwEventType;
+using base::win::EtwTraceConsumerBase;
+using base::win::EtwTraceController;
+using base::win::EtwTraceProperties;
+
+// Data for unittest traces.
+const char kEmpty[] = "";
+const char kName[] = "unittest.trace_name";
+const char kExtra[] = "UnittestDummyExtraString";
+const void* kId = kName;
+
+const wchar_t kTestSessionName[] = L"TraceEvent unittest session";
+
+MATCHER_P(BufferStartsWith, str, "Buffer starts with") {
+ return memcmp(arg, str.c_str(), str.length()) == 0;
+}
+
+// Duplicated from <evntrace.h> to fix link problems.
+DEFINE_GUID( /* 68fdd900-4a3e-11d1-84f4-0000f80464e3 */
+ kEventTraceGuid,
+ 0x68fdd900,
+ 0x4a3e,
+ 0x11d1,
+ 0x84, 0xf4, 0x00, 0x00, 0xf8, 0x04, 0x64, 0xe3);
+
+class TestEventConsumer: public EtwTraceConsumerBase<TestEventConsumer> {
+ public:
+ TestEventConsumer() {
+ EXPECT_TRUE(current_ == NULL);
+ current_ = this;
+ }
+
+ ~TestEventConsumer() {
+ EXPECT_TRUE(current_ == this);
+ current_ = NULL;
+ }
+
+ MOCK_METHOD4(Event, void(REFGUID event_class,
+ EtwEventType event_type,
+ size_t buf_len,
+ const void* buf));
+
+ static void ProcessEvent(EVENT_TRACE* event) {
+ ASSERT_TRUE(current_ != NULL);
+ current_->Event(event->Header.Guid,
+ event->Header.Class.Type,
+ event->MofLength,
+ event->MofData);
+ }
+
+ private:
+ static TestEventConsumer* current_;
+};
+
+TestEventConsumer* TestEventConsumer::current_ = NULL;
+
+class TraceEventWinTest: public testing::Test {
+ public:
+ TraceEventWinTest() {
+ }
+
+ void SetUp() {
+ bool is_xp = win::GetVersion() < base::win::VERSION_VISTA;
+
+ if (is_xp) {
+ // Tear down any dangling session from an earlier failing test.
+ EtwTraceProperties ignore;
+ EtwTraceController::Stop(kTestSessionName, &ignore);
+ }
+
+    // Resurrect and initialize the TraceEventETWProvider singleton instance.
+    // On Vista and later, we need the provider registered before we
+    // start the private, in-proc session, but on XP we need the global
+    // session created and the provider enabled before we register our
+    // provider.
+ TraceEventETWProvider* tracelog = NULL;
+ if (!is_xp) {
+ TraceEventETWProvider::Resurrect();
+ tracelog = TraceEventETWProvider::GetInstance();
+ ASSERT_TRUE(tracelog != NULL);
+ ASSERT_FALSE(tracelog->IsTracing());
+ }
+
+ // Create the log file.
+ ASSERT_TRUE(base::CreateTemporaryFile(&log_file_));
+
+ // Create a private log session on the file.
+ EtwTraceProperties prop;
+ ASSERT_HRESULT_SUCCEEDED(prop.SetLoggerFileName(log_file_.value().c_str()));
+ EVENT_TRACE_PROPERTIES& p = *prop.get();
+ p.Wnode.ClientContext = 1; // QPC timer accuracy.
+ p.LogFileMode = EVENT_TRACE_FILE_MODE_SEQUENTIAL; // Sequential log.
+
+    // On Vista and later, we create a private in-process log session, because
+    // otherwise we'd need administrator privileges. Unfortunately we can't
+    // do the same on XP, because the semantics of a private logger session
+    // are different there, and the IN_PROC flag is not supported.
+ if (!is_xp) {
+ p.LogFileMode |= EVENT_TRACE_PRIVATE_IN_PROC | // In-proc for non-admin.
+ EVENT_TRACE_PRIVATE_LOGGER_MODE; // Process-private log.
+ }
+
+ p.MaximumFileSize = 100; // 100M file size.
+ p.FlushTimer = 1; // 1 second flush lag.
+ ASSERT_HRESULT_SUCCEEDED(controller_.Start(kTestSessionName, &prop));
+
+ // Enable the TraceLog provider GUID.
+ ASSERT_HRESULT_SUCCEEDED(
+ controller_.EnableProvider(kChromeTraceProviderName,
+ TRACE_LEVEL_INFORMATION,
+ 0));
+
+ if (is_xp) {
+ TraceEventETWProvider::Resurrect();
+ tracelog = TraceEventETWProvider::GetInstance();
+ }
+ ASSERT_TRUE(tracelog != NULL);
+ EXPECT_TRUE(tracelog->IsTracing());
+ }
+
+ void TearDown() {
+ EtwTraceProperties prop;
+ if (controller_.session() != 0)
+ EXPECT_HRESULT_SUCCEEDED(controller_.Stop(&prop));
+
+ if (!log_file_.value().empty())
+ base::DeleteFile(log_file_, false);
+
+ // We want our singleton torn down after each test.
+ TraceLog::DeleteForTesting();
+ }
+
+ void ExpectEvent(REFGUID guid,
+ EtwEventType type,
+ const char* name,
+ size_t name_len,
+ const void* id,
+ const char* extra,
+ size_t extra_len) {
+ // Build the trace event buffer we expect will result from this.
+ std::stringbuf str;
+ str.sputn(name, name_len + 1);
+ str.sputn(reinterpret_cast<const char*>(&id), sizeof(id));
+ str.sputn(extra, extra_len + 1);
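+    // (The layout above mirrors the MOF fields TraceEventETWProvider logs:
+    // zero-terminated name, then the id pointer, then zero-terminated extra.)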
+
+ // And set up the expectation for the event callback.
+ EXPECT_CALL(consumer_, Event(guid,
+ type,
+ testing::Ge(str.str().length()),
+ BufferStartsWith(str.str())));
+ }
+
+ void ExpectPlayLog() {
+ // Ignore EventTraceGuid events.
+ EXPECT_CALL(consumer_, Event(kEventTraceGuid, _, _, _))
+ .Times(AnyNumber());
+ }
+
+ void PlayLog() {
+ EtwTraceProperties prop;
+ EXPECT_HRESULT_SUCCEEDED(controller_.Flush(&prop));
+ EXPECT_HRESULT_SUCCEEDED(controller_.Stop(&prop));
+ ASSERT_HRESULT_SUCCEEDED(
+ consumer_.OpenFileSession(log_file_.value().c_str()));
+
+ ASSERT_HRESULT_SUCCEEDED(consumer_.Consume());
+ }
+
+ private:
+ // We want our singleton torn down after each test.
+ ShadowingAtExitManager at_exit_manager_;
+ EtwTraceController controller_;
+ FilePath log_file_;
+ TestEventConsumer consumer_;
+};
+
+} // namespace
+
+
+TEST_F(TraceEventWinTest, TraceLog) {
+ ExpectPlayLog();
+
+  // The events should arrive in the same sequence as the expectations.
+ InSequence in_sequence;
+
+ // Full argument version, passing lengths explicitly.
+ TraceEventETWProvider::Trace(kName,
+ strlen(kName),
+ TRACE_EVENT_PHASE_BEGIN,
+ kId,
+ kExtra,
+ strlen(kExtra));
+
+ ExpectEvent(kTraceEventClass32,
+ kTraceEventTypeBegin,
+ kName, strlen(kName),
+ kId,
+ kExtra, strlen(kExtra));
+
+ // Const char* version.
+ TraceEventETWProvider::Trace(static_cast<const char*>(kName),
+ TRACE_EVENT_PHASE_END,
+ kId,
+ static_cast<const char*>(kExtra));
+
+ ExpectEvent(kTraceEventClass32,
+ kTraceEventTypeEnd,
+ kName, strlen(kName),
+ kId,
+ kExtra, strlen(kExtra));
+
+ // std::string extra version.
+ TraceEventETWProvider::Trace(static_cast<const char*>(kName),
+ TRACE_EVENT_PHASE_INSTANT,
+ kId,
+ std::string(kExtra));
+
+ ExpectEvent(kTraceEventClass32,
+ kTraceEventTypeInstant,
+ kName, strlen(kName),
+ kId,
+ kExtra, strlen(kExtra));
+
+
+ // Test for sanity on NULL inputs.
+ TraceEventETWProvider::Trace(NULL,
+ 0,
+ TRACE_EVENT_PHASE_BEGIN,
+ kId,
+ NULL,
+ 0);
+
+ ExpectEvent(kTraceEventClass32,
+ kTraceEventTypeBegin,
+ kEmpty, 0,
+ kId,
+ kEmpty, 0);
+
+ TraceEventETWProvider::Trace(NULL,
+ TraceEventETWProvider::kUseStrlen,
+ TRACE_EVENT_PHASE_END,
+ kId,
+ NULL,
+ TraceEventETWProvider::kUseStrlen);
+
+ ExpectEvent(kTraceEventClass32,
+ kTraceEventTypeEnd,
+ kEmpty, 0,
+ kId,
+ kEmpty, 0);
+
+ PlayLog();
+}
+
+TEST_F(TraceEventWinTest, Macros) {
+ ExpectPlayLog();
+
+  // The events should arrive in the same sequence as the expectations.
+ InSequence in_sequence;
+
+ TRACE_EVENT_BEGIN_ETW(kName, kId, kExtra);
+ ExpectEvent(kTraceEventClass32,
+ kTraceEventTypeBegin,
+ kName, strlen(kName),
+ kId,
+ kExtra, strlen(kExtra));
+
+ TRACE_EVENT_END_ETW(kName, kId, kExtra);
+ ExpectEvent(kTraceEventClass32,
+ kTraceEventTypeEnd,
+ kName, strlen(kName),
+ kId,
+ kExtra, strlen(kExtra));
+
+ TRACE_EVENT_INSTANT_ETW(kName, kId, kExtra);
+ ExpectEvent(kTraceEventClass32,
+ kTraceEventTypeInstant,
+ kName, strlen(kName),
+ kId,
+ kExtra, strlen(kExtra));
+
+ PlayLog();
+}
+
+} // namespace debug
+} // namespace base