summaryrefslogtreecommitdiffstats
path: root/base
diff options
context:
space:
mode:
authoroysteine <oysteine@chromium.org>2015-07-27 14:49:27 -0700
committerCommit bot <commit-bot@chromium.org>2015-07-27 21:50:32 +0000
commit17de7455a3a1d5e4556d1ed18961e42d987e36c2 (patch)
tree1d91f638d3c645d3a4606105802e55f3829a67b0 /base
parent562defa3c58df3a4c490a2321d309a98ce81a11e (diff)
downloadchromium_src-17de7455a3a1d5e4556d1ed18961e42d987e36c2.zip
chromium_src-17de7455a3a1d5e4556d1ed18961e42d987e36c2.tar.gz
chromium_src-17de7455a3a1d5e4556d1ed18961e42d987e36c2.tar.bz2
Split most of trace_event_impl.h/cc out into trace_log.h/cc, trace_sampling_thread.h/cc and trace_buffer.h/cc
R=dsinclair,shatch BUG= Review URL: https://codereview.chromium.org/1254453006 Cr-Commit-Position: refs/heads/master@{#340569}
Diffstat (limited to 'base')
-rw-r--r--base/test/trace_event_analyzer_unittest.cc1
-rw-r--r--base/test/trace_to_file.cc3
-rw-r--r--base/trace_event/BUILD.gn8
-rw-r--r--base/trace_event/trace_buffer.cc395
-rw-r--r--base/trace_event/trace_buffer.h126
-rw-r--r--base/trace_event/trace_event.gypi8
-rw-r--r--base/trace_event/trace_event.h2
-rw-r--r--base/trace_event/trace_event_impl.cc2176
-rw-r--r--base/trace_event/trace_event_impl.h567
-rw-r--r--base/trace_event/trace_event_memory.h2
-rw-r--r--base/trace_event/trace_event_system_stats_monitor.h2
-rw-r--r--base/trace_event/trace_event_unittest.cc3
-rw-r--r--base/trace_event/trace_log.cc1642
-rw-r--r--base/trace_event/trace_log.h471
-rw-r--r--base/trace_event/trace_log_constants.cc (renamed from base/trace_event/trace_event_impl_constants.cc)2
-rw-r--r--base/trace_event/trace_sampling_thread.cc101
-rw-r--r--base/trace_event/trace_sampling_thread.h54
17 files changed, 2818 insertions, 2745 deletions
diff --git a/base/test/trace_event_analyzer_unittest.cc b/base/test/trace_event_analyzer_unittest.cc
index 278709f..6503b1f 100644
--- a/base/test/trace_event_analyzer_unittest.cc
+++ b/base/test/trace_event_analyzer_unittest.cc
@@ -6,6 +6,7 @@
#include "base/synchronization/waitable_event.h"
#include "base/test/trace_event_analyzer.h"
#include "base/threading/platform_thread.h"
+#include "base/trace_event/trace_buffer.h"
#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"
diff --git a/base/test/trace_to_file.cc b/base/test/trace_to_file.cc
index e00b58ae0..85fff71 100644
--- a/base/test/trace_to_file.cc
+++ b/base/test/trace_to_file.cc
@@ -9,7 +9,8 @@
#include "base/command_line.h"
#include "base/files/file_util.h"
#include "base/run_loop.h"
-#include "base/trace_event/trace_event_impl.h"
+#include "base/trace_event/trace_buffer.h"
+#include "base/trace_event/trace_log.h"
namespace base {
namespace test {
diff --git a/base/trace_event/BUILD.gn b/base/trace_event/BUILD.gn
index 8f98747..1bc9a56 100644
--- a/base/trace_event/BUILD.gn
+++ b/base/trace_event/BUILD.gn
@@ -27,6 +27,8 @@ source_set("trace_event") {
"process_memory_totals.h",
"process_memory_totals_dump_provider.cc",
"process_memory_totals_dump_provider.h",
+ "trace_buffer.cc",
+ "trace_buffer.h",
"trace_config.cc",
"trace_config.h",
"trace_event.h",
@@ -37,7 +39,6 @@ source_set("trace_event") {
"trace_event_etw_export_win.h",
"trace_event_impl.cc",
"trace_event_impl.h",
- "trace_event_impl_constants.cc",
"trace_event_memory.cc",
"trace_event_memory.h",
"trace_event_memory_overhead.cc",
@@ -48,6 +49,11 @@ source_set("trace_event") {
"trace_event_system_stats_monitor.h",
"trace_event_win.cc",
"trace_event_win.h",
+ "trace_log.cc",
+ "trace_log.h",
+ "trace_log_constants.cc",
+ "trace_sampling_thread.cc",
+ "trace_sampling_thread.h",
"winheap_dump_provider_win.cc",
"winheap_dump_provider_win.h",
]
diff --git a/base/trace_event/trace_buffer.cc b/base/trace_event/trace_buffer.cc
new file mode 100644
index 0000000..33cb60f
--- /dev/null
+++ b/base/trace_event/trace_buffer.cc
@@ -0,0 +1,395 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/trace_event/trace_buffer.h"
+
+#include "base/trace_event/trace_event_impl.h"
+
+namespace base {
+namespace trace_event {
+
+namespace {
+
+class TraceBufferRingBuffer : public TraceBuffer {
+ public:
+ TraceBufferRingBuffer(size_t max_chunks)
+ : max_chunks_(max_chunks),
+ recyclable_chunks_queue_(new size_t[queue_capacity()]),
+ queue_head_(0),
+ queue_tail_(max_chunks),
+ current_iteration_index_(0),
+ current_chunk_seq_(1) {
+ chunks_.reserve(max_chunks);
+ for (size_t i = 0; i < max_chunks; ++i)
+ recyclable_chunks_queue_[i] = i;
+ }
+
+ scoped_ptr<TraceBufferChunk> GetChunk(size_t* index) override {
+ // Because the number of threads is much less than the number of chunks,
+ // the queue should never be empty.
+ DCHECK(!QueueIsEmpty());
+
+ *index = recyclable_chunks_queue_[queue_head_];
+ queue_head_ = NextQueueIndex(queue_head_);
+ current_iteration_index_ = queue_head_;
+
+ if (*index >= chunks_.size())
+ chunks_.resize(*index + 1);
+
+ TraceBufferChunk* chunk = chunks_[*index];
+    chunks_[*index] = NULL;  // Put NULL in the slot of an in-flight chunk.
+ if (chunk)
+ chunk->Reset(current_chunk_seq_++);
+ else
+ chunk = new TraceBufferChunk(current_chunk_seq_++);
+
+ return scoped_ptr<TraceBufferChunk>(chunk);
+ }
+
+ void ReturnChunk(size_t index, scoped_ptr<TraceBufferChunk> chunk) override {
+ // When this method is called, the queue should not be full because it
+ // can contain all chunks including the one to be returned.
+ DCHECK(!QueueIsFull());
+ DCHECK(chunk);
+ DCHECK_LT(index, chunks_.size());
+ DCHECK(!chunks_[index]);
+ chunks_[index] = chunk.release();
+ recyclable_chunks_queue_[queue_tail_] = index;
+ queue_tail_ = NextQueueIndex(queue_tail_);
+ }
+
+ bool IsFull() const override { return false; }
+
+ size_t Size() const override {
+ // This is approximate because not all of the chunks are full.
+ return chunks_.size() * TraceBufferChunk::kTraceBufferChunkSize;
+ }
+
+ size_t Capacity() const override {
+ return max_chunks_ * TraceBufferChunk::kTraceBufferChunkSize;
+ }
+
+ TraceEvent* GetEventByHandle(TraceEventHandle handle) override {
+ if (handle.chunk_index >= chunks_.size())
+ return NULL;
+ TraceBufferChunk* chunk = chunks_[handle.chunk_index];
+ if (!chunk || chunk->seq() != handle.chunk_seq)
+ return NULL;
+ return chunk->GetEventAt(handle.event_index);
+ }
+
+ const TraceBufferChunk* NextChunk() override {
+ if (chunks_.empty())
+ return NULL;
+
+ while (current_iteration_index_ != queue_tail_) {
+ size_t chunk_index = recyclable_chunks_queue_[current_iteration_index_];
+ current_iteration_index_ = NextQueueIndex(current_iteration_index_);
+ if (chunk_index >= chunks_.size()) // Skip uninitialized chunks.
+ continue;
+ DCHECK(chunks_[chunk_index]);
+ return chunks_[chunk_index];
+ }
+ return NULL;
+ }
+
+ scoped_ptr<TraceBuffer> CloneForIteration() const override {
+ scoped_ptr<ClonedTraceBuffer> cloned_buffer(new ClonedTraceBuffer());
+ for (size_t queue_index = queue_head_; queue_index != queue_tail_;
+ queue_index = NextQueueIndex(queue_index)) {
+ size_t chunk_index = recyclable_chunks_queue_[queue_index];
+ if (chunk_index >= chunks_.size()) // Skip uninitialized chunks.
+ continue;
+ TraceBufferChunk* chunk = chunks_[chunk_index];
+ cloned_buffer->chunks_.push_back(chunk ? chunk->Clone().release() : NULL);
+ }
+ return cloned_buffer.Pass();
+ }
+
+ void EstimateTraceMemoryOverhead(
+ TraceEventMemoryOverhead* overhead) override {
+ overhead->Add("TraceBufferRingBuffer", sizeof(*this));
+ for (size_t queue_index = queue_head_; queue_index != queue_tail_;
+ queue_index = NextQueueIndex(queue_index)) {
+ size_t chunk_index = recyclable_chunks_queue_[queue_index];
+ if (chunk_index >= chunks_.size()) // Skip uninitialized chunks.
+ continue;
+ chunks_[chunk_index]->EstimateTraceMemoryOverhead(overhead);
+ }
+ }
+
+ private:
+ class ClonedTraceBuffer : public TraceBuffer {
+ public:
+ ClonedTraceBuffer() : current_iteration_index_(0) {}
+
+ // The only implemented method.
+ const TraceBufferChunk* NextChunk() override {
+ return current_iteration_index_ < chunks_.size()
+ ? chunks_[current_iteration_index_++]
+ : NULL;
+ }
+
+ scoped_ptr<TraceBufferChunk> GetChunk(size_t* index) override {
+ NOTIMPLEMENTED();
+ return scoped_ptr<TraceBufferChunk>();
+ }
+ void ReturnChunk(size_t index, scoped_ptr<TraceBufferChunk>) override {
+ NOTIMPLEMENTED();
+ }
+ bool IsFull() const override { return false; }
+ size_t Size() const override { return 0; }
+ size_t Capacity() const override { return 0; }
+ TraceEvent* GetEventByHandle(TraceEventHandle handle) override {
+ return NULL;
+ }
+ scoped_ptr<TraceBuffer> CloneForIteration() const override {
+ NOTIMPLEMENTED();
+ return scoped_ptr<TraceBuffer>();
+ }
+ void EstimateTraceMemoryOverhead(
+ TraceEventMemoryOverhead* overhead) override {
+ NOTIMPLEMENTED();
+ }
+
+ size_t current_iteration_index_;
+ ScopedVector<TraceBufferChunk> chunks_;
+ };
+
+ bool QueueIsEmpty() const { return queue_head_ == queue_tail_; }
+
+ size_t QueueSize() const {
+ return queue_tail_ > queue_head_
+ ? queue_tail_ - queue_head_
+ : queue_tail_ + queue_capacity() - queue_head_;
+ }
+
+ bool QueueIsFull() const { return QueueSize() == queue_capacity() - 1; }
+
+ size_t queue_capacity() const {
+ // One extra space to help distinguish full state and empty state.
+ return max_chunks_ + 1;
+ }
+
+ size_t NextQueueIndex(size_t index) const {
+ index++;
+ if (index >= queue_capacity())
+ index = 0;
+ return index;
+ }
+
+ size_t max_chunks_;
+ ScopedVector<TraceBufferChunk> chunks_;
+
+ scoped_ptr<size_t[]> recyclable_chunks_queue_;
+ size_t queue_head_;
+ size_t queue_tail_;
+
+ size_t current_iteration_index_;
+ uint32 current_chunk_seq_;
+
+ DISALLOW_COPY_AND_ASSIGN(TraceBufferRingBuffer);
+};
+
+class TraceBufferVector : public TraceBuffer {
+ public:
+ TraceBufferVector(size_t max_chunks)
+ : in_flight_chunk_count_(0),
+ current_iteration_index_(0),
+ max_chunks_(max_chunks) {
+ chunks_.reserve(max_chunks_);
+ }
+
+ scoped_ptr<TraceBufferChunk> GetChunk(size_t* index) override {
+ // This function may be called when adding normal events or indirectly from
+    // AddMetadataEventsWhileLocked(). We cannot DCHECK(!IsFull()) because we
+ // have to add the metadata events and flush thread-local buffers even if
+ // the buffer is full.
+ *index = chunks_.size();
+    chunks_.push_back(NULL);  // Put NULL in the slot of an in-flight chunk.
+ ++in_flight_chunk_count_;
+ // + 1 because zero chunk_seq is not allowed.
+ return scoped_ptr<TraceBufferChunk>(
+ new TraceBufferChunk(static_cast<uint32>(*index) + 1));
+ }
+
+ void ReturnChunk(size_t index, scoped_ptr<TraceBufferChunk> chunk) override {
+ DCHECK_GT(in_flight_chunk_count_, 0u);
+ DCHECK_LT(index, chunks_.size());
+ DCHECK(!chunks_[index]);
+ --in_flight_chunk_count_;
+ chunks_[index] = chunk.release();
+ }
+
+ bool IsFull() const override { return chunks_.size() >= max_chunks_; }
+
+ size_t Size() const override {
+ // This is approximate because not all of the chunks are full.
+ return chunks_.size() * TraceBufferChunk::kTraceBufferChunkSize;
+ }
+
+ size_t Capacity() const override {
+ return max_chunks_ * TraceBufferChunk::kTraceBufferChunkSize;
+ }
+
+ TraceEvent* GetEventByHandle(TraceEventHandle handle) override {
+ if (handle.chunk_index >= chunks_.size())
+ return NULL;
+ TraceBufferChunk* chunk = chunks_[handle.chunk_index];
+ if (!chunk || chunk->seq() != handle.chunk_seq)
+ return NULL;
+ return chunk->GetEventAt(handle.event_index);
+ }
+
+ const TraceBufferChunk* NextChunk() override {
+ while (current_iteration_index_ < chunks_.size()) {
+ // Skip in-flight chunks.
+ const TraceBufferChunk* chunk = chunks_[current_iteration_index_++];
+ if (chunk)
+ return chunk;
+ }
+ return NULL;
+ }
+
+ scoped_ptr<TraceBuffer> CloneForIteration() const override {
+ NOTIMPLEMENTED();
+ return scoped_ptr<TraceBuffer>();
+ }
+
+ void EstimateTraceMemoryOverhead(
+ TraceEventMemoryOverhead* overhead) override {
+ const size_t chunks_ptr_vector_allocated_size =
+ sizeof(*this) + max_chunks_ * sizeof(decltype(chunks_)::value_type);
+ const size_t chunks_ptr_vector_resident_size =
+ sizeof(*this) + chunks_.size() * sizeof(decltype(chunks_)::value_type);
+ overhead->Add("TraceBufferVector", chunks_ptr_vector_allocated_size,
+ chunks_ptr_vector_resident_size);
+ for (size_t i = 0; i < chunks_.size(); ++i) {
+ TraceBufferChunk* chunk = chunks_[i];
+ // Skip the in-flight (nullptr) chunks. They will be accounted by the
+ // per-thread-local dumpers, see ThreadLocalEventBuffer::OnMemoryDump.
+ if (chunk)
+ chunk->EstimateTraceMemoryOverhead(overhead);
+ }
+ }
+
+ private:
+ size_t in_flight_chunk_count_;
+ size_t current_iteration_index_;
+ size_t max_chunks_;
+ ScopedVector<TraceBufferChunk> chunks_;
+
+ DISALLOW_COPY_AND_ASSIGN(TraceBufferVector);
+};
+
+} // namespace
+
+TraceBufferChunk::TraceBufferChunk(uint32 seq) : next_free_(0), seq_(seq) {}
+
+TraceBufferChunk::~TraceBufferChunk() {}
+
+void TraceBufferChunk::Reset(uint32 new_seq) {
+ for (size_t i = 0; i < next_free_; ++i)
+ chunk_[i].Reset();
+ next_free_ = 0;
+ seq_ = new_seq;
+ cached_overhead_estimate_.reset();
+}
+
+TraceEvent* TraceBufferChunk::AddTraceEvent(size_t* event_index) {
+ DCHECK(!IsFull());
+ *event_index = next_free_++;
+ return &chunk_[*event_index];
+}
+
+scoped_ptr<TraceBufferChunk> TraceBufferChunk::Clone() const {
+ scoped_ptr<TraceBufferChunk> cloned_chunk(new TraceBufferChunk(seq_));
+ cloned_chunk->next_free_ = next_free_;
+ for (size_t i = 0; i < next_free_; ++i)
+ cloned_chunk->chunk_[i].CopyFrom(chunk_[i]);
+ return cloned_chunk.Pass();
+}
+
+void TraceBufferChunk::EstimateTraceMemoryOverhead(
+ TraceEventMemoryOverhead* overhead) {
+ if (!cached_overhead_estimate_) {
+ cached_overhead_estimate_.reset(new TraceEventMemoryOverhead);
+
+ // When estimating the size of TraceBufferChunk, exclude the array of trace
+ // events, as they are computed individually below.
+ cached_overhead_estimate_->Add("TraceBufferChunk",
+ sizeof(*this) - sizeof(chunk_));
+ }
+
+ const size_t num_cached_estimated_events =
+ cached_overhead_estimate_->GetCount("TraceEvent");
+ DCHECK_LE(num_cached_estimated_events, size());
+
+ if (IsFull() && num_cached_estimated_events == size()) {
+ overhead->Update(*cached_overhead_estimate_);
+ return;
+ }
+
+ for (size_t i = num_cached_estimated_events; i < size(); ++i)
+ chunk_[i].EstimateTraceMemoryOverhead(cached_overhead_estimate_.get());
+
+ if (IsFull()) {
+ cached_overhead_estimate_->AddSelf();
+ } else {
+ // The unused TraceEvents in |chunks_| are not cached. They will keep
+ // changing as new TraceEvents are added to this chunk, so they are
+ // computed on the fly.
+ const size_t num_unused_trace_events = capacity() - size();
+ overhead->Add("TraceEvent (unused)",
+ num_unused_trace_events * sizeof(TraceEvent));
+ }
+
+ overhead->Update(*cached_overhead_estimate_);
+}
+
+TraceResultBuffer::OutputCallback
+TraceResultBuffer::SimpleOutput::GetCallback() {
+ return Bind(&SimpleOutput::Append, Unretained(this));
+}
+
+void TraceResultBuffer::SimpleOutput::Append(
+ const std::string& json_trace_output) {
+ json_output += json_trace_output;
+}
+
+TraceResultBuffer::TraceResultBuffer() : append_comma_(false) {}
+
+TraceResultBuffer::~TraceResultBuffer() {}
+
+void TraceResultBuffer::SetOutputCallback(
+ const OutputCallback& json_chunk_callback) {
+ output_callback_ = json_chunk_callback;
+}
+
+void TraceResultBuffer::Start() {
+ append_comma_ = false;
+ output_callback_.Run("[");
+}
+
+void TraceResultBuffer::AddFragment(const std::string& trace_fragment) {
+ if (append_comma_)
+ output_callback_.Run(",");
+ append_comma_ = true;
+ output_callback_.Run(trace_fragment);
+}
+
+void TraceResultBuffer::Finish() {
+ output_callback_.Run("]");
+}
+
+TraceBuffer* TraceBuffer::CreateTraceBufferRingBuffer(size_t max_chunks) {
+ return new TraceBufferRingBuffer(max_chunks);
+}
+
+TraceBuffer* TraceBuffer::CreateTraceBufferVectorOfSize(size_t max_chunks) {
+ return new TraceBufferVector(max_chunks);
+}
+
+} // namespace trace_event
+} // namespace base
diff --git a/base/trace_event/trace_buffer.h b/base/trace_event/trace_buffer.h
new file mode 100644
index 0000000..4074120
--- /dev/null
+++ b/base/trace_event/trace_buffer.h
@@ -0,0 +1,126 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TRACE_EVENT_TRACE_BUFFER_H_
+#define BASE_TRACE_EVENT_TRACE_BUFFER_H_
+
+#include "base/base_export.h"
+#include "base/trace_event/trace_event.h"
+#include "base/trace_event/trace_event_impl.h"
+
+namespace base {
+
+namespace trace_event {
+
+// TraceBufferChunk is the basic unit of TraceBuffer.
+class BASE_EXPORT TraceBufferChunk {
+ public:
+ explicit TraceBufferChunk(uint32 seq);
+ ~TraceBufferChunk();
+
+ void Reset(uint32 new_seq);
+ TraceEvent* AddTraceEvent(size_t* event_index);
+ bool IsFull() const { return next_free_ == kTraceBufferChunkSize; }
+
+ uint32 seq() const { return seq_; }
+ size_t capacity() const { return kTraceBufferChunkSize; }
+ size_t size() const { return next_free_; }
+
+ TraceEvent* GetEventAt(size_t index) {
+ DCHECK(index < size());
+ return &chunk_[index];
+ }
+ const TraceEvent* GetEventAt(size_t index) const {
+ DCHECK(index < size());
+ return &chunk_[index];
+ }
+
+ scoped_ptr<TraceBufferChunk> Clone() const;
+
+ void EstimateTraceMemoryOverhead(TraceEventMemoryOverhead* overhead);
+
+ static const size_t kTraceBufferChunkSize = 64;
+
+ private:
+ size_t next_free_;
+ scoped_ptr<TraceEventMemoryOverhead> cached_overhead_estimate_;
+ TraceEvent chunk_[kTraceBufferChunkSize];
+ uint32 seq_;
+};
+
+// TraceBuffer holds the events as they are collected.
+class BASE_EXPORT TraceBuffer {
+ public:
+ virtual ~TraceBuffer() {}
+
+ virtual scoped_ptr<TraceBufferChunk> GetChunk(size_t* index) = 0;
+ virtual void ReturnChunk(size_t index,
+ scoped_ptr<TraceBufferChunk> chunk) = 0;
+
+ virtual bool IsFull() const = 0;
+ virtual size_t Size() const = 0;
+ virtual size_t Capacity() const = 0;
+ virtual TraceEvent* GetEventByHandle(TraceEventHandle handle) = 0;
+
+ // For iteration. Each TraceBuffer can only be iterated once.
+ virtual const TraceBufferChunk* NextChunk() = 0;
+
+ virtual scoped_ptr<TraceBuffer> CloneForIteration() const = 0;
+
+ // Computes an estimate of the size of the buffer, including all the retained
+ // objects.
+ virtual void EstimateTraceMemoryOverhead(
+ TraceEventMemoryOverhead* overhead) = 0;
+
+ static TraceBuffer* CreateTraceBufferRingBuffer(size_t max_chunks);
+ static TraceBuffer* CreateTraceBufferVectorOfSize(size_t max_chunks);
+};
+
+// TraceResultBuffer collects and converts trace fragments returned by TraceLog
+// to JSON output.
+class BASE_EXPORT TraceResultBuffer {
+ public:
+ typedef base::Callback<void(const std::string&)> OutputCallback;
+
+ // If you don't need to stream JSON chunks out efficiently, and just want to
+ // get a complete JSON string after calling Finish, use this struct to collect
+ // JSON trace output.
+ struct BASE_EXPORT SimpleOutput {
+ OutputCallback GetCallback();
+ void Append(const std::string& json_string);
+
+ // Do what you want with the json_output_ string after calling
+ // TraceResultBuffer::Finish.
+ std::string json_output;
+ };
+
+ TraceResultBuffer();
+ ~TraceResultBuffer();
+
+ // Set callback. The callback will be called during Start with the initial
+ // JSON output and during AddFragment and Finish with following JSON output
+ // chunks. The callback target must live past the last calls to
+ // TraceResultBuffer::Start/AddFragment/Finish.
+ void SetOutputCallback(const OutputCallback& json_chunk_callback);
+
+ // Start JSON output. This resets all internal state, so you can reuse
+ // the TraceResultBuffer by calling Start.
+ void Start();
+
+ // Call AddFragment 0 or more times to add trace fragments from TraceLog.
+ void AddFragment(const std::string& trace_fragment);
+
+ // When all fragments have been added, call Finish to complete the JSON
+ // formatted output.
+ void Finish();
+
+ private:
+ OutputCallback output_callback_;
+ bool append_comma_;
+};
+
+} // namespace trace_event
+} // namespace base
+
+#endif // BASE_TRACE_EVENT_TRACE_BUFFER_H_
diff --git a/base/trace_event/trace_event.gypi b/base/trace_event/trace_event.gypi
index 05ec283..6e6c5f1 100644
--- a/base/trace_event/trace_event.gypi
+++ b/base/trace_event/trace_event.gypi
@@ -27,6 +27,8 @@
'trace_event/process_memory_totals.h',
'trace_event/process_memory_totals_dump_provider.cc',
'trace_event/process_memory_totals_dump_provider.h',
+ 'trace_event/trace_buffer.cc',
+ 'trace_event/trace_buffer.h',
'trace_event/trace_config.cc',
'trace_event/trace_config.h',
'trace_event/trace_event.h',
@@ -37,7 +39,6 @@
'trace_event/trace_event_etw_export_win.h',
'trace_event/trace_event_impl.cc',
'trace_event/trace_event_impl.h',
- 'trace_event/trace_event_impl_constants.cc',
'trace_event/trace_event_memory.cc',
'trace_event/trace_event_memory.h',
'trace_event/trace_event_memory_overhead.cc',
@@ -48,6 +49,11 @@
'trace_event/trace_event_system_stats_monitor.h',
'trace_event/trace_event_win.cc',
'trace_event/trace_event_win.h',
+ 'trace_event/trace_log.cc',
+ 'trace_event/trace_log.h',
+ 'trace_event/trace_log_constants.cc',
+ 'trace_event/trace_sampling_thread.cc',
+ 'trace_event/trace_sampling_thread.h',
'trace_event/winheap_dump_provider_win.cc',
'trace_event/winheap_dump_provider_win.h',
],
diff --git a/base/trace_event/trace_event.h b/base/trace_event/trace_event.h
index 9624937..db5ff33 100644
--- a/base/trace_event/trace_event.h
+++ b/base/trace_event/trace_event.h
@@ -193,9 +193,9 @@
#include "base/atomicops.h"
#include "base/time/time.h"
-#include "base/trace_event/trace_event_impl.h"
#include "base/trace_event/trace_event_memory.h"
#include "base/trace_event/trace_event_system_stats_monitor.h"
+#include "base/trace_event/trace_log.h"
#include "build/build_config.h"
// By default, const char* argument values are assumed to have long-lived scope
diff --git a/base/trace_event/trace_event_impl.cc b/base/trace_event/trace_event_impl.cc
index 90314d7..7968d78ec 100644
--- a/base/trace_event/trace_event_impl.cc
+++ b/base/trace_event/trace_event_impl.cc
@@ -4,543 +4,21 @@
#include "base/trace_event/trace_event_impl.h"
-#include <algorithm>
-#include <cmath>
-
-#include "base/base_switches.h"
-#include "base/bind.h"
-#include "base/command_line.h"
-#include "base/debug/leak_annotations.h"
#include "base/format_macros.h"
#include "base/json/string_escape.h"
-#include "base/lazy_instance.h"
-#include "base/location.h"
-#include "base/memory/singleton.h"
-#include "base/process/process_metrics.h"
#include "base/stl_util.h"
#include "base/strings/string_number_conversions.h"
-#include "base/strings/string_split.h"
-#include "base/strings/string_tokenizer.h"
#include "base/strings/string_util.h"
#include "base/strings/stringprintf.h"
#include "base/strings/utf_string_conversions.h"
-#include "base/synchronization/cancellation_flag.h"
-#include "base/synchronization/waitable_event.h"
-#include "base/sys_info.h"
-#include "base/third_party/dynamic_annotations/dynamic_annotations.h"
-#include "base/thread_task_runner_handle.h"
-#include "base/threading/platform_thread.h"
-#include "base/threading/thread_id_name_manager.h"
-#include "base/threading/worker_pool.h"
-#include "base/time/time.h"
-#include "base/trace_event/memory_dump_manager.h"
-#include "base/trace_event/memory_dump_provider.h"
-#include "base/trace_event/process_memory_dump.h"
#include "base/trace_event/trace_event.h"
-#include "base/trace_event/trace_event_synthetic_delay.h"
-
-#if defined(OS_WIN)
-#include "base/trace_event/trace_event_etw_export_win.h"
-#include "base/trace_event/trace_event_win.h"
-#endif
-
-class DeleteTraceLogForTesting {
- public:
- static void Delete() {
- Singleton<base::trace_event::TraceLog,
- LeakySingletonTraits<base::trace_event::TraceLog>>::OnExit(0);
- }
-};
-
-// The thread buckets for the sampling profiler.
-BASE_EXPORT TRACE_EVENT_API_ATOMIC_WORD g_trace_state[3];
+#include "base/trace_event/trace_log.h"
namespace base {
namespace trace_event {
namespace {
-// The overhead of TraceEvent above this threshold will be reported in the
-// trace.
-const int kOverheadReportThresholdInMicroseconds = 50;
-
-// Controls the number of trace events we will buffer in-memory
-// before throwing them away.
-const size_t kTraceBufferChunkSize = TraceBufferChunk::kTraceBufferChunkSize;
-const size_t kTraceEventVectorBigBufferChunks =
- 512000000 / kTraceBufferChunkSize;
-const size_t kTraceEventVectorBufferChunks = 256000 / kTraceBufferChunkSize;
-const size_t kTraceEventRingBufferChunks = kTraceEventVectorBufferChunks / 4;
-const size_t kTraceEventBufferSizeInBytes = 100 * 1024;
-// Can store results for 30 seconds with 1 ms sampling interval.
-const size_t kMonitorTraceEventBufferChunks = 30000 / kTraceBufferChunkSize;
-// ECHO_TO_CONSOLE needs a small buffer to hold the unfinished COMPLETE events.
-const size_t kEchoToConsoleTraceEventBufferChunks = 256;
-
-const int kThreadFlushTimeoutMs = 3000;
-
-#if !defined(OS_NACL)
-// These categories will cause deadlock when ECHO_TO_CONSOLE. crbug.com/325575.
-const char kEchoToConsoleCategoryFilter[] = "-ipc,-task";
-#endif
-
-#define MAX_CATEGORY_GROUPS 100
-
-// Parallel arrays g_category_groups and g_category_group_enabled are separate
-// so that a pointer to a member of g_category_group_enabled can be easily
-// converted to an index into g_category_groups. This allows macros to deal
-// only with char enabled pointers from g_category_group_enabled, and we can
-// convert internally to determine the category name from the char enabled
-// pointer.
-const char* g_category_groups[MAX_CATEGORY_GROUPS] = {
- "toplevel",
- "tracing already shutdown",
- "tracing categories exhausted; must increase MAX_CATEGORY_GROUPS",
- "__metadata",
- // For reporting trace_event overhead. For thread local event buffers only.
- "trace_event_overhead"};
-
-// The enabled flag is char instead of bool so that the API can be used from C.
-unsigned char g_category_group_enabled[MAX_CATEGORY_GROUPS] = { 0 };
-// Indexes here have to match the g_category_groups array indexes above.
-const int g_category_already_shutdown = 1;
-const int g_category_categories_exhausted = 2;
-const int g_category_metadata = 3;
-const int g_category_trace_event_overhead = 4;
-const int g_num_builtin_categories = 5;
-// Skip default categories.
-base::subtle::AtomicWord g_category_index = g_num_builtin_categories;
-
-// The name of the current thread. This is used to decide if the current
-// thread name has changed. We combine all the seen thread names into the
-// output name for the thread.
-LazyInstance<ThreadLocalPointer<const char> >::Leaky
- g_current_thread_name = LAZY_INSTANCE_INITIALIZER;
-
-ThreadTicks ThreadNow() {
- return ThreadTicks::IsSupported() ? ThreadTicks::Now() : ThreadTicks();
-}
-
-class TraceBufferRingBuffer : public TraceBuffer {
- public:
- TraceBufferRingBuffer(size_t max_chunks)
- : max_chunks_(max_chunks),
- recyclable_chunks_queue_(new size_t[queue_capacity()]),
- queue_head_(0),
- queue_tail_(max_chunks),
- current_iteration_index_(0),
- current_chunk_seq_(1) {
- chunks_.reserve(max_chunks);
- for (size_t i = 0; i < max_chunks; ++i)
- recyclable_chunks_queue_[i] = i;
- }
-
- scoped_ptr<TraceBufferChunk> GetChunk(size_t* index) override {
- // Because the number of threads is much less than the number of chunks,
- // the queue should never be empty.
- DCHECK(!QueueIsEmpty());
-
- *index = recyclable_chunks_queue_[queue_head_];
- queue_head_ = NextQueueIndex(queue_head_);
- current_iteration_index_ = queue_head_;
-
- if (*index >= chunks_.size())
- chunks_.resize(*index + 1);
-
- TraceBufferChunk* chunk = chunks_[*index];
- chunks_[*index] = NULL; // Put NULL in the slot of a in-flight chunk.
- if (chunk)
- chunk->Reset(current_chunk_seq_++);
- else
- chunk = new TraceBufferChunk(current_chunk_seq_++);
-
- return scoped_ptr<TraceBufferChunk>(chunk);
- }
-
- void ReturnChunk(size_t index, scoped_ptr<TraceBufferChunk> chunk) override {
- // When this method is called, the queue should not be full because it
- // can contain all chunks including the one to be returned.
- DCHECK(!QueueIsFull());
- DCHECK(chunk);
- DCHECK_LT(index, chunks_.size());
- DCHECK(!chunks_[index]);
- chunks_[index] = chunk.release();
- recyclable_chunks_queue_[queue_tail_] = index;
- queue_tail_ = NextQueueIndex(queue_tail_);
- }
-
- bool IsFull() const override { return false; }
-
- size_t Size() const override {
- // This is approximate because not all of the chunks are full.
- return chunks_.size() * kTraceBufferChunkSize;
- }
-
- size_t Capacity() const override {
- return max_chunks_ * kTraceBufferChunkSize;
- }
-
- TraceEvent* GetEventByHandle(TraceEventHandle handle) override {
- if (handle.chunk_index >= chunks_.size())
- return NULL;
- TraceBufferChunk* chunk = chunks_[handle.chunk_index];
- if (!chunk || chunk->seq() != handle.chunk_seq)
- return NULL;
- return chunk->GetEventAt(handle.event_index);
- }
-
- const TraceBufferChunk* NextChunk() override {
- if (chunks_.empty())
- return NULL;
-
- while (current_iteration_index_ != queue_tail_) {
- size_t chunk_index = recyclable_chunks_queue_[current_iteration_index_];
- current_iteration_index_ = NextQueueIndex(current_iteration_index_);
- if (chunk_index >= chunks_.size()) // Skip uninitialized chunks.
- continue;
- DCHECK(chunks_[chunk_index]);
- return chunks_[chunk_index];
- }
- return NULL;
- }
-
- scoped_ptr<TraceBuffer> CloneForIteration() const override {
- scoped_ptr<ClonedTraceBuffer> cloned_buffer(new ClonedTraceBuffer());
- for (size_t queue_index = queue_head_; queue_index != queue_tail_;
- queue_index = NextQueueIndex(queue_index)) {
- size_t chunk_index = recyclable_chunks_queue_[queue_index];
- if (chunk_index >= chunks_.size()) // Skip uninitialized chunks.
- continue;
- TraceBufferChunk* chunk = chunks_[chunk_index];
- cloned_buffer->chunks_.push_back(chunk ? chunk->Clone().release() : NULL);
- }
- return cloned_buffer.Pass();
- }
-
- void EstimateTraceMemoryOverhead(
- TraceEventMemoryOverhead* overhead) override {
- overhead->Add("TraceBufferRingBuffer", sizeof(*this));
- for (size_t queue_index = queue_head_; queue_index != queue_tail_;
- queue_index = NextQueueIndex(queue_index)) {
- size_t chunk_index = recyclable_chunks_queue_[queue_index];
- if (chunk_index >= chunks_.size()) // Skip uninitialized chunks.
- continue;
- chunks_[chunk_index]->EstimateTraceMemoryOverhead(overhead);
- }
- }
-
- private:
- class ClonedTraceBuffer : public TraceBuffer {
- public:
- ClonedTraceBuffer() : current_iteration_index_(0) {}
-
- // The only implemented method.
- const TraceBufferChunk* NextChunk() override {
- return current_iteration_index_ < chunks_.size() ?
- chunks_[current_iteration_index_++] : NULL;
- }
-
- scoped_ptr<TraceBufferChunk> GetChunk(size_t* index) override {
- NOTIMPLEMENTED();
- return scoped_ptr<TraceBufferChunk>();
- }
- void ReturnChunk(size_t index, scoped_ptr<TraceBufferChunk>) override {
- NOTIMPLEMENTED();
- }
- bool IsFull() const override { return false; }
- size_t Size() const override { return 0; }
- size_t Capacity() const override { return 0; }
- TraceEvent* GetEventByHandle(TraceEventHandle handle) override {
- return NULL;
- }
- scoped_ptr<TraceBuffer> CloneForIteration() const override {
- NOTIMPLEMENTED();
- return scoped_ptr<TraceBuffer>();
- }
- void EstimateTraceMemoryOverhead(
- TraceEventMemoryOverhead* overhead) override {
- NOTIMPLEMENTED();
- }
-
- size_t current_iteration_index_;
- ScopedVector<TraceBufferChunk> chunks_;
- };
-
- bool QueueIsEmpty() const {
- return queue_head_ == queue_tail_;
- }
-
- size_t QueueSize() const {
- return queue_tail_ > queue_head_ ? queue_tail_ - queue_head_ :
- queue_tail_ + queue_capacity() - queue_head_;
- }
-
- bool QueueIsFull() const {
- return QueueSize() == queue_capacity() - 1;
- }
-
- size_t queue_capacity() const {
- // One extra space to help distinguish full state and empty state.
- return max_chunks_ + 1;
- }
-
- size_t NextQueueIndex(size_t index) const {
- index++;
- if (index >= queue_capacity())
- index = 0;
- return index;
- }
-
- size_t max_chunks_;
- ScopedVector<TraceBufferChunk> chunks_;
-
- scoped_ptr<size_t[]> recyclable_chunks_queue_;
- size_t queue_head_;
- size_t queue_tail_;
-
- size_t current_iteration_index_;
- uint32 current_chunk_seq_;
-
- DISALLOW_COPY_AND_ASSIGN(TraceBufferRingBuffer);
-};
-
-class TraceBufferVector : public TraceBuffer {
- public:
- TraceBufferVector(size_t max_chunks)
- : in_flight_chunk_count_(0),
- current_iteration_index_(0),
- max_chunks_(max_chunks) {
- chunks_.reserve(max_chunks_);
- }
-
- scoped_ptr<TraceBufferChunk> GetChunk(size_t* index) override {
- // This function may be called when adding normal events or indirectly from
- // AddMetadataEventsWhileLocked(). We can not DECHECK(!IsFull()) because we
- // have to add the metadata events and flush thread-local buffers even if
- // the buffer is full.
- *index = chunks_.size();
- chunks_.push_back(NULL); // Put NULL in the slot of a in-flight chunk.
- ++in_flight_chunk_count_;
- // + 1 because zero chunk_seq is not allowed.
- return scoped_ptr<TraceBufferChunk>(
- new TraceBufferChunk(static_cast<uint32>(*index) + 1));
- }
-
- void ReturnChunk(size_t index, scoped_ptr<TraceBufferChunk> chunk) override {
- DCHECK_GT(in_flight_chunk_count_, 0u);
- DCHECK_LT(index, chunks_.size());
- DCHECK(!chunks_[index]);
- --in_flight_chunk_count_;
- chunks_[index] = chunk.release();
- }
-
- bool IsFull() const override { return chunks_.size() >= max_chunks_; }
-
- size_t Size() const override {
- // This is approximate because not all of the chunks are full.
- return chunks_.size() * kTraceBufferChunkSize;
- }
-
- size_t Capacity() const override {
- return max_chunks_ * kTraceBufferChunkSize;
- }
-
- TraceEvent* GetEventByHandle(TraceEventHandle handle) override {
- if (handle.chunk_index >= chunks_.size())
- return NULL;
- TraceBufferChunk* chunk = chunks_[handle.chunk_index];
- if (!chunk || chunk->seq() != handle.chunk_seq)
- return NULL;
- return chunk->GetEventAt(handle.event_index);
- }
-
- const TraceBufferChunk* NextChunk() override {
- while (current_iteration_index_ < chunks_.size()) {
- // Skip in-flight chunks.
- const TraceBufferChunk* chunk = chunks_[current_iteration_index_++];
- if (chunk)
- return chunk;
- }
- return NULL;
- }
-
- scoped_ptr<TraceBuffer> CloneForIteration() const override {
- NOTIMPLEMENTED();
- return scoped_ptr<TraceBuffer>();
- }
-
- void EstimateTraceMemoryOverhead(
- TraceEventMemoryOverhead* overhead) override {
- const size_t chunks_ptr_vector_allocated_size =
- sizeof(*this) + max_chunks_ * sizeof(decltype(chunks_)::value_type);
- const size_t chunks_ptr_vector_resident_size =
- sizeof(*this) + chunks_.size() * sizeof(decltype(chunks_)::value_type);
- overhead->Add("TraceBufferVector", chunks_ptr_vector_allocated_size,
- chunks_ptr_vector_resident_size);
- for (size_t i = 0; i < chunks_.size(); ++i) {
- TraceBufferChunk* chunk = chunks_[i];
- // Skip the in-flight (nullptr) chunks. They will be accounted by the
- // per-thread-local dumpers, see ThreadLocalEventBuffer::OnMemoryDump.
- if (chunk)
- chunk->EstimateTraceMemoryOverhead(overhead);
- }
- }
-
- private:
- size_t in_flight_chunk_count_;
- size_t current_iteration_index_;
- size_t max_chunks_;
- ScopedVector<TraceBufferChunk> chunks_;
-
- DISALLOW_COPY_AND_ASSIGN(TraceBufferVector);
-};
-
-template <typename T>
-void InitializeMetadataEvent(TraceEvent* trace_event,
- int thread_id,
- const char* metadata_name, const char* arg_name,
- const T& value) {
- if (!trace_event)
- return;
-
- int num_args = 1;
- unsigned char arg_type;
- unsigned long long arg_value;
- ::trace_event_internal::SetTraceValue(value, &arg_type, &arg_value);
- trace_event->Initialize(
- thread_id, TraceTicks(), ThreadTicks(), TRACE_EVENT_PHASE_METADATA,
- &g_category_group_enabled[g_category_metadata], metadata_name,
- trace_event_internal::kNoId, trace_event_internal::kNoId, num_args,
- &arg_name, &arg_type, &arg_value, NULL, TRACE_EVENT_FLAG_NONE);
-}
-
-class AutoThreadLocalBoolean {
- public:
- explicit AutoThreadLocalBoolean(ThreadLocalBoolean* thread_local_boolean)
- : thread_local_boolean_(thread_local_boolean) {
- DCHECK(!thread_local_boolean_->Get());
- thread_local_boolean_->Set(true);
- }
- ~AutoThreadLocalBoolean() {
- thread_local_boolean_->Set(false);
- }
-
- private:
- ThreadLocalBoolean* thread_local_boolean_;
- DISALLOW_COPY_AND_ASSIGN(AutoThreadLocalBoolean);
-};
-
-} // namespace
-
-TraceBufferChunk::TraceBufferChunk(uint32 seq) : next_free_(0), seq_(seq) {
-}
-
-TraceBufferChunk::~TraceBufferChunk() {
-}
-
-void TraceBufferChunk::Reset(uint32 new_seq) {
- for (size_t i = 0; i < next_free_; ++i)
- chunk_[i].Reset();
- next_free_ = 0;
- seq_ = new_seq;
- cached_overhead_estimate_.reset();
-}
-
-TraceEvent* TraceBufferChunk::AddTraceEvent(size_t* event_index) {
- DCHECK(!IsFull());
- *event_index = next_free_++;
- return &chunk_[*event_index];
-}
-
-scoped_ptr<TraceBufferChunk> TraceBufferChunk::Clone() const {
- scoped_ptr<TraceBufferChunk> cloned_chunk(new TraceBufferChunk(seq_));
- cloned_chunk->next_free_ = next_free_;
- for (size_t i = 0; i < next_free_; ++i)
- cloned_chunk->chunk_[i].CopyFrom(chunk_[i]);
- return cloned_chunk.Pass();
-}
-
-void TraceBufferChunk::EstimateTraceMemoryOverhead(
- TraceEventMemoryOverhead* overhead) {
- if (!cached_overhead_estimate_) {
- cached_overhead_estimate_.reset(new TraceEventMemoryOverhead);
-
- // When estimating the size of TraceBufferChunk, exclude the array of trace
- // events, as they are computed individually below.
- cached_overhead_estimate_->Add("TraceBufferChunk",
- sizeof(*this) - sizeof(chunk_));
- }
-
- const size_t num_cached_estimated_events =
- cached_overhead_estimate_->GetCount("TraceEvent");
- DCHECK_LE(num_cached_estimated_events, size());
-
- if (IsFull() && num_cached_estimated_events == size()) {
- overhead->Update(*cached_overhead_estimate_);
- return;
- }
-
- for (size_t i = num_cached_estimated_events; i < size(); ++i)
- chunk_[i].EstimateTraceMemoryOverhead(cached_overhead_estimate_.get());
-
- if (IsFull()) {
- cached_overhead_estimate_->AddSelf();
- } else {
- // The unused TraceEvents in |chunks_| are not cached. They will keep
- // changing as new TraceEvents are added to this chunk, so they are
- // computed on the fly.
- const size_t num_unused_trace_events = capacity() - size();
- overhead->Add("TraceEvent (unused)",
- num_unused_trace_events * sizeof(TraceEvent));
- }
-
- overhead->Update(*cached_overhead_estimate_);
-}
-
-// A helper class that allows the lock to be acquired in the middle of the scope
-// and unlocks at the end of scope if locked.
-class TraceLog::OptionalAutoLock {
- public:
- explicit OptionalAutoLock(Lock* lock) : lock_(lock), locked_(false) {}
-
- ~OptionalAutoLock() {
- if (locked_)
- lock_->Release();
- }
-
- void EnsureAcquired() {
- if (!locked_) {
- lock_->Acquire();
- locked_ = true;
- }
- }
-
- private:
- Lock* lock_;
- bool locked_;
- DISALLOW_COPY_AND_ASSIGN(OptionalAutoLock);
-};
-
-// Use this function instead of TraceEventHandle constructor to keep the
-// overhead of ScopedTracer (trace_event.h) constructor minimum.
-void MakeHandle(uint32 chunk_seq, size_t chunk_index, size_t event_index,
- TraceEventHandle* handle) {
- DCHECK(chunk_seq);
- DCHECK(chunk_index < (1u << 16));
- DCHECK(event_index < (1u << 16));
- handle->chunk_seq = chunk_seq;
- handle->chunk_index = static_cast<uint16>(chunk_index);
- handle->event_index = static_cast<uint16>(event_index);
-}
-
-////////////////////////////////////////////////////////////////////////////////
-//
-// TraceEvent
-//
-////////////////////////////////////////////////////////////////////////////////
-
-namespace {
-
size_t GetAllocLength(const char* str) { return str ? strlen(str) + 1 : 0; }
// Copies |*member| into |*buffer|, sets |*member| to point to this new
@@ -903,1657 +381,5 @@ void TraceEvent::AppendPrettyPrinted(std::ostringstream* out) const {
}
}
-////////////////////////////////////////////////////////////////////////////////
-//
-// TraceResultBuffer
-//
-////////////////////////////////////////////////////////////////////////////////
-
-TraceResultBuffer::OutputCallback
- TraceResultBuffer::SimpleOutput::GetCallback() {
- return Bind(&SimpleOutput::Append, Unretained(this));
-}
-
-void TraceResultBuffer::SimpleOutput::Append(
- const std::string& json_trace_output) {
- json_output += json_trace_output;
-}
-
-TraceResultBuffer::TraceResultBuffer() : append_comma_(false) {
-}
-
-TraceResultBuffer::~TraceResultBuffer() {
-}
-
-void TraceResultBuffer::SetOutputCallback(
- const OutputCallback& json_chunk_callback) {
- output_callback_ = json_chunk_callback;
-}
-
-void TraceResultBuffer::Start() {
- append_comma_ = false;
- output_callback_.Run("[");
-}
-
-void TraceResultBuffer::AddFragment(const std::string& trace_fragment) {
- if (append_comma_)
- output_callback_.Run(",");
- append_comma_ = true;
- output_callback_.Run(trace_fragment);
-}
-
-void TraceResultBuffer::Finish() {
- output_callback_.Run("]");
-}
-
-////////////////////////////////////////////////////////////////////////////////
-//
-// TraceSamplingThread
-//
-////////////////////////////////////////////////////////////////////////////////
-class TraceBucketData;
-typedef base::Callback<void(TraceBucketData*)> TraceSampleCallback;
-
-class TraceBucketData {
- public:
- TraceBucketData(base::subtle::AtomicWord* bucket,
- const char* name,
- TraceSampleCallback callback);
- ~TraceBucketData();
-
- TRACE_EVENT_API_ATOMIC_WORD* bucket;
- const char* bucket_name;
- TraceSampleCallback callback;
-};
-
-// This object must be created on the IO thread.
-class TraceSamplingThread : public PlatformThread::Delegate {
- public:
- TraceSamplingThread();
- ~TraceSamplingThread() override;
-
- // Implementation of PlatformThread::Delegate:
- void ThreadMain() override;
-
- static void DefaultSamplingCallback(TraceBucketData* bucekt_data);
-
- void Stop();
- void WaitSamplingEventForTesting();
-
- private:
- friend class TraceLog;
-
- void GetSamples();
- // Not thread-safe. Once the ThreadMain has been called, this can no longer
- // be called.
- void RegisterSampleBucket(TRACE_EVENT_API_ATOMIC_WORD* bucket,
- const char* const name,
- TraceSampleCallback callback);
- // Splits a combined "category\0name" into the two component parts.
- static void ExtractCategoryAndName(const char* combined,
- const char** category,
- const char** name);
- std::vector<TraceBucketData> sample_buckets_;
- bool thread_running_;
- CancellationFlag cancellation_flag_;
- WaitableEvent waitable_event_for_testing_;
-};
-
-
-TraceSamplingThread::TraceSamplingThread()
- : thread_running_(false),
- waitable_event_for_testing_(false, false) {
-}
-
-TraceSamplingThread::~TraceSamplingThread() {
-}
-
-void TraceSamplingThread::ThreadMain() {
- PlatformThread::SetName("Sampling Thread");
- thread_running_ = true;
- const int kSamplingFrequencyMicroseconds = 1000;
- while (!cancellation_flag_.IsSet()) {
- PlatformThread::Sleep(
- TimeDelta::FromMicroseconds(kSamplingFrequencyMicroseconds));
- GetSamples();
- waitable_event_for_testing_.Signal();
- }
-}
-
-// static
-void TraceSamplingThread::DefaultSamplingCallback(
- TraceBucketData* bucket_data) {
- TRACE_EVENT_API_ATOMIC_WORD category_and_name =
- TRACE_EVENT_API_ATOMIC_LOAD(*bucket_data->bucket);
- if (!category_and_name)
- return;
- const char* const combined =
- reinterpret_cast<const char* const>(category_and_name);
- const char* category_group;
- const char* name;
- ExtractCategoryAndName(combined, &category_group, &name);
- TRACE_EVENT_API_ADD_TRACE_EVENT(TRACE_EVENT_PHASE_SAMPLE,
- TraceLog::GetCategoryGroupEnabled(category_group),
- name, 0, 0, NULL, NULL, NULL, NULL, 0);
-}
-
-void TraceSamplingThread::GetSamples() {
- for (size_t i = 0; i < sample_buckets_.size(); ++i) {
- TraceBucketData* bucket_data = &sample_buckets_[i];
- bucket_data->callback.Run(bucket_data);
- }
-}
-
-void TraceSamplingThread::RegisterSampleBucket(
- TRACE_EVENT_API_ATOMIC_WORD* bucket,
- const char* const name,
- TraceSampleCallback callback) {
- // Access to sample_buckets_ doesn't cause races with the sampling thread
- // that uses the sample_buckets_, because it is guaranteed that
- // RegisterSampleBucket is called before the sampling thread is created.
- DCHECK(!thread_running_);
- sample_buckets_.push_back(TraceBucketData(bucket, name, callback));
-}
-
-// static
-void TraceSamplingThread::ExtractCategoryAndName(const char* combined,
- const char** category,
- const char** name) {
- *category = combined;
- *name = &combined[strlen(combined) + 1];
-}
-
-void TraceSamplingThread::Stop() {
- cancellation_flag_.Set();
-}
-
-void TraceSamplingThread::WaitSamplingEventForTesting() {
- waitable_event_for_testing_.Wait();
-}
-
-TraceBucketData::TraceBucketData(base::subtle::AtomicWord* bucket,
- const char* name,
- TraceSampleCallback callback)
- : bucket(bucket),
- bucket_name(name),
- callback(callback) {
-}
-
-TraceBucketData::~TraceBucketData() {
-}
-
-////////////////////////////////////////////////////////////////////////////////
-//
-// TraceLog
-//
-////////////////////////////////////////////////////////////////////////////////
-
-class TraceLog::ThreadLocalEventBuffer
- : public MessageLoop::DestructionObserver,
- public MemoryDumpProvider {
- public:
- ThreadLocalEventBuffer(TraceLog* trace_log);
- ~ThreadLocalEventBuffer() override;
-
- TraceEvent* AddTraceEvent(TraceEventHandle* handle);
-
- void ReportOverhead(const TraceTicks& event_timestamp,
- const ThreadTicks& event_thread_timestamp);
-
- TraceEvent* GetEventByHandle(TraceEventHandle handle) {
- if (!chunk_ || handle.chunk_seq != chunk_->seq() ||
- handle.chunk_index != chunk_index_)
- return NULL;
-
- return chunk_->GetEventAt(handle.event_index);
- }
-
- int generation() const { return generation_; }
-
- private:
- // MessageLoop::DestructionObserver
- void WillDestroyCurrentMessageLoop() override;
-
- // MemoryDumpProvider implementation.
- bool OnMemoryDump(ProcessMemoryDump* pmd) override;
-
- void FlushWhileLocked();
-
- void CheckThisIsCurrentBuffer() const {
- DCHECK(trace_log_->thread_local_event_buffer_.Get() == this);
- }
-
- // Since TraceLog is a leaky singleton, trace_log_ will always be valid
- // as long as the thread exists.
- TraceLog* trace_log_;
- scoped_ptr<TraceBufferChunk> chunk_;
- size_t chunk_index_;
- int event_count_;
- TimeDelta overhead_;
- int generation_;
-
- DISALLOW_COPY_AND_ASSIGN(ThreadLocalEventBuffer);
-};
-
-TraceLog::ThreadLocalEventBuffer::ThreadLocalEventBuffer(TraceLog* trace_log)
- : trace_log_(trace_log),
- chunk_index_(0),
- event_count_(0),
- generation_(trace_log->generation()) {
- // ThreadLocalEventBuffer is created only if the thread has a message loop, so
- // the following message_loop won't be NULL.
- MessageLoop* message_loop = MessageLoop::current();
- message_loop->AddDestructionObserver(this);
-
- // This is to report the local memory usage when memory-infra is enabled.
- MemoryDumpManager::GetInstance()->RegisterDumpProvider(
- this, ThreadTaskRunnerHandle::Get());
-
- AutoLock lock(trace_log->lock_);
- trace_log->thread_message_loops_.insert(message_loop);
-}
-
-TraceLog::ThreadLocalEventBuffer::~ThreadLocalEventBuffer() {
- CheckThisIsCurrentBuffer();
- MessageLoop::current()->RemoveDestructionObserver(this);
- MemoryDumpManager::GetInstance()->UnregisterDumpProvider(this);
-
- // Zero event_count_ happens in either of the following cases:
- // - no event generated for the thread;
- // - the thread has no message loop;
- // - trace_event_overhead is disabled.
- if (event_count_) {
- InitializeMetadataEvent(AddTraceEvent(NULL),
- static_cast<int>(base::PlatformThread::CurrentId()),
- "overhead", "average_overhead",
- overhead_.InMillisecondsF() / event_count_);
- }
-
- {
- AutoLock lock(trace_log_->lock_);
- FlushWhileLocked();
- trace_log_->thread_message_loops_.erase(MessageLoop::current());
- }
- trace_log_->thread_local_event_buffer_.Set(NULL);
-}
-
-TraceEvent* TraceLog::ThreadLocalEventBuffer::AddTraceEvent(
- TraceEventHandle* handle) {
- CheckThisIsCurrentBuffer();
-
- if (chunk_ && chunk_->IsFull()) {
- AutoLock lock(trace_log_->lock_);
- FlushWhileLocked();
- chunk_.reset();
- }
- if (!chunk_) {
- AutoLock lock(trace_log_->lock_);
- chunk_ = trace_log_->logged_events_->GetChunk(&chunk_index_);
- trace_log_->CheckIfBufferIsFullWhileLocked();
- }
- if (!chunk_)
- return NULL;
-
- size_t event_index;
- TraceEvent* trace_event = chunk_->AddTraceEvent(&event_index);
- if (trace_event && handle)
- MakeHandle(chunk_->seq(), chunk_index_, event_index, handle);
-
- return trace_event;
-}
-
-void TraceLog::ThreadLocalEventBuffer::ReportOverhead(
- const TraceTicks& event_timestamp,
- const ThreadTicks& event_thread_timestamp) {
- if (!g_category_group_enabled[g_category_trace_event_overhead])
- return;
-
- CheckThisIsCurrentBuffer();
-
- event_count_++;
- ThreadTicks thread_now = ThreadNow();
- TraceTicks now = trace_log_->OffsetNow();
- TimeDelta overhead = now - event_timestamp;
- if (overhead.InMicroseconds() >= kOverheadReportThresholdInMicroseconds) {
- TraceEvent* trace_event = AddTraceEvent(NULL);
- if (trace_event) {
- trace_event->Initialize(
- static_cast<int>(PlatformThread::CurrentId()), event_timestamp,
- event_thread_timestamp, TRACE_EVENT_PHASE_COMPLETE,
- &g_category_group_enabled[g_category_trace_event_overhead],
- "overhead", ::trace_event_internal::kNoId,
- ::trace_event_internal::kNoId, ::trace_event_internal::kZeroNumArgs,
- NULL, NULL, NULL, NULL, 0);
- trace_event->UpdateDuration(now, thread_now);
- }
- }
- overhead_ += overhead;
-}
-
-void TraceLog::ThreadLocalEventBuffer::WillDestroyCurrentMessageLoop() {
- delete this;
-}
-
-bool TraceLog::ThreadLocalEventBuffer::OnMemoryDump(ProcessMemoryDump* pmd) {
- if (!chunk_)
- return true;
- std::string dump_base_name = StringPrintf(
- "tracing/thread_%d", static_cast<int>(PlatformThread::CurrentId()));
- TraceEventMemoryOverhead overhead;
- chunk_->EstimateTraceMemoryOverhead(&overhead);
- overhead.DumpInto(dump_base_name.c_str(), pmd);
- return true;
-}
-
-void TraceLog::ThreadLocalEventBuffer::FlushWhileLocked() {
- if (!chunk_)
- return;
-
- trace_log_->lock_.AssertAcquired();
- if (trace_log_->CheckGeneration(generation_)) {
- // Return the chunk to the buffer only if the generation matches.
- trace_log_->logged_events_->ReturnChunk(chunk_index_, chunk_.Pass());
- }
- // Otherwise this method may be called from the destructor, or TraceLog will
- // find the generation mismatch and delete this buffer soon.
-}
-
-TraceLogStatus::TraceLogStatus() : event_capacity(0), event_count(0) {
-}
-
-TraceLogStatus::~TraceLogStatus() {
-}
-
-// static
-TraceLog* TraceLog::GetInstance() {
- return Singleton<TraceLog, LeakySingletonTraits<TraceLog> >::get();
-}
-
-TraceLog::TraceLog()
- : mode_(DISABLED),
- num_traces_recorded_(0),
- event_callback_(0),
- dispatching_to_observer_list_(false),
- process_sort_index_(0),
- process_id_hash_(0),
- process_id_(0),
- watch_category_(0),
- trace_options_(kInternalRecordUntilFull),
- sampling_thread_handle_(0),
- trace_config_(TraceConfig()),
- event_callback_trace_config_(TraceConfig()),
- thread_shared_chunk_index_(0),
- generation_(0),
- use_worker_thread_(false) {
- // Trace is enabled or disabled on one thread while other threads are
- // accessing the enabled flag. We don't care whether edge-case events are
- // traced or not, so we allow races on the enabled flag to keep the trace
- // macros fast.
- // TODO(jbates): ANNOTATE_BENIGN_RACE_SIZED crashes windows TSAN bots:
- // ANNOTATE_BENIGN_RACE_SIZED(g_category_group_enabled,
- // sizeof(g_category_group_enabled),
- // "trace_event category enabled");
- for (int i = 0; i < MAX_CATEGORY_GROUPS; ++i) {
- ANNOTATE_BENIGN_RACE(&g_category_group_enabled[i],
- "trace_event category enabled");
- }
-#if defined(OS_NACL) // NaCl shouldn't expose the process id.
- SetProcessID(0);
-#else
- SetProcessID(static_cast<int>(GetCurrentProcId()));
-
- // NaCl also shouldn't access the command line.
- if (CommandLine::InitializedForCurrentProcess() &&
- CommandLine::ForCurrentProcess()->HasSwitch(switches::kTraceToConsole)) {
- std::string filter = CommandLine::ForCurrentProcess()->GetSwitchValueASCII(
- switches::kTraceToConsole);
- if (filter.empty()) {
- filter = kEchoToConsoleCategoryFilter;
- } else {
- filter.append(",");
- filter.append(kEchoToConsoleCategoryFilter);
- }
-
- LOG(ERROR) << "Start " << switches::kTraceToConsole
- << " with CategoryFilter '" << filter << "'.";
- SetEnabled(TraceConfig(filter, ECHO_TO_CONSOLE), RECORDING_MODE);
- }
-#endif
-
- logged_events_.reset(CreateTraceBuffer());
-
- MemoryDumpManager::GetInstance()->RegisterDumpProvider(this);
-}
-
-TraceLog::~TraceLog() {
-}
-
-void TraceLog::InitializeThreadLocalEventBufferIfSupported() {
- // A ThreadLocalEventBuffer needs the message loop
- // - to know when the thread exits;
- // - to handle the final flush.
- // For a thread without a message loop or the message loop may be blocked, the
- // trace events will be added into the main buffer directly.
- if (thread_blocks_message_loop_.Get() || !MessageLoop::current())
- return;
- auto thread_local_event_buffer = thread_local_event_buffer_.Get();
- if (thread_local_event_buffer &&
- !CheckGeneration(thread_local_event_buffer->generation())) {
- delete thread_local_event_buffer;
- thread_local_event_buffer = NULL;
- }
- if (!thread_local_event_buffer) {
- thread_local_event_buffer = new ThreadLocalEventBuffer(this);
- thread_local_event_buffer_.Set(thread_local_event_buffer);
- }
-}
-
-bool TraceLog::OnMemoryDump(ProcessMemoryDump* pmd) {
- TraceEventMemoryOverhead overhead;
- overhead.Add("TraceLog", sizeof(*this));
- {
- AutoLock lock(lock_);
- if (logged_events_)
- logged_events_->EstimateTraceMemoryOverhead(&overhead);
- }
- overhead.AddSelf();
- overhead.DumpInto("tracing/main_trace_log", pmd);
- return true;
-}
-
-const unsigned char* TraceLog::GetCategoryGroupEnabled(
- const char* category_group) {
- TraceLog* tracelog = GetInstance();
- if (!tracelog) {
- DCHECK(!g_category_group_enabled[g_category_already_shutdown]);
- return &g_category_group_enabled[g_category_already_shutdown];
- }
- return tracelog->GetCategoryGroupEnabledInternal(category_group);
-}
-
-const char* TraceLog::GetCategoryGroupName(
- const unsigned char* category_group_enabled) {
- // Calculate the index of the category group by finding
- // category_group_enabled in g_category_group_enabled array.
- uintptr_t category_begin =
- reinterpret_cast<uintptr_t>(g_category_group_enabled);
- uintptr_t category_ptr = reinterpret_cast<uintptr_t>(category_group_enabled);
- DCHECK(category_ptr >= category_begin &&
- category_ptr < reinterpret_cast<uintptr_t>(
- g_category_group_enabled + MAX_CATEGORY_GROUPS)) <<
- "out of bounds category pointer";
- uintptr_t category_index =
- (category_ptr - category_begin) / sizeof(g_category_group_enabled[0]);
- return g_category_groups[category_index];
-}
-
-void TraceLog::UpdateCategoryGroupEnabledFlag(size_t category_index) {
- unsigned char enabled_flag = 0;
- const char* category_group = g_category_groups[category_index];
- if (mode_ == RECORDING_MODE &&
- trace_config_.IsCategoryGroupEnabled(category_group))
- enabled_flag |= ENABLED_FOR_RECORDING;
- else if (mode_ == MONITORING_MODE &&
- trace_config_.IsCategoryGroupEnabled(category_group))
- enabled_flag |= ENABLED_FOR_MONITORING;
- if (event_callback_ &&
- event_callback_trace_config_.IsCategoryGroupEnabled(category_group))
- enabled_flag |= ENABLED_FOR_EVENT_CALLBACK;
-#if defined(OS_WIN)
- if (base::trace_event::TraceEventETWExport::IsCategoryGroupEnabled(
- category_group)) {
- enabled_flag |= ENABLED_FOR_ETW_EXPORT;
- }
-#endif
-
- g_category_group_enabled[category_index] = enabled_flag;
-}
-
-void TraceLog::UpdateCategoryGroupEnabledFlags() {
- size_t category_index = base::subtle::NoBarrier_Load(&g_category_index);
- for (size_t i = 0; i < category_index; i++)
- UpdateCategoryGroupEnabledFlag(i);
-}
-
-void TraceLog::UpdateSyntheticDelaysFromTraceConfig() {
- ResetTraceEventSyntheticDelays();
- const TraceConfig::StringList& delays =
- trace_config_.GetSyntheticDelayValues();
- TraceConfig::StringList::const_iterator ci;
- for (ci = delays.begin(); ci != delays.end(); ++ci) {
- StringTokenizer tokens(*ci, ";");
- if (!tokens.GetNext())
- continue;
- TraceEventSyntheticDelay* delay =
- TraceEventSyntheticDelay::Lookup(tokens.token());
- while (tokens.GetNext()) {
- std::string token = tokens.token();
- char* duration_end;
- double target_duration = strtod(token.c_str(), &duration_end);
- if (duration_end != token.c_str()) {
- delay->SetTargetDuration(TimeDelta::FromMicroseconds(
- static_cast<int64>(target_duration * 1e6)));
- } else if (token == "static") {
- delay->SetMode(TraceEventSyntheticDelay::STATIC);
- } else if (token == "oneshot") {
- delay->SetMode(TraceEventSyntheticDelay::ONE_SHOT);
- } else if (token == "alternating") {
- delay->SetMode(TraceEventSyntheticDelay::ALTERNATING);
- }
- }
- }
-}
-
-const unsigned char* TraceLog::GetCategoryGroupEnabledInternal(
- const char* category_group) {
- DCHECK(!strchr(category_group, '"')) <<
- "Category groups may not contain double quote";
- // The g_category_groups is append only, avoid using a lock for the fast path.
- size_t current_category_index = base::subtle::Acquire_Load(&g_category_index);
-
- // Search for pre-existing category group.
- for (size_t i = 0; i < current_category_index; ++i) {
- if (strcmp(g_category_groups[i], category_group) == 0) {
- return &g_category_group_enabled[i];
- }
- }
-
- unsigned char* category_group_enabled = NULL;
- // This is the slow path: the lock is not held in the case above, so more
- // than one thread could have reached here trying to add the same category.
- // Only hold to lock when actually appending a new category, and
- // check the categories groups again.
- AutoLock lock(lock_);
- size_t category_index = base::subtle::Acquire_Load(&g_category_index);
- for (size_t i = 0; i < category_index; ++i) {
- if (strcmp(g_category_groups[i], category_group) == 0) {
- return &g_category_group_enabled[i];
- }
- }
-
- // Create a new category group.
- DCHECK(category_index < MAX_CATEGORY_GROUPS) <<
- "must increase MAX_CATEGORY_GROUPS";
- if (category_index < MAX_CATEGORY_GROUPS) {
- // Don't hold on to the category_group pointer, so that we can create
- // category groups with strings not known at compile time (this is
- // required by SetWatchEvent).
- const char* new_group = strdup(category_group);
- ANNOTATE_LEAKING_OBJECT_PTR(new_group);
- g_category_groups[category_index] = new_group;
- DCHECK(!g_category_group_enabled[category_index]);
- // Note that if both included and excluded patterns in the
- // TraceConfig are empty, we exclude nothing,
- // thereby enabling this category group.
- UpdateCategoryGroupEnabledFlag(category_index);
- category_group_enabled = &g_category_group_enabled[category_index];
- // Update the max index now.
- base::subtle::Release_Store(&g_category_index, category_index + 1);
- } else {
- category_group_enabled =
- &g_category_group_enabled[g_category_categories_exhausted];
- }
- return category_group_enabled;
-}
-
-void TraceLog::GetKnownCategoryGroups(
- std::vector<std::string>* category_groups) {
- AutoLock lock(lock_);
- category_groups->push_back(
- g_category_groups[g_category_trace_event_overhead]);
- size_t category_index = base::subtle::NoBarrier_Load(&g_category_index);
- for (size_t i = g_num_builtin_categories; i < category_index; i++)
- category_groups->push_back(g_category_groups[i]);
-}
-
-void TraceLog::SetEnabled(const TraceConfig& trace_config, Mode mode) {
- std::vector<EnabledStateObserver*> observer_list;
- {
- AutoLock lock(lock_);
-
- // Can't enable tracing when Flush() is in progress.
- DCHECK(!flush_task_runner_);
-
- InternalTraceOptions new_options =
- GetInternalOptionsFromTraceConfig(trace_config);
-
- InternalTraceOptions old_options = trace_options();
-
- if (IsEnabled()) {
- if (new_options != old_options) {
- DLOG(ERROR) << "Attempting to re-enable tracing with a different "
- << "set of options.";
- }
-
- if (mode != mode_) {
- DLOG(ERROR) << "Attempting to re-enable tracing with a different mode.";
- }
-
- trace_config_.Merge(trace_config);
- UpdateCategoryGroupEnabledFlags();
- return;
- }
-
- if (dispatching_to_observer_list_) {
- DLOG(ERROR) <<
- "Cannot manipulate TraceLog::Enabled state from an observer.";
- return;
- }
-
- mode_ = mode;
-
- if (new_options != old_options) {
- subtle::NoBarrier_Store(&trace_options_, new_options);
- UseNextTraceBuffer();
- }
-
- num_traces_recorded_++;
-
- trace_config_ = TraceConfig(trace_config);
- UpdateCategoryGroupEnabledFlags();
- UpdateSyntheticDelaysFromTraceConfig();
-
- if (new_options & kInternalEnableSampling) {
- sampling_thread_.reset(new TraceSamplingThread);
- sampling_thread_->RegisterSampleBucket(
- &g_trace_state[0],
- "bucket0",
- Bind(&TraceSamplingThread::DefaultSamplingCallback));
- sampling_thread_->RegisterSampleBucket(
- &g_trace_state[1],
- "bucket1",
- Bind(&TraceSamplingThread::DefaultSamplingCallback));
- sampling_thread_->RegisterSampleBucket(
- &g_trace_state[2],
- "bucket2",
- Bind(&TraceSamplingThread::DefaultSamplingCallback));
- if (!PlatformThread::Create(
- 0, sampling_thread_.get(), &sampling_thread_handle_)) {
- DCHECK(false) << "failed to create thread";
- }
- }
-
- dispatching_to_observer_list_ = true;
- observer_list = enabled_state_observer_list_;
- }
- // Notify observers outside the lock in case they trigger trace events.
- for (size_t i = 0; i < observer_list.size(); ++i)
- observer_list[i]->OnTraceLogEnabled();
-
- {
- AutoLock lock(lock_);
- dispatching_to_observer_list_ = false;
- }
-}
-
-void TraceLog::SetArgumentFilterPredicate(
- const TraceEvent::ArgumentFilterPredicate& argument_filter_predicate) {
- AutoLock lock(lock_);
- DCHECK(!argument_filter_predicate.is_null());
- DCHECK(argument_filter_predicate_.is_null());
- argument_filter_predicate_ = argument_filter_predicate;
-}
-
-TraceLog::InternalTraceOptions TraceLog::GetInternalOptionsFromTraceConfig(
- const TraceConfig& config) {
- InternalTraceOptions ret =
- config.IsSamplingEnabled() ? kInternalEnableSampling : kInternalNone;
- if (config.IsArgumentFilterEnabled())
- ret |= kInternalEnableArgumentFilter;
- switch (config.GetTraceRecordMode()) {
- case RECORD_UNTIL_FULL:
- return ret | kInternalRecordUntilFull;
- case RECORD_CONTINUOUSLY:
- return ret | kInternalRecordContinuously;
- case ECHO_TO_CONSOLE:
- return ret | kInternalEchoToConsole;
- case RECORD_AS_MUCH_AS_POSSIBLE:
- return ret | kInternalRecordAsMuchAsPossible;
- }
- NOTREACHED();
- return kInternalNone;
-}
-
-TraceConfig TraceLog::GetCurrentTraceConfig() const {
- AutoLock lock(lock_);
- return trace_config_;
-}
-
-void TraceLog::SetDisabled() {
- AutoLock lock(lock_);
- SetDisabledWhileLocked();
-}
-
-void TraceLog::SetDisabledWhileLocked() {
- lock_.AssertAcquired();
-
- if (!IsEnabled())
- return;
-
- if (dispatching_to_observer_list_) {
- DLOG(ERROR)
- << "Cannot manipulate TraceLog::Enabled state from an observer.";
- return;
- }
-
- mode_ = DISABLED;
-
- if (sampling_thread_.get()) {
- // Stop the sampling thread.
- sampling_thread_->Stop();
- lock_.Release();
- PlatformThread::Join(sampling_thread_handle_);
- lock_.Acquire();
- sampling_thread_handle_ = PlatformThreadHandle();
- sampling_thread_.reset();
- }
-
- trace_config_.Clear();
- subtle::NoBarrier_Store(&watch_category_, 0);
- watch_event_name_ = "";
- UpdateCategoryGroupEnabledFlags();
- AddMetadataEventsWhileLocked();
-
- dispatching_to_observer_list_ = true;
- std::vector<EnabledStateObserver*> observer_list =
- enabled_state_observer_list_;
-
- {
- // Dispatch to observers outside the lock in case the observer triggers a
- // trace event.
- AutoUnlock unlock(lock_);
- for (size_t i = 0; i < observer_list.size(); ++i)
- observer_list[i]->OnTraceLogDisabled();
- }
- dispatching_to_observer_list_ = false;
-}
-
-int TraceLog::GetNumTracesRecorded() {
- AutoLock lock(lock_);
- if (!IsEnabled())
- return -1;
- return num_traces_recorded_;
-}
-
-void TraceLog::AddEnabledStateObserver(EnabledStateObserver* listener) {
- enabled_state_observer_list_.push_back(listener);
-}
-
-void TraceLog::RemoveEnabledStateObserver(EnabledStateObserver* listener) {
- std::vector<EnabledStateObserver*>::iterator it =
- std::find(enabled_state_observer_list_.begin(),
- enabled_state_observer_list_.end(),
- listener);
- if (it != enabled_state_observer_list_.end())
- enabled_state_observer_list_.erase(it);
-}
-
-bool TraceLog::HasEnabledStateObserver(EnabledStateObserver* listener) const {
- std::vector<EnabledStateObserver*>::const_iterator it =
- std::find(enabled_state_observer_list_.begin(),
- enabled_state_observer_list_.end(),
- listener);
- return it != enabled_state_observer_list_.end();
-}
-
-TraceLogStatus TraceLog::GetStatus() const {
- AutoLock lock(lock_);
- TraceLogStatus result;
- result.event_capacity = logged_events_->Capacity();
- result.event_count = logged_events_->Size();
- return result;
-}
-
-bool TraceLog::BufferIsFull() const {
- AutoLock lock(lock_);
- return logged_events_->IsFull();
-}
-
-TraceBuffer* TraceLog::CreateTraceBuffer() {
- InternalTraceOptions options = trace_options();
- if (options & kInternalRecordContinuously)
- return new TraceBufferRingBuffer(kTraceEventRingBufferChunks);
- else if ((options & kInternalEnableSampling) && mode_ == MONITORING_MODE)
- return new TraceBufferRingBuffer(kMonitorTraceEventBufferChunks);
- else if (options & kInternalEchoToConsole)
- return new TraceBufferRingBuffer(kEchoToConsoleTraceEventBufferChunks);
- else if (options & kInternalRecordAsMuchAsPossible)
- return CreateTraceBufferVectorOfSize(kTraceEventVectorBigBufferChunks);
- return CreateTraceBufferVectorOfSize(kTraceEventVectorBufferChunks);
-}
-
-TraceBuffer* TraceLog::CreateTraceBufferVectorOfSize(size_t max_chunks) {
- return new TraceBufferVector(max_chunks);
-}
-
-TraceEvent* TraceLog::AddEventToThreadSharedChunkWhileLocked(
- TraceEventHandle* handle, bool check_buffer_is_full) {
- lock_.AssertAcquired();
-
- if (thread_shared_chunk_ && thread_shared_chunk_->IsFull()) {
- logged_events_->ReturnChunk(thread_shared_chunk_index_,
- thread_shared_chunk_.Pass());
- }
-
- if (!thread_shared_chunk_) {
- thread_shared_chunk_ = logged_events_->GetChunk(
- &thread_shared_chunk_index_);
- if (check_buffer_is_full)
- CheckIfBufferIsFullWhileLocked();
- }
- if (!thread_shared_chunk_)
- return NULL;
-
- size_t event_index;
- TraceEvent* trace_event = thread_shared_chunk_->AddTraceEvent(&event_index);
- if (trace_event && handle) {
- MakeHandle(thread_shared_chunk_->seq(), thread_shared_chunk_index_,
- event_index, handle);
- }
- return trace_event;
-}
-
-void TraceLog::CheckIfBufferIsFullWhileLocked() {
- lock_.AssertAcquired();
- if (logged_events_->IsFull()) {
- if (buffer_limit_reached_timestamp_.is_null()) {
- buffer_limit_reached_timestamp_ = OffsetNow();
- }
- SetDisabledWhileLocked();
- }
-}
-
-void TraceLog::SetEventCallbackEnabled(const TraceConfig& trace_config,
- EventCallback cb) {
- AutoLock lock(lock_);
- subtle::NoBarrier_Store(&event_callback_,
- reinterpret_cast<subtle::AtomicWord>(cb));
- event_callback_trace_config_ = trace_config;
- UpdateCategoryGroupEnabledFlags();
-};
-
-void TraceLog::SetEventCallbackDisabled() {
- AutoLock lock(lock_);
- subtle::NoBarrier_Store(&event_callback_, 0);
- UpdateCategoryGroupEnabledFlags();
-}
-
-// Flush() works as the following:
-// 1. Flush() is called in thread A whose task runner is saved in
-// flush_task_runner_;
-// 2. If thread_message_loops_ is not empty, thread A posts task to each message
-// loop to flush the thread local buffers; otherwise finish the flush;
-// 3. FlushCurrentThread() deletes the thread local event buffer:
-// - The last batch of events of the thread are flushed into the main buffer;
-// - The message loop will be removed from thread_message_loops_;
-// If this is the last message loop, finish the flush;
-// 4. If any thread hasn't finish its flush in time, finish the flush.
-void TraceLog::Flush(const TraceLog::OutputCallback& cb,
- bool use_worker_thread) {
- FlushInternal(cb, use_worker_thread, false);
-}
-
-void TraceLog::CancelTracing(const OutputCallback& cb) {
- SetDisabled();
- FlushInternal(cb, false, true);
-}
-
-void TraceLog::FlushInternal(const TraceLog::OutputCallback& cb,
- bool use_worker_thread,
- bool discard_events) {
- use_worker_thread_ = use_worker_thread;
- if (IsEnabled()) {
- // Can't flush when tracing is enabled because otherwise PostTask would
- // - generate more trace events;
- // - deschedule the calling thread on some platforms causing inaccurate
- // timing of the trace events.
- scoped_refptr<RefCountedString> empty_result = new RefCountedString;
- if (!cb.is_null())
- cb.Run(empty_result, false);
- LOG(WARNING) << "Ignored TraceLog::Flush called when tracing is enabled";
- return;
- }
-
- int generation = this->generation();
- // Copy of thread_message_loops_ to be used without locking.
- std::vector<scoped_refptr<SingleThreadTaskRunner> >
- thread_message_loop_task_runners;
- {
- AutoLock lock(lock_);
- DCHECK(!flush_task_runner_);
- flush_task_runner_ = ThreadTaskRunnerHandle::IsSet()
- ? ThreadTaskRunnerHandle::Get()
- : nullptr;
- DCHECK_IMPLIES(thread_message_loops_.size(), flush_task_runner_);
- flush_output_callback_ = cb;
-
- if (thread_shared_chunk_) {
- logged_events_->ReturnChunk(thread_shared_chunk_index_,
- thread_shared_chunk_.Pass());
- }
-
- if (thread_message_loops_.size()) {
- for (hash_set<MessageLoop*>::const_iterator it =
- thread_message_loops_.begin();
- it != thread_message_loops_.end(); ++it) {
- thread_message_loop_task_runners.push_back((*it)->task_runner());
- }
- }
- }
-
- if (thread_message_loop_task_runners.size()) {
- for (size_t i = 0; i < thread_message_loop_task_runners.size(); ++i) {
- thread_message_loop_task_runners[i]->PostTask(
- FROM_HERE, Bind(&TraceLog::FlushCurrentThread, Unretained(this),
- generation, discard_events));
- }
- flush_task_runner_->PostDelayedTask(
- FROM_HERE, Bind(&TraceLog::OnFlushTimeout, Unretained(this), generation,
- discard_events),
- TimeDelta::FromMilliseconds(kThreadFlushTimeoutMs));
- return;
- }
-
- FinishFlush(generation, discard_events);
-}
-
-// Usually it runs on a different thread.
-void TraceLog::ConvertTraceEventsToTraceFormat(
- scoped_ptr<TraceBuffer> logged_events,
- const OutputCallback& flush_output_callback,
- const TraceEvent::ArgumentFilterPredicate& argument_filter_predicate) {
- if (flush_output_callback.is_null())
- return;
-
- // The callback need to be called at least once even if there is no events
- // to let the caller know the completion of flush.
- scoped_refptr<RefCountedString> json_events_str_ptr = new RefCountedString();
- while (const TraceBufferChunk* chunk = logged_events->NextChunk()) {
- for (size_t j = 0; j < chunk->size(); ++j) {
- size_t size = json_events_str_ptr->size();
- if (size > kTraceEventBufferSizeInBytes) {
- flush_output_callback.Run(json_events_str_ptr, true);
- json_events_str_ptr = new RefCountedString();
- } else if (size) {
- json_events_str_ptr->data().append(",\n");
- }
- chunk->GetEventAt(j)->AppendAsJSON(&(json_events_str_ptr->data()),
- argument_filter_predicate);
- }
- }
- flush_output_callback.Run(json_events_str_ptr, false);
-}
-
-void TraceLog::FinishFlush(int generation, bool discard_events) {
- scoped_ptr<TraceBuffer> previous_logged_events;
- OutputCallback flush_output_callback;
- TraceEvent::ArgumentFilterPredicate argument_filter_predicate;
-
- if (!CheckGeneration(generation))
- return;
-
- {
- AutoLock lock(lock_);
-
- previous_logged_events.swap(logged_events_);
- UseNextTraceBuffer();
- thread_message_loops_.clear();
-
- flush_task_runner_ = NULL;
- flush_output_callback = flush_output_callback_;
- flush_output_callback_.Reset();
-
- if (trace_options() & kInternalEnableArgumentFilter) {
- CHECK(!argument_filter_predicate_.is_null());
- argument_filter_predicate = argument_filter_predicate_;
- }
- }
-
- if (discard_events) {
- if (!flush_output_callback.is_null()) {
- scoped_refptr<RefCountedString> empty_result = new RefCountedString;
- flush_output_callback.Run(empty_result, false);
- }
- return;
- }
-
- if (use_worker_thread_ &&
- WorkerPool::PostTask(
- FROM_HERE, Bind(&TraceLog::ConvertTraceEventsToTraceFormat,
- Passed(&previous_logged_events),
- flush_output_callback, argument_filter_predicate),
- true)) {
- return;
- }
-
- ConvertTraceEventsToTraceFormat(previous_logged_events.Pass(),
- flush_output_callback,
- argument_filter_predicate);
-}
-
-// Run in each thread holding a local event buffer.
-void TraceLog::FlushCurrentThread(int generation, bool discard_events) {
- {
- AutoLock lock(lock_);
- if (!CheckGeneration(generation) || !flush_task_runner_) {
- // This is late. The corresponding flush has finished.
- return;
- }
- }
-
- // This will flush the thread local buffer.
- delete thread_local_event_buffer_.Get();
-
- AutoLock lock(lock_);
- if (!CheckGeneration(generation) || !flush_task_runner_ ||
- thread_message_loops_.size())
- return;
-
- flush_task_runner_->PostTask(
- FROM_HERE, Bind(&TraceLog::FinishFlush, Unretained(this), generation,
- discard_events));
-}
-
-void TraceLog::OnFlushTimeout(int generation, bool discard_events) {
- {
- AutoLock lock(lock_);
- if (!CheckGeneration(generation) || !flush_task_runner_) {
- // Flush has finished before timeout.
- return;
- }
-
- LOG(WARNING) <<
- "The following threads haven't finished flush in time. "
- "If this happens stably for some thread, please call "
- "TraceLog::GetInstance()->SetCurrentThreadBlocksMessageLoop() from "
- "the thread to avoid its trace events from being lost.";
- for (hash_set<MessageLoop*>::const_iterator it =
- thread_message_loops_.begin();
- it != thread_message_loops_.end(); ++it) {
- LOG(WARNING) << "Thread: " << (*it)->thread_name();
- }
- }
- FinishFlush(generation, discard_events);
-}
-
-void TraceLog::FlushButLeaveBufferIntact(
- const TraceLog::OutputCallback& flush_output_callback) {
- scoped_ptr<TraceBuffer> previous_logged_events;
- TraceEvent::ArgumentFilterPredicate argument_filter_predicate;
- {
- AutoLock lock(lock_);
- AddMetadataEventsWhileLocked();
- if (thread_shared_chunk_) {
- // Return the chunk to the main buffer to flush the sampling data.
- logged_events_->ReturnChunk(thread_shared_chunk_index_,
- thread_shared_chunk_.Pass());
- }
- previous_logged_events = logged_events_->CloneForIteration().Pass();
-
- if (trace_options() & kInternalEnableArgumentFilter) {
- CHECK(!argument_filter_predicate_.is_null());
- argument_filter_predicate = argument_filter_predicate_;
- }
- } // release lock
-
- ConvertTraceEventsToTraceFormat(previous_logged_events.Pass(),
- flush_output_callback,
- argument_filter_predicate);
-}
-
-void TraceLog::UseNextTraceBuffer() {
- logged_events_.reset(CreateTraceBuffer());
- subtle::NoBarrier_AtomicIncrement(&generation_, 1);
- thread_shared_chunk_.reset();
- thread_shared_chunk_index_ = 0;
-}
-
-TraceEventHandle TraceLog::AddTraceEvent(
- char phase,
- const unsigned char* category_group_enabled,
- const char* name,
- unsigned long long id,
- int num_args,
- const char** arg_names,
- const unsigned char* arg_types,
- const unsigned long long* arg_values,
- const scoped_refptr<ConvertableToTraceFormat>* convertable_values,
- unsigned int flags) {
- int thread_id = static_cast<int>(base::PlatformThread::CurrentId());
- base::TraceTicks now = base::TraceTicks::Now();
- return AddTraceEventWithThreadIdAndTimestamp(
- phase, category_group_enabled, name, id, ::trace_event_internal::kNoId,
- thread_id, now, num_args, arg_names, arg_types, arg_values,
- convertable_values, flags);
-}
-
-TraceEventHandle TraceLog::AddTraceEventWithContextId(
- char phase,
- const unsigned char* category_group_enabled,
- const char* name,
- unsigned long long id,
- unsigned long long context_id,
- int num_args,
- const char** arg_names,
- const unsigned char* arg_types,
- const unsigned long long* arg_values,
- const scoped_refptr<ConvertableToTraceFormat>* convertable_values,
- unsigned int flags) {
- int thread_id = static_cast<int>(base::PlatformThread::CurrentId());
- base::TraceTicks now = base::TraceTicks::Now();
- return AddTraceEventWithThreadIdAndTimestamp(
- phase, category_group_enabled, name, id, context_id, thread_id, now,
- num_args, arg_names, arg_types, arg_values, convertable_values,
- flags | TRACE_EVENT_FLAG_HAS_CONTEXT_ID);
-}
-
-TraceEventHandle TraceLog::AddTraceEventWithThreadIdAndTimestamp(
- char phase,
- const unsigned char* category_group_enabled,
- const char* name,
- unsigned long long id,
- unsigned long long context_id,
- int thread_id,
- const TraceTicks& timestamp,
- int num_args,
- const char** arg_names,
- const unsigned char* arg_types,
- const unsigned long long* arg_values,
- const scoped_refptr<ConvertableToTraceFormat>* convertable_values,
- unsigned int flags) {
- TraceEventHandle handle = { 0, 0, 0 };
- if (!*category_group_enabled)
- return handle;
-
- // Avoid re-entrance of AddTraceEvent. This may happen in GPU process when
- // ECHO_TO_CONSOLE is enabled: AddTraceEvent -> LOG(ERROR) ->
- // GpuProcessLogMessageHandler -> PostPendingTask -> TRACE_EVENT ...
- if (thread_is_in_trace_event_.Get())
- return handle;
-
- AutoThreadLocalBoolean thread_is_in_trace_event(&thread_is_in_trace_event_);
-
- DCHECK(name);
- DCHECK(!timestamp.is_null());
-
- if (flags & TRACE_EVENT_FLAG_MANGLE_ID)
- id = MangleEventId(id);
-
- TraceTicks offset_event_timestamp = OffsetTimestamp(timestamp);
- TraceTicks now = flags & TRACE_EVENT_FLAG_EXPLICIT_TIMESTAMP ?
- OffsetNow() : offset_event_timestamp;
- ThreadTicks thread_now = ThreadNow();
-
- // |thread_local_event_buffer_| can be null if the current thread doesn't have
- // a message loop or the message loop is blocked.
- InitializeThreadLocalEventBufferIfSupported();
- auto thread_local_event_buffer = thread_local_event_buffer_.Get();
-
- // Check and update the current thread name only if the event is for the
- // current thread to avoid locks in most cases.
- if (thread_id == static_cast<int>(PlatformThread::CurrentId())) {
- const char* new_name = ThreadIdNameManager::GetInstance()->
- GetName(thread_id);
- // Check if the thread name has been set or changed since the previous
- // call (if any), but don't bother if the new name is empty. Note this will
- // not detect a thread name change within the same char* buffer address: we
- // favor common case performance over corner case correctness.
- if (new_name != g_current_thread_name.Get().Get() &&
- new_name && *new_name) {
- g_current_thread_name.Get().Set(new_name);
-
- AutoLock thread_info_lock(thread_info_lock_);
-
- hash_map<int, std::string>::iterator existing_name =
- thread_names_.find(thread_id);
- if (existing_name == thread_names_.end()) {
- // This is a new thread id, and a new name.
- thread_names_[thread_id] = new_name;
- } else {
- // This is a thread id that we've seen before, but potentially with a
- // new name.
- std::vector<StringPiece> existing_names =
- base::SplitStringPiece(existing_name->second, ",",
- base::KEEP_WHITESPACE,
- base::SPLIT_WANT_NONEMPTY);
- bool found = std::find(existing_names.begin(),
- existing_names.end(),
- new_name) != existing_names.end();
- if (!found) {
- if (existing_names.size())
- existing_name->second.push_back(',');
- existing_name->second.append(new_name);
- }
- }
- }
- }
-
-#if defined(OS_WIN)
- // This is done sooner rather than later, to avoid creating the event and
- // acquiring the lock, which is not needed for ETW as it's already threadsafe.
- if (*category_group_enabled & ENABLED_FOR_ETW_EXPORT)
- TraceEventETWExport::AddEvent(phase, category_group_enabled, name, id,
- num_args, arg_names, arg_types, arg_values,
- convertable_values);
-#endif // OS_WIN
-
- std::string console_message;
- if (*category_group_enabled &
- (ENABLED_FOR_RECORDING | ENABLED_FOR_MONITORING)) {
- OptionalAutoLock lock(&lock_);
-
- TraceEvent* trace_event = NULL;
- if (thread_local_event_buffer) {
- trace_event = thread_local_event_buffer->AddTraceEvent(&handle);
- } else {
- lock.EnsureAcquired();
- trace_event = AddEventToThreadSharedChunkWhileLocked(&handle, true);
- }
-
- if (trace_event) {
- trace_event->Initialize(thread_id, offset_event_timestamp, thread_now,
- phase, category_group_enabled, name, id,
- context_id, num_args, arg_names, arg_types,
- arg_values, convertable_values, flags);
-
-#if defined(OS_ANDROID)
- trace_event->SendToATrace();
-#endif
- }
-
- if (trace_options() & kInternalEchoToConsole) {
- console_message = EventToConsoleMessage(
- phase == TRACE_EVENT_PHASE_COMPLETE ? TRACE_EVENT_PHASE_BEGIN : phase,
- timestamp, trace_event);
- }
- }
-
- if (console_message.size())
- LOG(ERROR) << console_message;
-
- if (reinterpret_cast<const unsigned char*>(subtle::NoBarrier_Load(
- &watch_category_)) == category_group_enabled) {
- bool event_name_matches;
- WatchEventCallback watch_event_callback_copy;
- {
- AutoLock lock(lock_);
- event_name_matches = watch_event_name_ == name;
- watch_event_callback_copy = watch_event_callback_;
- }
- if (event_name_matches) {
- if (!watch_event_callback_copy.is_null())
- watch_event_callback_copy.Run();
- }
- }
-
- if (*category_group_enabled & ENABLED_FOR_EVENT_CALLBACK) {
- EventCallback event_callback = reinterpret_cast<EventCallback>(
- subtle::NoBarrier_Load(&event_callback_));
- if (event_callback) {
- event_callback(offset_event_timestamp,
- phase == TRACE_EVENT_PHASE_COMPLETE ?
- TRACE_EVENT_PHASE_BEGIN : phase,
- category_group_enabled, name, id,
- num_args, arg_names, arg_types, arg_values,
- flags);
- }
- }
-
- if (thread_local_event_buffer)
- thread_local_event_buffer->ReportOverhead(now, thread_now);
-
- return handle;
-}
-
-// May be called when a COMPELETE event ends and the unfinished event has been
-// recycled (phase == TRACE_EVENT_PHASE_END and trace_event == NULL).
-std::string TraceLog::EventToConsoleMessage(unsigned char phase,
- const TraceTicks& timestamp,
- TraceEvent* trace_event) {
- AutoLock thread_info_lock(thread_info_lock_);
-
- // The caller should translate TRACE_EVENT_PHASE_COMPLETE to
- // TRACE_EVENT_PHASE_BEGIN or TRACE_EVENT_END.
- DCHECK(phase != TRACE_EVENT_PHASE_COMPLETE);
-
- TimeDelta duration;
- int thread_id = trace_event ?
- trace_event->thread_id() : PlatformThread::CurrentId();
- if (phase == TRACE_EVENT_PHASE_END) {
- duration = timestamp - thread_event_start_times_[thread_id].top();
- thread_event_start_times_[thread_id].pop();
- }
-
- std::string thread_name = thread_names_[thread_id];
- if (thread_colors_.find(thread_name) == thread_colors_.end())
- thread_colors_[thread_name] = (thread_colors_.size() % 6) + 1;
-
- std::ostringstream log;
- log << base::StringPrintf("%s: \x1b[0;3%dm",
- thread_name.c_str(),
- thread_colors_[thread_name]);
-
- size_t depth = 0;
- if (thread_event_start_times_.find(thread_id) !=
- thread_event_start_times_.end())
- depth = thread_event_start_times_[thread_id].size();
-
- for (size_t i = 0; i < depth; ++i)
- log << "| ";
-
- if (trace_event)
- trace_event->AppendPrettyPrinted(&log);
- if (phase == TRACE_EVENT_PHASE_END)
- log << base::StringPrintf(" (%.3f ms)", duration.InMillisecondsF());
-
- log << "\x1b[0;m";
-
- if (phase == TRACE_EVENT_PHASE_BEGIN)
- thread_event_start_times_[thread_id].push(timestamp);
-
- return log.str();
-}
-
-void TraceLog::AddTraceEventEtw(char phase,
- const char* name,
- const void* id,
- const char* extra) {
-#if defined(OS_WIN)
- TraceEventETWProvider::Trace(name, phase, id, extra);
-#endif
- INTERNAL_TRACE_EVENT_ADD(phase, "ETW Trace Event", name,
- TRACE_EVENT_FLAG_COPY, "id", id, "extra", extra);
-}
-
-void TraceLog::AddTraceEventEtw(char phase,
- const char* name,
- const void* id,
- const std::string& extra) {
-#if defined(OS_WIN)
- TraceEventETWProvider::Trace(name, phase, id, extra);
-#endif
- INTERNAL_TRACE_EVENT_ADD(phase, "ETW Trace Event", name,
- TRACE_EVENT_FLAG_COPY, "id", id, "extra", extra);
-}
-
-void TraceLog::UpdateTraceEventDuration(
- const unsigned char* category_group_enabled,
- const char* name,
- TraceEventHandle handle) {
- // Avoid re-entrance of AddTraceEvent. This may happen in GPU process when
- // ECHO_TO_CONSOLE is enabled: AddTraceEvent -> LOG(ERROR) ->
- // GpuProcessLogMessageHandler -> PostPendingTask -> TRACE_EVENT ...
- if (thread_is_in_trace_event_.Get())
- return;
-
- AutoThreadLocalBoolean thread_is_in_trace_event(&thread_is_in_trace_event_);
-
- ThreadTicks thread_now = ThreadNow();
- TraceTicks now = OffsetNow();
-
- std::string console_message;
- if (*category_group_enabled & ENABLED_FOR_RECORDING) {
- OptionalAutoLock lock(&lock_);
-
- TraceEvent* trace_event = GetEventByHandleInternal(handle, &lock);
- if (trace_event) {
- DCHECK(trace_event->phase() == TRACE_EVENT_PHASE_COMPLETE);
- trace_event->UpdateDuration(now, thread_now);
-#if defined(OS_ANDROID)
- trace_event->SendToATrace();
-#endif
- }
-
- if (trace_options() & kInternalEchoToConsole) {
- console_message = EventToConsoleMessage(TRACE_EVENT_PHASE_END,
- now, trace_event);
- }
- }
-
- if (console_message.size())
- LOG(ERROR) << console_message;
-
- if (*category_group_enabled & ENABLED_FOR_EVENT_CALLBACK) {
- EventCallback event_callback = reinterpret_cast<EventCallback>(
- subtle::NoBarrier_Load(&event_callback_));
- if (event_callback) {
- event_callback(now, TRACE_EVENT_PHASE_END, category_group_enabled, name,
- trace_event_internal::kNoId, trace_event_internal::kNoId,
- NULL, NULL, NULL, TRACE_EVENT_FLAG_NONE);
- }
- }
-}
-
-void TraceLog::SetWatchEvent(const std::string& category_name,
- const std::string& event_name,
- const WatchEventCallback& callback) {
- const unsigned char* category = GetCategoryGroupEnabled(
- category_name.c_str());
- AutoLock lock(lock_);
- subtle::NoBarrier_Store(&watch_category_,
- reinterpret_cast<subtle::AtomicWord>(category));
- watch_event_name_ = event_name;
- watch_event_callback_ = callback;
-}
-
-void TraceLog::CancelWatchEvent() {
- AutoLock lock(lock_);
- subtle::NoBarrier_Store(&watch_category_, 0);
- watch_event_name_ = "";
- watch_event_callback_.Reset();
-}
-
-uint64 TraceLog::MangleEventId(uint64 id) {
- return id ^ process_id_hash_;
-}
-
-void TraceLog::AddMetadataEventsWhileLocked() {
- lock_.AssertAcquired();
-
-#if !defined(OS_NACL) // NaCl shouldn't expose the process id.
- InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, false),
- 0,
- "num_cpus", "number",
- base::SysInfo::NumberOfProcessors());
-#endif
-
-
- int current_thread_id = static_cast<int>(base::PlatformThread::CurrentId());
- if (process_sort_index_ != 0) {
- InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, false),
- current_thread_id,
- "process_sort_index", "sort_index",
- process_sort_index_);
- }
-
- if (process_name_.size()) {
- InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, false),
- current_thread_id,
- "process_name", "name",
- process_name_);
- }
-
- if (process_labels_.size() > 0) {
- std::vector<std::string> labels;
- for(base::hash_map<int, std::string>::iterator it = process_labels_.begin();
- it != process_labels_.end();
- it++) {
- labels.push_back(it->second);
- }
- InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, false),
- current_thread_id,
- "process_labels", "labels",
- base::JoinString(labels, ","));
- }
-
- // Thread sort indices.
- for(hash_map<int, int>::iterator it = thread_sort_indices_.begin();
- it != thread_sort_indices_.end();
- it++) {
- if (it->second == 0)
- continue;
- InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, false),
- it->first,
- "thread_sort_index", "sort_index",
- it->second);
- }
-
- // Thread names.
- AutoLock thread_info_lock(thread_info_lock_);
- for(hash_map<int, std::string>::iterator it = thread_names_.begin();
- it != thread_names_.end();
- it++) {
- if (it->second.empty())
- continue;
- InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, false),
- it->first,
- "thread_name", "name",
- it->second);
- }
-
- // If buffer is full, add a metadata record to report this.
- if (!buffer_limit_reached_timestamp_.is_null()) {
- InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, false),
- current_thread_id,
- "trace_buffer_overflowed",
- "overflowed_at_ts",
- buffer_limit_reached_timestamp_);
- }
-}
-
-void TraceLog::WaitSamplingEventForTesting() {
- if (!sampling_thread_)
- return;
- sampling_thread_->WaitSamplingEventForTesting();
-}
-
-void TraceLog::DeleteForTesting() {
- DeleteTraceLogForTesting::Delete();
-}
-
-TraceEvent* TraceLog::GetEventByHandle(TraceEventHandle handle) {
- return GetEventByHandleInternal(handle, NULL);
-}
-
-TraceEvent* TraceLog::GetEventByHandleInternal(TraceEventHandle handle,
- OptionalAutoLock* lock) {
- if (!handle.chunk_seq)
- return NULL;
-
- if (thread_local_event_buffer_.Get()) {
- TraceEvent* trace_event =
- thread_local_event_buffer_.Get()->GetEventByHandle(handle);
- if (trace_event)
- return trace_event;
- }
-
- // The event has been out-of-control of the thread local buffer.
- // Try to get the event from the main buffer with a lock.
- if (lock)
- lock->EnsureAcquired();
-
- if (thread_shared_chunk_ &&
- handle.chunk_index == thread_shared_chunk_index_) {
- return handle.chunk_seq == thread_shared_chunk_->seq() ?
- thread_shared_chunk_->GetEventAt(handle.event_index) : NULL;
- }
-
- return logged_events_->GetEventByHandle(handle);
-}
-
-void TraceLog::SetProcessID(int process_id) {
- process_id_ = process_id;
- // Create a FNV hash from the process ID for XORing.
- // See http://isthe.com/chongo/tech/comp/fnv/ for algorithm details.
- unsigned long long offset_basis = 14695981039346656037ull;
- unsigned long long fnv_prime = 1099511628211ull;
- unsigned long long pid = static_cast<unsigned long long>(process_id_);
- process_id_hash_ = (offset_basis ^ pid) * fnv_prime;
-}
-
-void TraceLog::SetProcessSortIndex(int sort_index) {
- AutoLock lock(lock_);
- process_sort_index_ = sort_index;
-}
-
-void TraceLog::SetProcessName(const std::string& process_name) {
- AutoLock lock(lock_);
- process_name_ = process_name;
-}
-
-void TraceLog::UpdateProcessLabel(
- int label_id, const std::string& current_label) {
- if(!current_label.length())
- return RemoveProcessLabel(label_id);
-
- AutoLock lock(lock_);
- process_labels_[label_id] = current_label;
-}
-
-void TraceLog::RemoveProcessLabel(int label_id) {
- AutoLock lock(lock_);
- base::hash_map<int, std::string>::iterator it = process_labels_.find(
- label_id);
- if (it == process_labels_.end())
- return;
-
- process_labels_.erase(it);
-}
-
-void TraceLog::SetThreadSortIndex(PlatformThreadId thread_id, int sort_index) {
- AutoLock lock(lock_);
- thread_sort_indices_[static_cast<int>(thread_id)] = sort_index;
-}
-
-void TraceLog::SetTimeOffset(TimeDelta offset) {
- time_offset_ = offset;
-}
-
-size_t TraceLog::GetObserverCountForTest() const {
- return enabled_state_observer_list_.size();
-}
-
-void TraceLog::SetCurrentThreadBlocksMessageLoop() {
- thread_blocks_message_loop_.Set(true);
- if (thread_local_event_buffer_.Get()) {
- // This will flush the thread local buffer.
- delete thread_local_event_buffer_.Get();
- }
-}
-
-void ConvertableToTraceFormat::EstimateTraceMemoryOverhead(
- TraceEventMemoryOverhead* overhead) {
- overhead->Add("ConvertableToTraceFormat(Unknown)", sizeof(*this));
-}
-
} // namespace trace_event
} // namespace base
-
-namespace trace_event_internal {
-
-ScopedTraceBinaryEfficient::ScopedTraceBinaryEfficient(
- const char* category_group, const char* name) {
- // The single atom works because for now the category_group can only be "gpu".
- DCHECK_EQ(strcmp(category_group, "gpu"), 0);
- static TRACE_EVENT_API_ATOMIC_WORD atomic = 0;
- INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO_CUSTOM_VARIABLES(
- category_group, atomic, category_group_enabled_);
- name_ = name;
- if (*category_group_enabled_) {
- event_handle_ =
- TRACE_EVENT_API_ADD_TRACE_EVENT_WITH_THREAD_ID_AND_TIMESTAMP(
- TRACE_EVENT_PHASE_COMPLETE, category_group_enabled_, name, kNoId,
- kNoId, static_cast<int>(base::PlatformThread::CurrentId()),
- base::TraceTicks::Now(), 0, NULL, NULL, NULL, NULL,
- TRACE_EVENT_FLAG_NONE);
- }
-}
-
-ScopedTraceBinaryEfficient::~ScopedTraceBinaryEfficient() {
- if (*category_group_enabled_) {
- TRACE_EVENT_API_UPDATE_TRACE_EVENT_DURATION(category_group_enabled_,
- name_, event_handle_);
- }
-}
-
-} // namespace trace_event_internal
diff --git a/base/trace_event/trace_event_impl.h b/base/trace_event/trace_event_impl.h
index 4274f80..126bdc5 100644
--- a/base/trace_event/trace_event_impl.h
+++ b/base/trace_event/trace_event_impl.h
@@ -24,31 +24,8 @@
#include "base/synchronization/lock.h"
#include "base/threading/thread.h"
#include "base/threading/thread_local.h"
-#include "base/trace_event/memory_dump_provider.h"
-#include "base/trace_event/trace_config.h"
#include "base/trace_event/trace_event_memory_overhead.h"
-// Older style trace macros with explicit id and extra data
-// Only these macros result in publishing data to ETW as currently implemented.
-// TODO(georgesak): Update/replace these with new ETW macros.
-#define TRACE_EVENT_BEGIN_ETW(name, id, extra) \
- base::trace_event::TraceLog::AddTraceEventEtw( \
- TRACE_EVENT_PHASE_BEGIN, \
- name, reinterpret_cast<const void*>(id), extra)
-
-#define TRACE_EVENT_END_ETW(name, id, extra) \
- base::trace_event::TraceLog::AddTraceEventEtw( \
- TRACE_EVENT_PHASE_END, \
- name, reinterpret_cast<const void*>(id), extra)
-
-#define TRACE_EVENT_INSTANT_ETW(name, id, extra) \
- base::trace_event::TraceLog::AddTraceEventEtw( \
- TRACE_EVENT_PHASE_INSTANT, \
- name, reinterpret_cast<const void*>(id), extra)
-
-template <typename Type>
-struct DefaultSingletonTraits;
-
namespace base {
class WaitableEvent;
@@ -56,6 +33,9 @@ class MessageLoop;
namespace trace_event {
+typedef base::Callback<bool(const char* category_group_name,
+ const char* event_name)> ArgumentFilterPredicate;
+
// For any argument of type TRACE_VALUE_TYPE_CONVERTABLE the provided
// class must implement this interface.
class BASE_EXPORT ConvertableToTraceFormat
@@ -82,14 +62,14 @@ class BASE_EXPORT ConvertableToTraceFormat
friend class RefCounted<ConvertableToTraceFormat>;
};
+const int kTraceMaxNumArgs = 2;
+
struct TraceEventHandle {
uint32 chunk_seq;
uint16 chunk_index;
uint16 event_index;
};
-const int kTraceMaxNumArgs = 2;
-
class BASE_EXPORT TraceEvent {
public:
union TraceValue {
@@ -131,8 +111,6 @@ class BASE_EXPORT TraceEvent {
void EstimateTraceMemoryOverhead(TraceEventMemoryOverhead*);
// Serialize event data to JSON
- typedef base::Callback<bool(const char* category_group_name,
- const char* event_name)> ArgumentFilterPredicate;
void AppendAsJSON(
std::string* out,
const ArgumentFilterPredicate& argument_filter_predicate) const;
@@ -192,541 +170,6 @@ class BASE_EXPORT TraceEvent {
DISALLOW_COPY_AND_ASSIGN(TraceEvent);
};
-// TraceBufferChunk is the basic unit of TraceBuffer.
-class BASE_EXPORT TraceBufferChunk {
- public:
- explicit TraceBufferChunk(uint32 seq);
- ~TraceBufferChunk();
-
- void Reset(uint32 new_seq);
- TraceEvent* AddTraceEvent(size_t* event_index);
- bool IsFull() const { return next_free_ == kTraceBufferChunkSize; }
-
- uint32 seq() const { return seq_; }
- size_t capacity() const { return kTraceBufferChunkSize; }
- size_t size() const { return next_free_; }
-
- TraceEvent* GetEventAt(size_t index) {
- DCHECK(index < size());
- return &chunk_[index];
- }
- const TraceEvent* GetEventAt(size_t index) const {
- DCHECK(index < size());
- return &chunk_[index];
- }
-
- scoped_ptr<TraceBufferChunk> Clone() const;
-
- void EstimateTraceMemoryOverhead(TraceEventMemoryOverhead* overhead);
-
- static const size_t kTraceBufferChunkSize = 64;
-
- private:
- size_t next_free_;
- scoped_ptr<TraceEventMemoryOverhead> cached_overhead_estimate_;
- TraceEvent chunk_[kTraceBufferChunkSize];
- uint32 seq_;
-};
-
-// TraceBuffer holds the events as they are collected.
-class BASE_EXPORT TraceBuffer {
- public:
- virtual ~TraceBuffer() {}
-
- virtual scoped_ptr<TraceBufferChunk> GetChunk(size_t *index) = 0;
- virtual void ReturnChunk(size_t index,
- scoped_ptr<TraceBufferChunk> chunk) = 0;
-
- virtual bool IsFull() const = 0;
- virtual size_t Size() const = 0;
- virtual size_t Capacity() const = 0;
- virtual TraceEvent* GetEventByHandle(TraceEventHandle handle) = 0;
-
- // For iteration. Each TraceBuffer can only be iterated once.
- virtual const TraceBufferChunk* NextChunk() = 0;
-
- virtual scoped_ptr<TraceBuffer> CloneForIteration() const = 0;
-
- // Computes an estimate of the size of the buffer, including all the retained
- // objects.
- virtual void EstimateTraceMemoryOverhead(
- TraceEventMemoryOverhead* overhead) = 0;
-};
-
-// TraceResultBuffer collects and converts trace fragments returned by TraceLog
-// to JSON output.
-class BASE_EXPORT TraceResultBuffer {
- public:
- typedef base::Callback<void(const std::string&)> OutputCallback;
-
- // If you don't need to stream JSON chunks out efficiently, and just want to
- // get a complete JSON string after calling Finish, use this struct to collect
- // JSON trace output.
- struct BASE_EXPORT SimpleOutput {
- OutputCallback GetCallback();
- void Append(const std::string& json_string);
-
- // Do what you want with the json_output_ string after calling
- // TraceResultBuffer::Finish.
- std::string json_output;
- };
-
- TraceResultBuffer();
- ~TraceResultBuffer();
-
- // Set callback. The callback will be called during Start with the initial
- // JSON output and during AddFragment and Finish with following JSON output
- // chunks. The callback target must live past the last calls to
- // TraceResultBuffer::Start/AddFragment/Finish.
- void SetOutputCallback(const OutputCallback& json_chunk_callback);
-
- // Start JSON output. This resets all internal state, so you can reuse
- // the TraceResultBuffer by calling Start.
- void Start();
-
- // Call AddFragment 0 or more times to add trace fragments from TraceLog.
- void AddFragment(const std::string& trace_fragment);
-
- // When all fragments have been added, call Finish to complete the JSON
- // formatted output.
- void Finish();
-
- private:
- OutputCallback output_callback_;
- bool append_comma_;
-};
-
-class TraceSamplingThread;
-
-struct BASE_EXPORT TraceLogStatus {
- TraceLogStatus();
- ~TraceLogStatus();
- size_t event_capacity;
- size_t event_count;
-};
-
-class BASE_EXPORT TraceLog : public MemoryDumpProvider {
- public:
- enum Mode {
- DISABLED = 0,
- RECORDING_MODE,
- MONITORING_MODE,
- };
-
- // The pointer returned from GetCategoryGroupEnabledInternal() points to a
- // value with zero or more of the following bits. Used in this class only.
- // The TRACE_EVENT macros should only use the value as a bool.
- // These values must be in sync with macro values in TraceEvent.h in Blink.
- enum CategoryGroupEnabledFlags {
- // Category group enabled for the recording mode.
- ENABLED_FOR_RECORDING = 1 << 0,
- // Category group enabled for the monitoring mode.
- ENABLED_FOR_MONITORING = 1 << 1,
- // Category group enabled by SetEventCallbackEnabled().
- ENABLED_FOR_EVENT_CALLBACK = 1 << 2,
- // Category group enabled to export events to ETW.
- ENABLED_FOR_ETW_EXPORT = 1 << 3
- };
-
- static TraceLog* GetInstance();
-
- // Get set of known category groups. This can change as new code paths are
- // reached. The known category groups are inserted into |category_groups|.
- void GetKnownCategoryGroups(std::vector<std::string>* category_groups);
-
- // Retrieves a copy (for thread-safety) of the current TraceConfig.
- TraceConfig GetCurrentTraceConfig() const;
-
- // Initializes the thread-local event buffer, if not already initialized and
- // if the current thread supports that (has a message loop).
- void InitializeThreadLocalEventBufferIfSupported();
-
- // Enables normal tracing (recording trace events in the trace buffer).
- // See TraceConfig comments for details on how to control what categories
- // will be traced. If tracing has already been enabled, |category_filter| will
- // be merged into the current category filter.
- void SetEnabled(const TraceConfig& trace_config, Mode mode);
-
- // Disables normal tracing for all categories.
- void SetDisabled();
-
- bool IsEnabled() { return mode_ != DISABLED; }
-
- // The number of times we have begun recording traces. If tracing is off,
- // returns -1. If tracing is on, then it returns the number of times we have
- // recorded a trace. By watching for this number to increment, you can
- // passively discover when a new trace has begun. This is then used to
- // implement the TRACE_EVENT_IS_NEW_TRACE() primitive.
- int GetNumTracesRecorded();
-
-#if defined(OS_ANDROID)
- void StartATrace();
- void StopATrace();
- void AddClockSyncMetadataEvent();
-#endif
-
- // Enabled state listeners give a callback when tracing is enabled or
- // disabled. This can be used to tie into other library's tracing systems
- // on-demand.
- class BASE_EXPORT EnabledStateObserver {
- public:
- virtual ~EnabledStateObserver() = default;
-
- // Called just after the tracing system becomes enabled, outside of the
- // |lock_|. TraceLog::IsEnabled() is true at this point.
- virtual void OnTraceLogEnabled() = 0;
-
- // Called just after the tracing system disables, outside of the |lock_|.
- // TraceLog::IsEnabled() is false at this point.
- virtual void OnTraceLogDisabled() = 0;
- };
- void AddEnabledStateObserver(EnabledStateObserver* listener);
- void RemoveEnabledStateObserver(EnabledStateObserver* listener);
- bool HasEnabledStateObserver(EnabledStateObserver* listener) const;
-
- TraceLogStatus GetStatus() const;
- bool BufferIsFull() const;
-
- // Computes an estimate of the size of the TraceLog including all the retained
- // objects.
- void EstimateTraceMemoryOverhead(TraceEventMemoryOverhead* overhead);
-
- // Not using base::Callback because of its limited by 7 parameters.
- // Also, using primitive type allows directly passing callback from WebCore.
- // WARNING: It is possible for the previously set callback to be called
- // after a call to SetEventCallbackEnabled() that replaces or a call to
- // SetEventCallbackDisabled() that disables the callback.
- // This callback may be invoked on any thread.
- // For TRACE_EVENT_PHASE_COMPLETE events, the client will still receive pairs
- // of TRACE_EVENT_PHASE_BEGIN and TRACE_EVENT_PHASE_END events to keep the
- // interface simple.
- typedef void (*EventCallback)(TraceTicks timestamp,
- char phase,
- const unsigned char* category_group_enabled,
- const char* name,
- unsigned long long id,
- int num_args,
- const char* const arg_names[],
- const unsigned char arg_types[],
- const unsigned long long arg_values[],
- unsigned int flags);
-
- // Enable tracing for EventCallback.
- void SetEventCallbackEnabled(const TraceConfig& trace_config,
- EventCallback cb);
- void SetEventCallbackDisabled();
- void SetArgumentFilterPredicate(
- const TraceEvent::ArgumentFilterPredicate& argument_filter_predicate);
-
- // Flush all collected events to the given output callback. The callback will
- // be called one or more times either synchronously or asynchronously from
- // the current thread with IPC-bite-size chunks. The string format is
- // undefined. Use TraceResultBuffer to convert one or more trace strings to
- // JSON. The callback can be null if the caller doesn't want any data.
- // Due to the implementation of thread-local buffers, flush can't be
- // done when tracing is enabled. If called when tracing is enabled, the
- // callback will be called directly with (empty_string, false) to indicate
- // the end of this unsuccessful flush. Flush does the serialization
- // on the same thread if the caller doesn't set use_worker_thread explicitly.
- typedef base::Callback<void(const scoped_refptr<base::RefCountedString>&,
- bool has_more_events)> OutputCallback;
- void Flush(const OutputCallback& cb, bool use_worker_thread = false);
- void FlushButLeaveBufferIntact(const OutputCallback& flush_output_callback);
-
- // Cancels tracing and discards collected data.
- void CancelTracing(const OutputCallback& cb);
-
- // Called by TRACE_EVENT* macros, don't call this directly.
- // The name parameter is a category group for example:
- // TRACE_EVENT0("renderer,webkit", "WebViewImpl::HandleInputEvent")
- static const unsigned char* GetCategoryGroupEnabled(const char* name);
- static const char* GetCategoryGroupName(
- const unsigned char* category_group_enabled);
-
- // Called by TRACE_EVENT* macros, don't call this directly.
- // If |copy| is set, |name|, |arg_name1| and |arg_name2| will be deep copied
- // into the event; see "Memory scoping note" and TRACE_EVENT_COPY_XXX above.
- TraceEventHandle AddTraceEvent(
- char phase,
- const unsigned char* category_group_enabled,
- const char* name,
- unsigned long long id,
- int num_args,
- const char** arg_names,
- const unsigned char* arg_types,
- const unsigned long long* arg_values,
- const scoped_refptr<ConvertableToTraceFormat>* convertable_values,
- unsigned int flags);
- TraceEventHandle AddTraceEventWithContextId(
- char phase,
- const unsigned char* category_group_enabled,
- const char* name,
- unsigned long long id,
- unsigned long long context_id,
- int num_args,
- const char** arg_names,
- const unsigned char* arg_types,
- const unsigned long long* arg_values,
- const scoped_refptr<ConvertableToTraceFormat>* convertable_values,
- unsigned int flags);
- TraceEventHandle AddTraceEventWithThreadIdAndTimestamp(
- char phase,
- const unsigned char* category_group_enabled,
- const char* name,
- unsigned long long id,
- unsigned long long context_id,
- int thread_id,
- const TraceTicks& timestamp,
- int num_args,
- const char** arg_names,
- const unsigned char* arg_types,
- const unsigned long long* arg_values,
- const scoped_refptr<ConvertableToTraceFormat>* convertable_values,
- unsigned int flags);
- static void AddTraceEventEtw(char phase,
- const char* category_group,
- const void* id,
- const char* extra);
- static void AddTraceEventEtw(char phase,
- const char* category_group,
- const void* id,
- const std::string& extra);
-
- void UpdateTraceEventDuration(const unsigned char* category_group_enabled,
- const char* name,
- TraceEventHandle handle);
-
- // For every matching event, the callback will be called.
- typedef base::Callback<void()> WatchEventCallback;
- void SetWatchEvent(const std::string& category_name,
- const std::string& event_name,
- const WatchEventCallback& callback);
- // Cancel the watch event. If tracing is enabled, this may race with the
- // watch event notification firing.
- void CancelWatchEvent();
-
- int process_id() const { return process_id_; }
-
- uint64 MangleEventId(uint64 id);
-
- // Exposed for unittesting:
-
- void WaitSamplingEventForTesting();
-
- // Allows deleting our singleton instance.
- static void DeleteForTesting();
-
- // Allow tests to inspect TraceEvents.
- TraceEvent* GetEventByHandle(TraceEventHandle handle);
-
- void SetProcessID(int process_id);
-
- // Process sort indices, if set, override the order of a process will appear
- // relative to other processes in the trace viewer. Processes are sorted first
- // on their sort index, ascending, then by their name, and then tid.
- void SetProcessSortIndex(int sort_index);
-
- // Sets the name of the process.
- void SetProcessName(const std::string& process_name);
-
- // Processes can have labels in addition to their names. Use labels, for
- // instance, to list out the web page titles that a process is handling.
- void UpdateProcessLabel(int label_id, const std::string& current_label);
- void RemoveProcessLabel(int label_id);
-
- // Thread sort indices, if set, override the order of a thread will appear
- // within its process in the trace viewer. Threads are sorted first on their
- // sort index, ascending, then by their name, and then tid.
- void SetThreadSortIndex(PlatformThreadId , int sort_index);
-
- // Allow setting an offset between the current TraceTicks time and the time
- // that should be reported.
- void SetTimeOffset(TimeDelta offset);
-
- size_t GetObserverCountForTest() const;
-
- // Call this method if the current thread may block the message loop to
- // prevent the thread from using the thread-local buffer because the thread
- // may not handle the flush request in time causing lost of unflushed events.
- void SetCurrentThreadBlocksMessageLoop();
-
- private:
- typedef unsigned int InternalTraceOptions;
-
- FRIEND_TEST_ALL_PREFIXES(TraceEventTestFixture,
- TraceBufferRingBufferGetReturnChunk);
- FRIEND_TEST_ALL_PREFIXES(TraceEventTestFixture,
- TraceBufferRingBufferHalfIteration);
- FRIEND_TEST_ALL_PREFIXES(TraceEventTestFixture,
- TraceBufferRingBufferFullIteration);
- FRIEND_TEST_ALL_PREFIXES(TraceEventTestFixture,
- TraceBufferVectorReportFull);
- FRIEND_TEST_ALL_PREFIXES(TraceEventTestFixture,
- ConvertTraceConfigToInternalOptions);
- FRIEND_TEST_ALL_PREFIXES(TraceEventTestFixture,
- TraceRecordAsMuchAsPossibleMode);
-
- // This allows constructor and destructor to be private and usable only
- // by the Singleton class.
- friend struct DefaultSingletonTraits<TraceLog>;
-
- // MemoryDumpProvider implementation.
- bool OnMemoryDump(ProcessMemoryDump* pmd) override;
-
- // Enable/disable each category group based on the current mode_,
- // category_filter_, event_callback_ and event_callback_category_filter_.
- // Enable the category group in the enabled mode if category_filter_ matches
- // the category group, or event_callback_ is not null and
- // event_callback_category_filter_ matches the category group.
- void UpdateCategoryGroupEnabledFlags();
- void UpdateCategoryGroupEnabledFlag(size_t category_index);
-
- // Configure synthetic delays based on the values set in the current
- // trace config.
- void UpdateSyntheticDelaysFromTraceConfig();
-
- InternalTraceOptions GetInternalOptionsFromTraceConfig(
- const TraceConfig& config);
-
- class ThreadLocalEventBuffer;
- class OptionalAutoLock;
-
- TraceLog();
- ~TraceLog() override;
- const unsigned char* GetCategoryGroupEnabledInternal(const char* name);
- void AddMetadataEventsWhileLocked();
-
- InternalTraceOptions trace_options() const {
- return static_cast<InternalTraceOptions>(
- subtle::NoBarrier_Load(&trace_options_));
- }
-
- TraceBuffer* trace_buffer() const { return logged_events_.get(); }
- TraceBuffer* CreateTraceBuffer();
- TraceBuffer* CreateTraceBufferVectorOfSize(size_t max_chunks);
-
- std::string EventToConsoleMessage(unsigned char phase,
- const TraceTicks& timestamp,
- TraceEvent* trace_event);
-
- TraceEvent* AddEventToThreadSharedChunkWhileLocked(TraceEventHandle* handle,
- bool check_buffer_is_full);
- void CheckIfBufferIsFullWhileLocked();
- void SetDisabledWhileLocked();
-
- TraceEvent* GetEventByHandleInternal(TraceEventHandle handle,
- OptionalAutoLock* lock);
-
- void FlushInternal(const OutputCallback& cb,
- bool use_worker_thread,
- bool discard_events);
-
- // |generation| is used in the following callbacks to check if the callback
- // is called for the flush of the current |logged_events_|.
- void FlushCurrentThread(int generation, bool discard_events);
- // Usually it runs on a different thread.
- static void ConvertTraceEventsToTraceFormat(
- scoped_ptr<TraceBuffer> logged_events,
- const TraceLog::OutputCallback& flush_output_callback,
- const TraceEvent::ArgumentFilterPredicate& argument_filter_predicate);
- void FinishFlush(int generation, bool discard_events);
- void OnFlushTimeout(int generation, bool discard_events);
-
- int generation() const {
- return static_cast<int>(subtle::NoBarrier_Load(&generation_));
- }
- bool CheckGeneration(int generation) const {
- return generation == this->generation();
- }
- void UseNextTraceBuffer();
-
- TraceTicks OffsetNow() const {
- return OffsetTimestamp(TraceTicks::Now());
- }
- TraceTicks OffsetTimestamp(const TraceTicks& timestamp) const {
- return timestamp - time_offset_;
- }
-
- // Internal representation of trace options since we store the currently used
- // trace option as an AtomicWord.
- static const InternalTraceOptions kInternalNone;
- static const InternalTraceOptions kInternalRecordUntilFull;
- static const InternalTraceOptions kInternalRecordContinuously;
- static const InternalTraceOptions kInternalEchoToConsole;
- static const InternalTraceOptions kInternalEnableSampling;
- static const InternalTraceOptions kInternalRecordAsMuchAsPossible;
- static const InternalTraceOptions kInternalEnableArgumentFilter;
-
- // This lock protects TraceLog member accesses (except for members protected
- // by thread_info_lock_) from arbitrary threads.
- mutable Lock lock_;
- // This lock protects accesses to thread_names_, thread_event_start_times_
- // and thread_colors_.
- Lock thread_info_lock_;
- Mode mode_;
- int num_traces_recorded_;
- scoped_ptr<TraceBuffer> logged_events_;
- subtle::AtomicWord /* EventCallback */ event_callback_;
- bool dispatching_to_observer_list_;
- std::vector<EnabledStateObserver*> enabled_state_observer_list_;
-
- std::string process_name_;
- base::hash_map<int, std::string> process_labels_;
- int process_sort_index_;
- base::hash_map<int, int> thread_sort_indices_;
- base::hash_map<int, std::string> thread_names_;
-
- // The following two maps are used only when ECHO_TO_CONSOLE.
- base::hash_map<int, std::stack<TraceTicks> > thread_event_start_times_;
- base::hash_map<std::string, int> thread_colors_;
-
- TraceTicks buffer_limit_reached_timestamp_;
-
- // XORed with TraceID to make it unlikely to collide with other processes.
- unsigned long long process_id_hash_;
-
- int process_id_;
-
- TimeDelta time_offset_;
-
- // Allow tests to wake up when certain events occur.
- WatchEventCallback watch_event_callback_;
- subtle::AtomicWord /* const unsigned char* */ watch_category_;
- std::string watch_event_name_;
-
- subtle::AtomicWord /* Options */ trace_options_;
-
- // Sampling thread handles.
- scoped_ptr<TraceSamplingThread> sampling_thread_;
- PlatformThreadHandle sampling_thread_handle_;
-
- TraceConfig trace_config_;
- TraceConfig event_callback_trace_config_;
-
- ThreadLocalPointer<ThreadLocalEventBuffer> thread_local_event_buffer_;
- ThreadLocalBoolean thread_blocks_message_loop_;
- ThreadLocalBoolean thread_is_in_trace_event_;
-
- // Contains the message loops of threads that have had at least one event
- // added into the local event buffer. Not using SingleThreadTaskRunner
- // because we need to know the life time of the message loops.
- hash_set<MessageLoop*> thread_message_loops_;
-
- // For events which can't be added into the thread local buffer, e.g. events
- // from threads without a message loop.
- scoped_ptr<TraceBufferChunk> thread_shared_chunk_;
- size_t thread_shared_chunk_index_;
-
- // Set when asynchronous Flush is in progress.
- OutputCallback flush_output_callback_;
- scoped_refptr<SingleThreadTaskRunner> flush_task_runner_;
- TraceEvent::ArgumentFilterPredicate argument_filter_predicate_;
- subtle::AtomicWord generation_;
- bool use_worker_thread_;
-
- DISALLOW_COPY_AND_ASSIGN(TraceLog);
-};
-
} // namespace trace_event
} // namespace base
diff --git a/base/trace_event/trace_event_memory.h b/base/trace_event/trace_event_memory.h
index 5b63db0..2e8fb5d 100644
--- a/base/trace_event/trace_event_memory.h
+++ b/base/trace_event/trace_event_memory.h
@@ -10,7 +10,7 @@
#include "base/memory/ref_counted.h"
#include "base/memory/weak_ptr.h"
#include "base/timer/timer.h"
-#include "base/trace_event/trace_event_impl.h"
+#include "base/trace_event/trace_log.h"
// TODO(jamescook): Windows support for memory tracing.
#if !defined(NO_TCMALLOC) && !defined(OS_NACL) && \
diff --git a/base/trace_event/trace_event_system_stats_monitor.h b/base/trace_event/trace_event_system_stats_monitor.h
index 7a63a14..f80968b 100644
--- a/base/trace_event/trace_event_system_stats_monitor.h
+++ b/base/trace_event/trace_event_system_stats_monitor.h
@@ -11,7 +11,7 @@
#include "base/memory/weak_ptr.h"
#include "base/process/process_metrics.h"
#include "base/timer/timer.h"
-#include "base/trace_event/trace_event_impl.h"
+#include "base/trace_event/trace_log.h"
namespace base {
diff --git a/base/trace_event/trace_event_unittest.cc b/base/trace_event/trace_event_unittest.cc
index bf389d6..7420fc5 100644
--- a/base/trace_event/trace_event_unittest.cc
+++ b/base/trace_event/trace_event_unittest.cc
@@ -21,6 +21,7 @@
#include "base/threading/platform_thread.h"
#include "base/threading/thread.h"
#include "base/time/time.h"
+#include "base/trace_event/trace_buffer.h"
#include "base/trace_event/trace_event.h"
#include "base/trace_event/trace_event_synthetic_delay.h"
#include "base/values.h"
@@ -2514,7 +2515,7 @@ TEST_F(TraceEventTestFixture, TraceBufferVectorReportFull) {
trace_log->SetEnabled(
TraceConfig(kRecordAllCategoryFilter, ""), TraceLog::RECORDING_MODE);
trace_log->logged_events_.reset(
- trace_log->CreateTraceBufferVectorOfSize(100));
+ TraceBuffer::CreateTraceBufferVectorOfSize(100));
do {
TRACE_EVENT_BEGIN_WITH_ID_TID_AND_TIMESTAMP0(
"all", "with_timestamp", 0, 0, TraceTicks::Now().ToInternalValue());
diff --git a/base/trace_event/trace_log.cc b/base/trace_event/trace_log.cc
new file mode 100644
index 0000000..e8cceb5
--- /dev/null
+++ b/base/trace_event/trace_log.cc
@@ -0,0 +1,1642 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/trace_event/trace_log.h"
+
+#include <algorithm>
+#include <cmath>
+
+#include "base/base_switches.h"
+#include "base/bind.h"
+#include "base/command_line.h"
+#include "base/debug/leak_annotations.h"
+#include "base/lazy_instance.h"
+#include "base/location.h"
+#include "base/memory/singleton.h"
+#include "base/process/process_metrics.h"
+#include "base/strings/string_split.h"
+#include "base/strings/string_tokenizer.h"
+#include "base/strings/stringprintf.h"
+#include "base/sys_info.h"
+#include "base/third_party/dynamic_annotations/dynamic_annotations.h"
+#include "base/thread_task_runner_handle.h"
+#include "base/threading/platform_thread.h"
+#include "base/threading/thread_id_name_manager.h"
+#include "base/threading/worker_pool.h"
+#include "base/time/time.h"
+#include "base/trace_event/memory_dump_manager.h"
+#include "base/trace_event/memory_dump_provider.h"
+#include "base/trace_event/process_memory_dump.h"
+#include "base/trace_event/trace_buffer.h"
+#include "base/trace_event/trace_event.h"
+#include "base/trace_event/trace_event_synthetic_delay.h"
+#include "base/trace_event/trace_log.h"
+#include "base/trace_event/trace_sampling_thread.h"
+
+#if defined(OS_WIN)
+#include "base/trace_event/trace_event_etw_export_win.h"
+#include "base/trace_event/trace_event_win.h"
+#endif
+
+class DeleteTraceLogForTesting {
+ public:
+ static void Delete() {
+ Singleton<base::trace_event::TraceLog,
+ LeakySingletonTraits<base::trace_event::TraceLog>>::OnExit(0);
+ }
+};
+
+// The thread buckets for the sampling profiler.
+BASE_EXPORT TRACE_EVENT_API_ATOMIC_WORD g_trace_state[3];
+
+namespace base {
+namespace trace_event {
+
+namespace {
+
+// Controls the number of trace events we will buffer in-memory
+// before throwing them away.
+const size_t kTraceBufferChunkSize = TraceBufferChunk::kTraceBufferChunkSize;
+const size_t kTraceEventVectorBigBufferChunks =
+ 512000000 / kTraceBufferChunkSize;
+const size_t kTraceEventVectorBufferChunks = 256000 / kTraceBufferChunkSize;
+const size_t kTraceEventRingBufferChunks = kTraceEventVectorBufferChunks / 4;
+// Can store results for 30 seconds with 1 ms sampling interval.
+const size_t kMonitorTraceEventBufferChunks = 30000 / kTraceBufferChunkSize;
+// ECHO_TO_CONSOLE needs a small buffer to hold the unfinished COMPLETE events.
+const size_t kEchoToConsoleTraceEventBufferChunks = 256;
+
+// The overhead of TraceEvent above this threshold will be reported in the
+// trace.
+const int kOverheadReportThresholdInMicroseconds = 50;
+const size_t kTraceEventBufferSizeInBytes = 100 * 1024;
+const int kThreadFlushTimeoutMs = 3000;
+
+#if !defined(OS_NACL)
+// These categories will cause deadlock when ECHO_TO_CONSOLE. crbug.com/325575.
+const char kEchoToConsoleCategoryFilter[] = "-ipc,-task";
+#endif
+
+#define MAX_CATEGORY_GROUPS 100
+
+// Parallel arrays g_category_groups and g_category_group_enabled are separate
+// so that a pointer to a member of g_category_group_enabled can be easily
+// converted to an index into g_category_groups. This allows macros to deal
+// only with char enabled pointers from g_category_group_enabled, and we can
+// convert internally to determine the category name from the char enabled
+// pointer.
+const char* g_category_groups[MAX_CATEGORY_GROUPS] = {
+ "toplevel",
+ "tracing already shutdown",
+ "tracing categories exhausted; must increase MAX_CATEGORY_GROUPS",
+ "__metadata",
+ // For reporting trace_event overhead. For thread local event buffers only.
+ "trace_event_overhead"};
+
+// The enabled flag is char instead of bool so that the API can be used from C.
+unsigned char g_category_group_enabled[MAX_CATEGORY_GROUPS] = {0};
+// Indexes here have to match the g_category_groups array indexes above.
+const int g_category_already_shutdown = 1;
+const int g_category_categories_exhausted = 2;
+const int g_category_metadata = 3;
+const int g_category_trace_event_overhead = 4;
+const int g_num_builtin_categories = 5;
+// Skip default categories.
+base::subtle::AtomicWord g_category_index = g_num_builtin_categories;
+
+// The name of the current thread. This is used to decide if the current
+// thread name has changed. We combine all the seen thread names into the
+// output name for the thread.
+LazyInstance<ThreadLocalPointer<const char>>::Leaky g_current_thread_name =
+ LAZY_INSTANCE_INITIALIZER;
+
+ThreadTicks ThreadNow() {
+ return ThreadTicks::IsSupported() ? ThreadTicks::Now() : ThreadTicks();
+}
+
+template <typename T>
+void InitializeMetadataEvent(TraceEvent* trace_event,
+ int thread_id,
+ const char* metadata_name,
+ const char* arg_name,
+ const T& value) {
+ if (!trace_event)
+ return;
+
+ int num_args = 1;
+ unsigned char arg_type;
+ unsigned long long arg_value;
+ ::trace_event_internal::SetTraceValue(value, &arg_type, &arg_value);
+ trace_event->Initialize(
+ thread_id, TraceTicks(), ThreadTicks(), TRACE_EVENT_PHASE_METADATA,
+ &g_category_group_enabled[g_category_metadata], metadata_name,
+ trace_event_internal::kNoId, trace_event_internal::kNoId, num_args,
+ &arg_name, &arg_type, &arg_value, NULL, TRACE_EVENT_FLAG_NONE);
+}
+
+class AutoThreadLocalBoolean {
+ public:
+ explicit AutoThreadLocalBoolean(ThreadLocalBoolean* thread_local_boolean)
+ : thread_local_boolean_(thread_local_boolean) {
+ DCHECK(!thread_local_boolean_->Get());
+ thread_local_boolean_->Set(true);
+ }
+ ~AutoThreadLocalBoolean() { thread_local_boolean_->Set(false); }
+
+ private:
+ ThreadLocalBoolean* thread_local_boolean_;
+ DISALLOW_COPY_AND_ASSIGN(AutoThreadLocalBoolean);
+};
+
+// Use this function instead of TraceEventHandle constructor to keep the
+// overhead of ScopedTracer (trace_event.h) constructor minimum.
+void MakeHandle(uint32 chunk_seq,
+ size_t chunk_index,
+ size_t event_index,
+ TraceEventHandle* handle) {
+ DCHECK(chunk_seq);
+ DCHECK(chunk_index < (1u << 16));
+ DCHECK(event_index < (1u << 16));
+ handle->chunk_seq = chunk_seq;
+ handle->chunk_index = static_cast<uint16>(chunk_index);
+ handle->event_index = static_cast<uint16>(event_index);
+}
+
+} // namespace
+
+// A helper class that allows the lock to be acquired in the middle of the scope
+// and unlocks at the end of scope if locked.
+class TraceLog::OptionalAutoLock {
+ public:
+ explicit OptionalAutoLock(Lock* lock) : lock_(lock), locked_(false) {}
+
+ ~OptionalAutoLock() {
+ if (locked_)
+ lock_->Release();
+ }
+
+ void EnsureAcquired() {
+ if (!locked_) {
+ lock_->Acquire();
+ locked_ = true;
+ }
+ }
+
+ private:
+ Lock* lock_;
+ bool locked_;
+ DISALLOW_COPY_AND_ASSIGN(OptionalAutoLock);
+};
+
+class TraceLog::ThreadLocalEventBuffer
+ : public MessageLoop::DestructionObserver,
+ public MemoryDumpProvider {
+ public:
+ ThreadLocalEventBuffer(TraceLog* trace_log);
+ ~ThreadLocalEventBuffer() override;
+
+ TraceEvent* AddTraceEvent(TraceEventHandle* handle);
+
+ void ReportOverhead(const TraceTicks& event_timestamp,
+ const ThreadTicks& event_thread_timestamp);
+
+ TraceEvent* GetEventByHandle(TraceEventHandle handle) {
+ if (!chunk_ || handle.chunk_seq != chunk_->seq() ||
+ handle.chunk_index != chunk_index_)
+ return NULL;
+
+ return chunk_->GetEventAt(handle.event_index);
+ }
+
+ int generation() const { return generation_; }
+
+ private:
+ // MessageLoop::DestructionObserver
+ void WillDestroyCurrentMessageLoop() override;
+
+ // MemoryDumpProvider implementation.
+ bool OnMemoryDump(ProcessMemoryDump* pmd) override;
+
+ void FlushWhileLocked();
+
+ void CheckThisIsCurrentBuffer() const {
+ DCHECK(trace_log_->thread_local_event_buffer_.Get() == this);
+ }
+
+ // Since TraceLog is a leaky singleton, trace_log_ will always be valid
+ // as long as the thread exists.
+ TraceLog* trace_log_;
+ scoped_ptr<TraceBufferChunk> chunk_;
+ size_t chunk_index_;
+ int event_count_;
+ TimeDelta overhead_;
+ int generation_;
+
+ DISALLOW_COPY_AND_ASSIGN(ThreadLocalEventBuffer);
+};
+
+TraceLog::ThreadLocalEventBuffer::ThreadLocalEventBuffer(TraceLog* trace_log)
+ : trace_log_(trace_log),
+ chunk_index_(0),
+ event_count_(0),
+ generation_(trace_log->generation()) {
+ // ThreadLocalEventBuffer is created only if the thread has a message loop, so
+ // the following message_loop won't be NULL.
+ MessageLoop* message_loop = MessageLoop::current();
+ message_loop->AddDestructionObserver(this);
+
+ // This is to report the local memory usage when memory-infra is enabled.
+ MemoryDumpManager::GetInstance()->RegisterDumpProvider(
+ this, ThreadTaskRunnerHandle::Get());
+
+ AutoLock lock(trace_log->lock_);
+ trace_log->thread_message_loops_.insert(message_loop);
+}
+
+TraceLog::ThreadLocalEventBuffer::~ThreadLocalEventBuffer() {
+ CheckThisIsCurrentBuffer();
+ MessageLoop::current()->RemoveDestructionObserver(this);
+ MemoryDumpManager::GetInstance()->UnregisterDumpProvider(this);
+
+ // Zero event_count_ happens in either of the following cases:
+ // - no event generated for the thread;
+ // - the thread has no message loop;
+ // - trace_event_overhead is disabled.
+ if (event_count_) {
+ InitializeMetadataEvent(AddTraceEvent(NULL),
+ static_cast<int>(base::PlatformThread::CurrentId()),
+ "overhead", "average_overhead",
+ overhead_.InMillisecondsF() / event_count_);
+ }
+
+ {
+ AutoLock lock(trace_log_->lock_);
+ FlushWhileLocked();
+ trace_log_->thread_message_loops_.erase(MessageLoop::current());
+ }
+ trace_log_->thread_local_event_buffer_.Set(NULL);
+}
+
+TraceEvent* TraceLog::ThreadLocalEventBuffer::AddTraceEvent(
+ TraceEventHandle* handle) {
+ CheckThisIsCurrentBuffer();
+
+ if (chunk_ && chunk_->IsFull()) {
+ AutoLock lock(trace_log_->lock_);
+ FlushWhileLocked();
+ chunk_.reset();
+ }
+ if (!chunk_) {
+ AutoLock lock(trace_log_->lock_);
+ chunk_ = trace_log_->logged_events_->GetChunk(&chunk_index_);
+ trace_log_->CheckIfBufferIsFullWhileLocked();
+ }
+ if (!chunk_)
+ return NULL;
+
+ size_t event_index;
+ TraceEvent* trace_event = chunk_->AddTraceEvent(&event_index);
+ if (trace_event && handle)
+ MakeHandle(chunk_->seq(), chunk_index_, event_index, handle);
+
+ return trace_event;
+}
+
+void TraceLog::ThreadLocalEventBuffer::ReportOverhead(
+ const TraceTicks& event_timestamp,
+ const ThreadTicks& event_thread_timestamp) {
+ if (!g_category_group_enabled[g_category_trace_event_overhead])
+ return;
+
+ CheckThisIsCurrentBuffer();
+
+ event_count_++;
+ ThreadTicks thread_now = ThreadNow();
+ TraceTicks now = trace_log_->OffsetNow();
+ TimeDelta overhead = now - event_timestamp;
+ if (overhead.InMicroseconds() >= kOverheadReportThresholdInMicroseconds) {
+ TraceEvent* trace_event = AddTraceEvent(NULL);
+ if (trace_event) {
+ trace_event->Initialize(
+ static_cast<int>(PlatformThread::CurrentId()), event_timestamp,
+ event_thread_timestamp, TRACE_EVENT_PHASE_COMPLETE,
+ &g_category_group_enabled[g_category_trace_event_overhead],
+ "overhead", ::trace_event_internal::kNoId,
+ ::trace_event_internal::kNoId, ::trace_event_internal::kZeroNumArgs,
+ NULL, NULL, NULL, NULL, 0);
+ trace_event->UpdateDuration(now, thread_now);
+ }
+ }
+ overhead_ += overhead;
+}
+
+void TraceLog::ThreadLocalEventBuffer::WillDestroyCurrentMessageLoop() {
+ delete this;
+}
+
+bool TraceLog::ThreadLocalEventBuffer::OnMemoryDump(ProcessMemoryDump* pmd) {
+ if (!chunk_)
+ return true;
+ std::string dump_base_name = StringPrintf(
+ "tracing/thread_%d", static_cast<int>(PlatformThread::CurrentId()));
+ TraceEventMemoryOverhead overhead;
+ chunk_->EstimateTraceMemoryOverhead(&overhead);
+ overhead.DumpInto(dump_base_name.c_str(), pmd);
+ return true;
+}
+
+void TraceLog::ThreadLocalEventBuffer::FlushWhileLocked() {
+ if (!chunk_)
+ return;
+
+ trace_log_->lock_.AssertAcquired();
+ if (trace_log_->CheckGeneration(generation_)) {
+ // Return the chunk to the buffer only if the generation matches.
+ trace_log_->logged_events_->ReturnChunk(chunk_index_, chunk_.Pass());
+ }
+ // Otherwise this method may be called from the destructor, or TraceLog will
+ // find the generation mismatch and delete this buffer soon.
+}
+
+TraceLogStatus::TraceLogStatus() : event_capacity(0), event_count(0) {}
+
+TraceLogStatus::~TraceLogStatus() {}
+
+// static
+TraceLog* TraceLog::GetInstance() {
+ return Singleton<TraceLog, LeakySingletonTraits<TraceLog>>::get();
+}
+
+TraceLog::TraceLog()
+ : mode_(DISABLED),
+ num_traces_recorded_(0),
+ event_callback_(0),
+ dispatching_to_observer_list_(false),
+ process_sort_index_(0),
+ process_id_hash_(0),
+ process_id_(0),
+ watch_category_(0),
+ trace_options_(kInternalRecordUntilFull),
+ sampling_thread_handle_(0),
+ trace_config_(TraceConfig()),
+ event_callback_trace_config_(TraceConfig()),
+ thread_shared_chunk_index_(0),
+ generation_(0),
+ use_worker_thread_(false) {
+ // Trace is enabled or disabled on one thread while other threads are
+ // accessing the enabled flag. We don't care whether edge-case events are
+ // traced or not, so we allow races on the enabled flag to keep the trace
+ // macros fast.
+ // TODO(jbates): ANNOTATE_BENIGN_RACE_SIZED crashes windows TSAN bots:
+ // ANNOTATE_BENIGN_RACE_SIZED(g_category_group_enabled,
+ // sizeof(g_category_group_enabled),
+ // "trace_event category enabled");
+ for (int i = 0; i < MAX_CATEGORY_GROUPS; ++i) {
+ ANNOTATE_BENIGN_RACE(&g_category_group_enabled[i],
+ "trace_event category enabled");
+ }
+#if defined(OS_NACL) // NaCl shouldn't expose the process id.
+ SetProcessID(0);
+#else
+ SetProcessID(static_cast<int>(GetCurrentProcId()));
+
+ // NaCl also shouldn't access the command line.
+ if (CommandLine::InitializedForCurrentProcess() &&
+ CommandLine::ForCurrentProcess()->HasSwitch(switches::kTraceToConsole)) {
+ std::string filter = CommandLine::ForCurrentProcess()->GetSwitchValueASCII(
+ switches::kTraceToConsole);
+ if (filter.empty()) {
+ filter = kEchoToConsoleCategoryFilter;
+ } else {
+ filter.append(",");
+ filter.append(kEchoToConsoleCategoryFilter);
+ }
+
+ LOG(ERROR) << "Start " << switches::kTraceToConsole
+ << " with CategoryFilter '" << filter << "'.";
+ SetEnabled(TraceConfig(filter, ECHO_TO_CONSOLE), RECORDING_MODE);
+ }
+#endif
+
+ logged_events_.reset(CreateTraceBuffer());
+
+ MemoryDumpManager::GetInstance()->RegisterDumpProvider(this);
+}
+
+TraceLog::~TraceLog() {}
+
+void TraceLog::InitializeThreadLocalEventBufferIfSupported() {
+ // A ThreadLocalEventBuffer needs the message loop
+ // - to know when the thread exits;
+ // - to handle the final flush.
+  // For a thread without a message loop, or one whose message loop may be
+  // blocked, the trace events will be added into the main buffer directly.
+ if (thread_blocks_message_loop_.Get() || !MessageLoop::current())
+ return;
+ auto thread_local_event_buffer = thread_local_event_buffer_.Get();
+ if (thread_local_event_buffer &&
+ !CheckGeneration(thread_local_event_buffer->generation())) {
+ delete thread_local_event_buffer;
+ thread_local_event_buffer = NULL;
+ }
+ if (!thread_local_event_buffer) {
+ thread_local_event_buffer = new ThreadLocalEventBuffer(this);
+ thread_local_event_buffer_.Set(thread_local_event_buffer);
+ }
+}
+
+bool TraceLog::OnMemoryDump(ProcessMemoryDump* pmd) {
+ TraceEventMemoryOverhead overhead;
+ overhead.Add("TraceLog", sizeof(*this));
+ {
+ AutoLock lock(lock_);
+ if (logged_events_)
+ logged_events_->EstimateTraceMemoryOverhead(&overhead);
+ }
+ overhead.AddSelf();
+ overhead.DumpInto("tracing/main_trace_log", pmd);
+ return true;
+}
+
+const unsigned char* TraceLog::GetCategoryGroupEnabled(
+ const char* category_group) {
+ TraceLog* tracelog = GetInstance();
+ if (!tracelog) {
+ DCHECK(!g_category_group_enabled[g_category_already_shutdown]);
+ return &g_category_group_enabled[g_category_already_shutdown];
+ }
+ return tracelog->GetCategoryGroupEnabledInternal(category_group);
+}
+
+const char* TraceLog::GetCategoryGroupName(
+ const unsigned char* category_group_enabled) {
+ // Calculate the index of the category group by finding
+ // category_group_enabled in g_category_group_enabled array.
+ uintptr_t category_begin =
+ reinterpret_cast<uintptr_t>(g_category_group_enabled);
+ uintptr_t category_ptr = reinterpret_cast<uintptr_t>(category_group_enabled);
+ DCHECK(category_ptr >= category_begin &&
+ category_ptr < reinterpret_cast<uintptr_t>(g_category_group_enabled +
+ MAX_CATEGORY_GROUPS))
+ << "out of bounds category pointer";
+ uintptr_t category_index =
+ (category_ptr - category_begin) / sizeof(g_category_group_enabled[0]);
+ return g_category_groups[category_index];
+}
+
+void TraceLog::UpdateCategoryGroupEnabledFlag(size_t category_index) {
+ unsigned char enabled_flag = 0;
+ const char* category_group = g_category_groups[category_index];
+ if (mode_ == RECORDING_MODE &&
+ trace_config_.IsCategoryGroupEnabled(category_group))
+ enabled_flag |= ENABLED_FOR_RECORDING;
+ else if (mode_ == MONITORING_MODE &&
+ trace_config_.IsCategoryGroupEnabled(category_group))
+ enabled_flag |= ENABLED_FOR_MONITORING;
+ if (event_callback_ &&
+ event_callback_trace_config_.IsCategoryGroupEnabled(category_group))
+ enabled_flag |= ENABLED_FOR_EVENT_CALLBACK;
+#if defined(OS_WIN)
+ if (base::trace_event::TraceEventETWExport::IsCategoryGroupEnabled(
+ category_group)) {
+ enabled_flag |= ENABLED_FOR_ETW_EXPORT;
+ }
+#endif
+
+ g_category_group_enabled[category_index] = enabled_flag;
+}
+
+void TraceLog::UpdateCategoryGroupEnabledFlags() {
+ size_t category_index = base::subtle::NoBarrier_Load(&g_category_index);
+ for (size_t i = 0; i < category_index; i++)
+ UpdateCategoryGroupEnabledFlag(i);
+}
+
+void TraceLog::UpdateSyntheticDelaysFromTraceConfig() {
+ ResetTraceEventSyntheticDelays();
+ const TraceConfig::StringList& delays =
+ trace_config_.GetSyntheticDelayValues();
+ TraceConfig::StringList::const_iterator ci;
+ for (ci = delays.begin(); ci != delays.end(); ++ci) {
+ StringTokenizer tokens(*ci, ";");
+ if (!tokens.GetNext())
+ continue;
+ TraceEventSyntheticDelay* delay =
+ TraceEventSyntheticDelay::Lookup(tokens.token());
+ while (tokens.GetNext()) {
+ std::string token = tokens.token();
+ char* duration_end;
+ double target_duration = strtod(token.c_str(), &duration_end);
+ if (duration_end != token.c_str()) {
+ delay->SetTargetDuration(TimeDelta::FromMicroseconds(
+ static_cast<int64>(target_duration * 1e6)));
+ } else if (token == "static") {
+ delay->SetMode(TraceEventSyntheticDelay::STATIC);
+ } else if (token == "oneshot") {
+ delay->SetMode(TraceEventSyntheticDelay::ONE_SHOT);
+ } else if (token == "alternating") {
+ delay->SetMode(TraceEventSyntheticDelay::ALTERNATING);
+ }
+ }
+ }
+}
+
+const unsigned char* TraceLog::GetCategoryGroupEnabledInternal(
+ const char* category_group) {
+ DCHECK(!strchr(category_group, '"'))
+ << "Category groups may not contain double quote";
+ // The g_category_groups is append only, avoid using a lock for the fast path.
+ size_t current_category_index = base::subtle::Acquire_Load(&g_category_index);
+
+ // Search for pre-existing category group.
+ for (size_t i = 0; i < current_category_index; ++i) {
+ if (strcmp(g_category_groups[i], category_group) == 0) {
+ return &g_category_group_enabled[i];
+ }
+ }
+
+ unsigned char* category_group_enabled = NULL;
+ // This is the slow path: the lock is not held in the case above, so more
+ // than one thread could have reached here trying to add the same category.
+  // Only hold the lock when actually appending a new category, and
+  // check the category groups again.
+ AutoLock lock(lock_);
+ size_t category_index = base::subtle::Acquire_Load(&g_category_index);
+ for (size_t i = 0; i < category_index; ++i) {
+ if (strcmp(g_category_groups[i], category_group) == 0) {
+ return &g_category_group_enabled[i];
+ }
+ }
+
+ // Create a new category group.
+ DCHECK(category_index < MAX_CATEGORY_GROUPS)
+ << "must increase MAX_CATEGORY_GROUPS";
+ if (category_index < MAX_CATEGORY_GROUPS) {
+ // Don't hold on to the category_group pointer, so that we can create
+ // category groups with strings not known at compile time (this is
+ // required by SetWatchEvent).
+ const char* new_group = strdup(category_group);
+ ANNOTATE_LEAKING_OBJECT_PTR(new_group);
+ g_category_groups[category_index] = new_group;
+ DCHECK(!g_category_group_enabled[category_index]);
+ // Note that if both included and excluded patterns in the
+ // TraceConfig are empty, we exclude nothing,
+ // thereby enabling this category group.
+ UpdateCategoryGroupEnabledFlag(category_index);
+ category_group_enabled = &g_category_group_enabled[category_index];
+ // Update the max index now.
+ base::subtle::Release_Store(&g_category_index, category_index + 1);
+ } else {
+ category_group_enabled =
+ &g_category_group_enabled[g_category_categories_exhausted];
+ }
+ return category_group_enabled;
+}
+
+void TraceLog::GetKnownCategoryGroups(
+ std::vector<std::string>* category_groups) {
+ AutoLock lock(lock_);
+ category_groups->push_back(
+ g_category_groups[g_category_trace_event_overhead]);
+ size_t category_index = base::subtle::NoBarrier_Load(&g_category_index);
+ for (size_t i = g_num_builtin_categories; i < category_index; i++)
+ category_groups->push_back(g_category_groups[i]);
+}
+
+void TraceLog::SetEnabled(const TraceConfig& trace_config, Mode mode) {
+ std::vector<EnabledStateObserver*> observer_list;
+ {
+ AutoLock lock(lock_);
+
+ // Can't enable tracing when Flush() is in progress.
+ DCHECK(!flush_task_runner_);
+
+ InternalTraceOptions new_options =
+ GetInternalOptionsFromTraceConfig(trace_config);
+
+ InternalTraceOptions old_options = trace_options();
+
+ if (IsEnabled()) {
+ if (new_options != old_options) {
+ DLOG(ERROR) << "Attempting to re-enable tracing with a different "
+ << "set of options.";
+ }
+
+ if (mode != mode_) {
+ DLOG(ERROR) << "Attempting to re-enable tracing with a different mode.";
+ }
+
+ trace_config_.Merge(trace_config);
+ UpdateCategoryGroupEnabledFlags();
+ return;
+ }
+
+ if (dispatching_to_observer_list_) {
+ DLOG(ERROR)
+ << "Cannot manipulate TraceLog::Enabled state from an observer.";
+ return;
+ }
+
+ mode_ = mode;
+
+ if (new_options != old_options) {
+ subtle::NoBarrier_Store(&trace_options_, new_options);
+ UseNextTraceBuffer();
+ }
+
+ num_traces_recorded_++;
+
+ trace_config_ = TraceConfig(trace_config);
+ UpdateCategoryGroupEnabledFlags();
+ UpdateSyntheticDelaysFromTraceConfig();
+
+ if (new_options & kInternalEnableSampling) {
+ sampling_thread_.reset(new TraceSamplingThread);
+ sampling_thread_->RegisterSampleBucket(
+ &g_trace_state[0], "bucket0",
+ Bind(&TraceSamplingThread::DefaultSamplingCallback));
+ sampling_thread_->RegisterSampleBucket(
+ &g_trace_state[1], "bucket1",
+ Bind(&TraceSamplingThread::DefaultSamplingCallback));
+ sampling_thread_->RegisterSampleBucket(
+ &g_trace_state[2], "bucket2",
+ Bind(&TraceSamplingThread::DefaultSamplingCallback));
+ if (!PlatformThread::Create(0, sampling_thread_.get(),
+ &sampling_thread_handle_)) {
+ DCHECK(false) << "failed to create thread";
+ }
+ }
+
+ dispatching_to_observer_list_ = true;
+ observer_list = enabled_state_observer_list_;
+ }
+ // Notify observers outside the lock in case they trigger trace events.
+ for (size_t i = 0; i < observer_list.size(); ++i)
+ observer_list[i]->OnTraceLogEnabled();
+
+ {
+ AutoLock lock(lock_);
+ dispatching_to_observer_list_ = false;
+ }
+}
+
+void TraceLog::SetArgumentFilterPredicate(
+ const ArgumentFilterPredicate& argument_filter_predicate) {
+ AutoLock lock(lock_);
+ DCHECK(!argument_filter_predicate.is_null());
+ DCHECK(argument_filter_predicate_.is_null());
+ argument_filter_predicate_ = argument_filter_predicate;
+}
+
+TraceLog::InternalTraceOptions TraceLog::GetInternalOptionsFromTraceConfig(
+ const TraceConfig& config) {
+ InternalTraceOptions ret =
+ config.IsSamplingEnabled() ? kInternalEnableSampling : kInternalNone;
+ if (config.IsArgumentFilterEnabled())
+ ret |= kInternalEnableArgumentFilter;
+ switch (config.GetTraceRecordMode()) {
+ case RECORD_UNTIL_FULL:
+ return ret | kInternalRecordUntilFull;
+ case RECORD_CONTINUOUSLY:
+ return ret | kInternalRecordContinuously;
+ case ECHO_TO_CONSOLE:
+ return ret | kInternalEchoToConsole;
+ case RECORD_AS_MUCH_AS_POSSIBLE:
+ return ret | kInternalRecordAsMuchAsPossible;
+ }
+ NOTREACHED();
+ return kInternalNone;
+}
+
+TraceConfig TraceLog::GetCurrentTraceConfig() const {
+ AutoLock lock(lock_);
+ return trace_config_;
+}
+
+void TraceLog::SetDisabled() {
+ AutoLock lock(lock_);
+ SetDisabledWhileLocked();
+}
+
+void TraceLog::SetDisabledWhileLocked() {
+ lock_.AssertAcquired();
+
+ if (!IsEnabled())
+ return;
+
+ if (dispatching_to_observer_list_) {
+ DLOG(ERROR)
+ << "Cannot manipulate TraceLog::Enabled state from an observer.";
+ return;
+ }
+
+ mode_ = DISABLED;
+
+ if (sampling_thread_.get()) {
+ // Stop the sampling thread.
+ sampling_thread_->Stop();
+ lock_.Release();
+ PlatformThread::Join(sampling_thread_handle_);
+ lock_.Acquire();
+ sampling_thread_handle_ = PlatformThreadHandle();
+ sampling_thread_.reset();
+ }
+
+ trace_config_.Clear();
+ subtle::NoBarrier_Store(&watch_category_, 0);
+ watch_event_name_ = "";
+ UpdateCategoryGroupEnabledFlags();
+ AddMetadataEventsWhileLocked();
+
+ dispatching_to_observer_list_ = true;
+ std::vector<EnabledStateObserver*> observer_list =
+ enabled_state_observer_list_;
+
+ {
+ // Dispatch to observers outside the lock in case the observer triggers a
+ // trace event.
+ AutoUnlock unlock(lock_);
+ for (size_t i = 0; i < observer_list.size(); ++i)
+ observer_list[i]->OnTraceLogDisabled();
+ }
+ dispatching_to_observer_list_ = false;
+}
+
+int TraceLog::GetNumTracesRecorded() {
+ AutoLock lock(lock_);
+ if (!IsEnabled())
+ return -1;
+ return num_traces_recorded_;
+}
+
+void TraceLog::AddEnabledStateObserver(EnabledStateObserver* listener) {
+ enabled_state_observer_list_.push_back(listener);
+}
+
+void TraceLog::RemoveEnabledStateObserver(EnabledStateObserver* listener) {
+ std::vector<EnabledStateObserver*>::iterator it =
+ std::find(enabled_state_observer_list_.begin(),
+ enabled_state_observer_list_.end(), listener);
+ if (it != enabled_state_observer_list_.end())
+ enabled_state_observer_list_.erase(it);
+}
+
+bool TraceLog::HasEnabledStateObserver(EnabledStateObserver* listener) const {
+ std::vector<EnabledStateObserver*>::const_iterator it =
+ std::find(enabled_state_observer_list_.begin(),
+ enabled_state_observer_list_.end(), listener);
+ return it != enabled_state_observer_list_.end();
+}
+
+TraceLogStatus TraceLog::GetStatus() const {
+ AutoLock lock(lock_);
+ TraceLogStatus result;
+ result.event_capacity = logged_events_->Capacity();
+ result.event_count = logged_events_->Size();
+ return result;
+}
+
+bool TraceLog::BufferIsFull() const {
+ AutoLock lock(lock_);
+ return logged_events_->IsFull();
+}
+
+TraceEvent* TraceLog::AddEventToThreadSharedChunkWhileLocked(
+ TraceEventHandle* handle,
+ bool check_buffer_is_full) {
+ lock_.AssertAcquired();
+
+ if (thread_shared_chunk_ && thread_shared_chunk_->IsFull()) {
+ logged_events_->ReturnChunk(thread_shared_chunk_index_,
+ thread_shared_chunk_.Pass());
+ }
+
+ if (!thread_shared_chunk_) {
+ thread_shared_chunk_ =
+ logged_events_->GetChunk(&thread_shared_chunk_index_);
+ if (check_buffer_is_full)
+ CheckIfBufferIsFullWhileLocked();
+ }
+ if (!thread_shared_chunk_)
+ return NULL;
+
+ size_t event_index;
+ TraceEvent* trace_event = thread_shared_chunk_->AddTraceEvent(&event_index);
+ if (trace_event && handle) {
+ MakeHandle(thread_shared_chunk_->seq(), thread_shared_chunk_index_,
+ event_index, handle);
+ }
+ return trace_event;
+}
+
+void TraceLog::CheckIfBufferIsFullWhileLocked() {
+ lock_.AssertAcquired();
+ if (logged_events_->IsFull()) {
+ if (buffer_limit_reached_timestamp_.is_null()) {
+ buffer_limit_reached_timestamp_ = OffsetNow();
+ }
+ SetDisabledWhileLocked();
+ }
+}
+
+void TraceLog::SetEventCallbackEnabled(const TraceConfig& trace_config,
+ EventCallback cb) {
+ AutoLock lock(lock_);
+ subtle::NoBarrier_Store(&event_callback_,
+ reinterpret_cast<subtle::AtomicWord>(cb));
+ event_callback_trace_config_ = trace_config;
+ UpdateCategoryGroupEnabledFlags();
+};
+
+void TraceLog::SetEventCallbackDisabled() {
+ AutoLock lock(lock_);
+ subtle::NoBarrier_Store(&event_callback_, 0);
+ UpdateCategoryGroupEnabledFlags();
+}
+
+// Flush() works as the following:
+// 1. Flush() is called in thread A whose task runner is saved in
+// flush_task_runner_;
+// 2. If thread_message_loops_ is not empty, thread A posts task to each message
+// loop to flush the thread local buffers; otherwise finish the flush;
+// 3. FlushCurrentThread() deletes the thread local event buffer:
+// - The last batch of events of the thread are flushed into the main buffer;
+// - The message loop will be removed from thread_message_loops_;
+// If this is the last message loop, finish the flush;
+// 4. If any thread hasn't finished its flush in time, finish the flush.
+void TraceLog::Flush(const TraceLog::OutputCallback& cb,
+ bool use_worker_thread) {
+ FlushInternal(cb, use_worker_thread, false);
+}
+
+void TraceLog::CancelTracing(const OutputCallback& cb) {
+ SetDisabled();
+ FlushInternal(cb, false, true);
+}
+
+void TraceLog::FlushInternal(const TraceLog::OutputCallback& cb,
+ bool use_worker_thread,
+ bool discard_events) {
+ use_worker_thread_ = use_worker_thread;
+ if (IsEnabled()) {
+ // Can't flush when tracing is enabled because otherwise PostTask would
+ // - generate more trace events;
+ // - deschedule the calling thread on some platforms causing inaccurate
+ // timing of the trace events.
+ scoped_refptr<RefCountedString> empty_result = new RefCountedString;
+ if (!cb.is_null())
+ cb.Run(empty_result, false);
+ LOG(WARNING) << "Ignored TraceLog::Flush called when tracing is enabled";
+ return;
+ }
+
+ int generation = this->generation();
+ // Copy of thread_message_loops_ to be used without locking.
+ std::vector<scoped_refptr<SingleThreadTaskRunner>>
+ thread_message_loop_task_runners;
+ {
+ AutoLock lock(lock_);
+ DCHECK(!flush_task_runner_);
+ flush_task_runner_ = ThreadTaskRunnerHandle::IsSet()
+ ? ThreadTaskRunnerHandle::Get()
+ : nullptr;
+ DCHECK_IMPLIES(thread_message_loops_.size(), flush_task_runner_);
+ flush_output_callback_ = cb;
+
+ if (thread_shared_chunk_) {
+ logged_events_->ReturnChunk(thread_shared_chunk_index_,
+ thread_shared_chunk_.Pass());
+ }
+
+ if (thread_message_loops_.size()) {
+ for (hash_set<MessageLoop*>::const_iterator it =
+ thread_message_loops_.begin();
+ it != thread_message_loops_.end(); ++it) {
+ thread_message_loop_task_runners.push_back((*it)->task_runner());
+ }
+ }
+ }
+
+ if (thread_message_loop_task_runners.size()) {
+ for (size_t i = 0; i < thread_message_loop_task_runners.size(); ++i) {
+ thread_message_loop_task_runners[i]->PostTask(
+ FROM_HERE, Bind(&TraceLog::FlushCurrentThread, Unretained(this),
+ generation, discard_events));
+ }
+ flush_task_runner_->PostDelayedTask(
+ FROM_HERE, Bind(&TraceLog::OnFlushTimeout, Unretained(this), generation,
+ discard_events),
+ TimeDelta::FromMilliseconds(kThreadFlushTimeoutMs));
+ return;
+ }
+
+ FinishFlush(generation, discard_events);
+}
+
+// Usually it runs on a different thread.
+void TraceLog::ConvertTraceEventsToTraceFormat(
+ scoped_ptr<TraceBuffer> logged_events,
+ const OutputCallback& flush_output_callback,
+ const ArgumentFilterPredicate& argument_filter_predicate) {
+ if (flush_output_callback.is_null())
+ return;
+
+  // The callback needs to be called at least once even if there are no events
+  // to let the caller know the completion of the flush.
+ scoped_refptr<RefCountedString> json_events_str_ptr = new RefCountedString();
+ while (const TraceBufferChunk* chunk = logged_events->NextChunk()) {
+ for (size_t j = 0; j < chunk->size(); ++j) {
+ size_t size = json_events_str_ptr->size();
+ if (size > kTraceEventBufferSizeInBytes) {
+ flush_output_callback.Run(json_events_str_ptr, true);
+ json_events_str_ptr = new RefCountedString();
+ } else if (size) {
+ json_events_str_ptr->data().append(",\n");
+ }
+ chunk->GetEventAt(j)->AppendAsJSON(&(json_events_str_ptr->data()),
+ argument_filter_predicate);
+ }
+ }
+ flush_output_callback.Run(json_events_str_ptr, false);
+}
+
+void TraceLog::FinishFlush(int generation, bool discard_events) {
+ scoped_ptr<TraceBuffer> previous_logged_events;
+ OutputCallback flush_output_callback;
+ ArgumentFilterPredicate argument_filter_predicate;
+
+ if (!CheckGeneration(generation))
+ return;
+
+ {
+ AutoLock lock(lock_);
+
+ previous_logged_events.swap(logged_events_);
+ UseNextTraceBuffer();
+ thread_message_loops_.clear();
+
+ flush_task_runner_ = NULL;
+ flush_output_callback = flush_output_callback_;
+ flush_output_callback_.Reset();
+
+ if (trace_options() & kInternalEnableArgumentFilter) {
+ CHECK(!argument_filter_predicate_.is_null());
+ argument_filter_predicate = argument_filter_predicate_;
+ }
+ }
+
+ if (discard_events) {
+ if (!flush_output_callback.is_null()) {
+ scoped_refptr<RefCountedString> empty_result = new RefCountedString;
+ flush_output_callback.Run(empty_result, false);
+ }
+ return;
+ }
+
+ if (use_worker_thread_ &&
+ WorkerPool::PostTask(
+ FROM_HERE, Bind(&TraceLog::ConvertTraceEventsToTraceFormat,
+ Passed(&previous_logged_events),
+ flush_output_callback, argument_filter_predicate),
+ true)) {
+ return;
+ }
+
+ ConvertTraceEventsToTraceFormat(previous_logged_events.Pass(),
+ flush_output_callback,
+ argument_filter_predicate);
+}
+
+// Run in each thread holding a local event buffer.
+void TraceLog::FlushCurrentThread(int generation, bool discard_events) {
+ {
+ AutoLock lock(lock_);
+ if (!CheckGeneration(generation) || !flush_task_runner_) {
+ // This is late. The corresponding flush has finished.
+ return;
+ }
+ }
+
+ // This will flush the thread local buffer.
+ delete thread_local_event_buffer_.Get();
+
+ AutoLock lock(lock_);
+ if (!CheckGeneration(generation) || !flush_task_runner_ ||
+ thread_message_loops_.size())
+ return;
+
+ flush_task_runner_->PostTask(
+ FROM_HERE, Bind(&TraceLog::FinishFlush, Unretained(this), generation,
+ discard_events));
+}
+
+void TraceLog::OnFlushTimeout(int generation, bool discard_events) {
+ {
+ AutoLock lock(lock_);
+ if (!CheckGeneration(generation) || !flush_task_runner_) {
+ // Flush has finished before timeout.
+ return;
+ }
+
+ LOG(WARNING)
+ << "The following threads haven't finished flush in time. "
+ "If this happens stably for some thread, please call "
+ "TraceLog::GetInstance()->SetCurrentThreadBlocksMessageLoop() from "
+ "the thread to avoid its trace events from being lost.";
+ for (hash_set<MessageLoop*>::const_iterator it =
+ thread_message_loops_.begin();
+ it != thread_message_loops_.end(); ++it) {
+ LOG(WARNING) << "Thread: " << (*it)->thread_name();
+ }
+ }
+ FinishFlush(generation, discard_events);
+}
+
+void TraceLog::FlushButLeaveBufferIntact(
+ const TraceLog::OutputCallback& flush_output_callback) {
+ scoped_ptr<TraceBuffer> previous_logged_events;
+ ArgumentFilterPredicate argument_filter_predicate;
+ {
+ AutoLock lock(lock_);
+ AddMetadataEventsWhileLocked();
+ if (thread_shared_chunk_) {
+ // Return the chunk to the main buffer to flush the sampling data.
+ logged_events_->ReturnChunk(thread_shared_chunk_index_,
+ thread_shared_chunk_.Pass());
+ }
+ previous_logged_events = logged_events_->CloneForIteration().Pass();
+
+ if (trace_options() & kInternalEnableArgumentFilter) {
+ CHECK(!argument_filter_predicate_.is_null());
+ argument_filter_predicate = argument_filter_predicate_;
+ }
+ } // release lock
+
+ ConvertTraceEventsToTraceFormat(previous_logged_events.Pass(),
+ flush_output_callback,
+ argument_filter_predicate);
+}
+
+void TraceLog::UseNextTraceBuffer() {
+ logged_events_.reset(CreateTraceBuffer());
+ subtle::NoBarrier_AtomicIncrement(&generation_, 1);
+ thread_shared_chunk_.reset();
+ thread_shared_chunk_index_ = 0;
+}
+
+TraceEventHandle TraceLog::AddTraceEvent(
+ char phase,
+ const unsigned char* category_group_enabled,
+ const char* name,
+ unsigned long long id,
+ int num_args,
+ const char** arg_names,
+ const unsigned char* arg_types,
+ const unsigned long long* arg_values,
+ const scoped_refptr<ConvertableToTraceFormat>* convertable_values,
+ unsigned int flags) {
+ int thread_id = static_cast<int>(base::PlatformThread::CurrentId());
+ base::TraceTicks now = base::TraceTicks::Now();
+ return AddTraceEventWithThreadIdAndTimestamp(
+ phase, category_group_enabled, name, id, ::trace_event_internal::kNoId,
+ thread_id, now, num_args, arg_names, arg_types, arg_values,
+ convertable_values, flags);
+}
+
+TraceEventHandle TraceLog::AddTraceEventWithContextId(
+ char phase,
+ const unsigned char* category_group_enabled,
+ const char* name,
+ unsigned long long id,
+ unsigned long long context_id,
+ int num_args,
+ const char** arg_names,
+ const unsigned char* arg_types,
+ const unsigned long long* arg_values,
+ const scoped_refptr<ConvertableToTraceFormat>* convertable_values,
+ unsigned int flags) {
+ int thread_id = static_cast<int>(base::PlatformThread::CurrentId());
+ base::TraceTicks now = base::TraceTicks::Now();
+ return AddTraceEventWithThreadIdAndTimestamp(
+ phase, category_group_enabled, name, id, context_id, thread_id, now,
+ num_args, arg_names, arg_types, arg_values, convertable_values,
+ flags | TRACE_EVENT_FLAG_HAS_CONTEXT_ID);
+}
+
+TraceEventHandle TraceLog::AddTraceEventWithThreadIdAndTimestamp(
+ char phase,
+ const unsigned char* category_group_enabled,
+ const char* name,
+ unsigned long long id,
+ unsigned long long context_id,
+ int thread_id,
+ const TraceTicks& timestamp,
+ int num_args,
+ const char** arg_names,
+ const unsigned char* arg_types,
+ const unsigned long long* arg_values,
+ const scoped_refptr<ConvertableToTraceFormat>* convertable_values,
+ unsigned int flags) {
+ TraceEventHandle handle = {0, 0, 0};
+ if (!*category_group_enabled)
+ return handle;
+
+ // Avoid re-entrance of AddTraceEvent. This may happen in GPU process when
+ // ECHO_TO_CONSOLE is enabled: AddTraceEvent -> LOG(ERROR) ->
+ // GpuProcessLogMessageHandler -> PostPendingTask -> TRACE_EVENT ...
+ if (thread_is_in_trace_event_.Get())
+ return handle;
+
+ AutoThreadLocalBoolean thread_is_in_trace_event(&thread_is_in_trace_event_);
+
+ DCHECK(name);
+ DCHECK(!timestamp.is_null());
+
+ if (flags & TRACE_EVENT_FLAG_MANGLE_ID)
+ id = MangleEventId(id);
+
+ TraceTicks offset_event_timestamp = OffsetTimestamp(timestamp);
+ TraceTicks now = flags & TRACE_EVENT_FLAG_EXPLICIT_TIMESTAMP
+ ? OffsetNow()
+ : offset_event_timestamp;
+ ThreadTicks thread_now = ThreadNow();
+
+ // |thread_local_event_buffer_| can be null if the current thread doesn't have
+ // a message loop or the message loop is blocked.
+ InitializeThreadLocalEventBufferIfSupported();
+ auto thread_local_event_buffer = thread_local_event_buffer_.Get();
+
+ // Check and update the current thread name only if the event is for the
+ // current thread to avoid locks in most cases.
+ if (thread_id == static_cast<int>(PlatformThread::CurrentId())) {
+ const char* new_name =
+ ThreadIdNameManager::GetInstance()->GetName(thread_id);
+ // Check if the thread name has been set or changed since the previous
+ // call (if any), but don't bother if the new name is empty. Note this will
+ // not detect a thread name change within the same char* buffer address: we
+ // favor common case performance over corner case correctness.
+ if (new_name != g_current_thread_name.Get().Get() && new_name &&
+ *new_name) {
+ g_current_thread_name.Get().Set(new_name);
+
+ AutoLock thread_info_lock(thread_info_lock_);
+
+ hash_map<int, std::string>::iterator existing_name =
+ thread_names_.find(thread_id);
+ if (existing_name == thread_names_.end()) {
+ // This is a new thread id, and a new name.
+ thread_names_[thread_id] = new_name;
+ } else {
+ // This is a thread id that we've seen before, but potentially with a
+ // new name.
+ std::vector<StringPiece> existing_names = base::SplitStringPiece(
+ existing_name->second, ",", base::KEEP_WHITESPACE,
+ base::SPLIT_WANT_NONEMPTY);
+ bool found = std::find(existing_names.begin(), existing_names.end(),
+ new_name) != existing_names.end();
+ if (!found) {
+ if (existing_names.size())
+ existing_name->second.push_back(',');
+ existing_name->second.append(new_name);
+ }
+ }
+ }
+ }
+
+#if defined(OS_WIN)
+ // This is done sooner rather than later, to avoid creating the event and
+ // acquiring the lock, which is not needed for ETW as it's already threadsafe.
+ if (*category_group_enabled & ENABLED_FOR_ETW_EXPORT)
+ TraceEventETWExport::AddEvent(phase, category_group_enabled, name, id,
+ num_args, arg_names, arg_types, arg_values,
+ convertable_values);
+#endif // OS_WIN
+
+ std::string console_message;
+ if (*category_group_enabled &
+ (ENABLED_FOR_RECORDING | ENABLED_FOR_MONITORING)) {
+ OptionalAutoLock lock(&lock_);
+
+ TraceEvent* trace_event = NULL;
+ if (thread_local_event_buffer) {
+ trace_event = thread_local_event_buffer->AddTraceEvent(&handle);
+ } else {
+ lock.EnsureAcquired();
+ trace_event = AddEventToThreadSharedChunkWhileLocked(&handle, true);
+ }
+
+ if (trace_event) {
+ trace_event->Initialize(thread_id, offset_event_timestamp, thread_now,
+ phase, category_group_enabled, name, id,
+ context_id, num_args, arg_names, arg_types,
+ arg_values, convertable_values, flags);
+
+#if defined(OS_ANDROID)
+ trace_event->SendToATrace();
+#endif
+ }
+
+ if (trace_options() & kInternalEchoToConsole) {
+ console_message = EventToConsoleMessage(
+ phase == TRACE_EVENT_PHASE_COMPLETE ? TRACE_EVENT_PHASE_BEGIN : phase,
+ timestamp, trace_event);
+ }
+ }
+
+ if (console_message.size())
+ LOG(ERROR) << console_message;
+
+ if (reinterpret_cast<const unsigned char*>(
+ subtle::NoBarrier_Load(&watch_category_)) == category_group_enabled) {
+ bool event_name_matches;
+ WatchEventCallback watch_event_callback_copy;
+ {
+ AutoLock lock(lock_);
+ event_name_matches = watch_event_name_ == name;
+ watch_event_callback_copy = watch_event_callback_;
+ }
+ if (event_name_matches) {
+ if (!watch_event_callback_copy.is_null())
+ watch_event_callback_copy.Run();
+ }
+ }
+
+ if (*category_group_enabled & ENABLED_FOR_EVENT_CALLBACK) {
+ EventCallback event_callback = reinterpret_cast<EventCallback>(
+ subtle::NoBarrier_Load(&event_callback_));
+ if (event_callback) {
+ event_callback(
+ offset_event_timestamp,
+ phase == TRACE_EVENT_PHASE_COMPLETE ? TRACE_EVENT_PHASE_BEGIN : phase,
+ category_group_enabled, name, id, num_args, arg_names, arg_types,
+ arg_values, flags);
+ }
+ }
+
+ if (thread_local_event_buffer)
+ thread_local_event_buffer->ReportOverhead(now, thread_now);
+
+ return handle;
+}
+
+// May be called when a COMPLETE event ends and the unfinished event has been
+// recycled (phase == TRACE_EVENT_PHASE_END and trace_event == NULL).
+std::string TraceLog::EventToConsoleMessage(unsigned char phase,
+ const TraceTicks& timestamp,
+ TraceEvent* trace_event) {
+ AutoLock thread_info_lock(thread_info_lock_);
+
+ // The caller should translate TRACE_EVENT_PHASE_COMPLETE to
+  // TRACE_EVENT_PHASE_BEGIN or TRACE_EVENT_PHASE_END.
+ DCHECK(phase != TRACE_EVENT_PHASE_COMPLETE);
+
+ TimeDelta duration;
+ int thread_id =
+ trace_event ? trace_event->thread_id() : PlatformThread::CurrentId();
+ if (phase == TRACE_EVENT_PHASE_END) {
+ duration = timestamp - thread_event_start_times_[thread_id].top();
+ thread_event_start_times_[thread_id].pop();
+ }
+
+ std::string thread_name = thread_names_[thread_id];
+ if (thread_colors_.find(thread_name) == thread_colors_.end())
+ thread_colors_[thread_name] = (thread_colors_.size() % 6) + 1;
+
+ std::ostringstream log;
+ log << base::StringPrintf("%s: \x1b[0;3%dm", thread_name.c_str(),
+ thread_colors_[thread_name]);
+
+ size_t depth = 0;
+ if (thread_event_start_times_.find(thread_id) !=
+ thread_event_start_times_.end())
+ depth = thread_event_start_times_[thread_id].size();
+
+ for (size_t i = 0; i < depth; ++i)
+ log << "| ";
+
+ if (trace_event)
+ trace_event->AppendPrettyPrinted(&log);
+ if (phase == TRACE_EVENT_PHASE_END)
+ log << base::StringPrintf(" (%.3f ms)", duration.InMillisecondsF());
+
+ log << "\x1b[0;m";
+
+ if (phase == TRACE_EVENT_PHASE_BEGIN)
+ thread_event_start_times_[thread_id].push(timestamp);
+
+ return log.str();
+}
+
+void TraceLog::AddTraceEventEtw(char phase,
+ const char* name,
+ const void* id,
+ const char* extra) {
+#if defined(OS_WIN)
+ TraceEventETWProvider::Trace(name, phase, id, extra);
+#endif
+ INTERNAL_TRACE_EVENT_ADD(phase, "ETW Trace Event", name,
+ TRACE_EVENT_FLAG_COPY, "id", id, "extra", extra);
+}
+
+void TraceLog::AddTraceEventEtw(char phase,
+ const char* name,
+ const void* id,
+ const std::string& extra) {
+#if defined(OS_WIN)
+ TraceEventETWProvider::Trace(name, phase, id, extra);
+#endif
+ INTERNAL_TRACE_EVENT_ADD(phase, "ETW Trace Event", name,
+ TRACE_EVENT_FLAG_COPY, "id", id, "extra", extra);
+}
+
+void TraceLog::UpdateTraceEventDuration(
+ const unsigned char* category_group_enabled,
+ const char* name,
+ TraceEventHandle handle) {
+ // Avoid re-entrance of AddTraceEvent. This may happen in GPU process when
+ // ECHO_TO_CONSOLE is enabled: AddTraceEvent -> LOG(ERROR) ->
+ // GpuProcessLogMessageHandler -> PostPendingTask -> TRACE_EVENT ...
+ if (thread_is_in_trace_event_.Get())
+ return;
+
+ AutoThreadLocalBoolean thread_is_in_trace_event(&thread_is_in_trace_event_);
+
+ ThreadTicks thread_now = ThreadNow();
+ TraceTicks now = OffsetNow();
+
+ std::string console_message;
+ if (*category_group_enabled & ENABLED_FOR_RECORDING) {
+ OptionalAutoLock lock(&lock_);
+
+ TraceEvent* trace_event = GetEventByHandleInternal(handle, &lock);
+ if (trace_event) {
+ DCHECK(trace_event->phase() == TRACE_EVENT_PHASE_COMPLETE);
+ trace_event->UpdateDuration(now, thread_now);
+#if defined(OS_ANDROID)
+ trace_event->SendToATrace();
+#endif
+ }
+
+ if (trace_options() & kInternalEchoToConsole) {
+ console_message =
+ EventToConsoleMessage(TRACE_EVENT_PHASE_END, now, trace_event);
+ }
+ }
+
+ if (console_message.size())
+ LOG(ERROR) << console_message;
+
+ if (*category_group_enabled & ENABLED_FOR_EVENT_CALLBACK) {
+ EventCallback event_callback = reinterpret_cast<EventCallback>(
+ subtle::NoBarrier_Load(&event_callback_));
+ if (event_callback) {
+ event_callback(now, TRACE_EVENT_PHASE_END, category_group_enabled, name,
+ trace_event_internal::kNoId, trace_event_internal::kNoId,
+ NULL, NULL, NULL, TRACE_EVENT_FLAG_NONE);
+ }
+ }
+}
+
+void TraceLog::SetWatchEvent(const std::string& category_name,
+ const std::string& event_name,
+ const WatchEventCallback& callback) {
+ const unsigned char* category =
+ GetCategoryGroupEnabled(category_name.c_str());
+ AutoLock lock(lock_);
+ subtle::NoBarrier_Store(&watch_category_,
+ reinterpret_cast<subtle::AtomicWord>(category));
+ watch_event_name_ = event_name;
+ watch_event_callback_ = callback;
+}
+
+void TraceLog::CancelWatchEvent() {
+ AutoLock lock(lock_);
+ subtle::NoBarrier_Store(&watch_category_, 0);
+ watch_event_name_ = "";
+ watch_event_callback_.Reset();
+}
+
+uint64 TraceLog::MangleEventId(uint64 id) {
+ return id ^ process_id_hash_;
+}
+
+void TraceLog::AddMetadataEventsWhileLocked() {
+ lock_.AssertAcquired();
+
+#if !defined(OS_NACL) // NaCl shouldn't expose the process id.
+ InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, false),
+ 0, "num_cpus", "number",
+ base::SysInfo::NumberOfProcessors());
+#endif
+
+ int current_thread_id = static_cast<int>(base::PlatformThread::CurrentId());
+ if (process_sort_index_ != 0) {
+ InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, false),
+ current_thread_id, "process_sort_index",
+ "sort_index", process_sort_index_);
+ }
+
+ if (process_name_.size()) {
+ InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, false),
+ current_thread_id, "process_name", "name",
+ process_name_);
+ }
+
+ if (process_labels_.size() > 0) {
+ std::vector<std::string> labels;
+ for (base::hash_map<int, std::string>::iterator it =
+ process_labels_.begin();
+ it != process_labels_.end(); it++) {
+ labels.push_back(it->second);
+ }
+ InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, false),
+ current_thread_id, "process_labels", "labels",
+ base::JoinString(labels, ","));
+ }
+
+ // Thread sort indices.
+ for (hash_map<int, int>::iterator it = thread_sort_indices_.begin();
+ it != thread_sort_indices_.end(); it++) {
+ if (it->second == 0)
+ continue;
+ InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, false),
+ it->first, "thread_sort_index", "sort_index",
+ it->second);
+ }
+
+ // Thread names.
+ AutoLock thread_info_lock(thread_info_lock_);
+ for (hash_map<int, std::string>::iterator it = thread_names_.begin();
+ it != thread_names_.end(); it++) {
+ if (it->second.empty())
+ continue;
+ InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, false),
+ it->first, "thread_name", "name", it->second);
+ }
+
+ // If buffer is full, add a metadata record to report this.
+ if (!buffer_limit_reached_timestamp_.is_null()) {
+ InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, false),
+ current_thread_id, "trace_buffer_overflowed",
+ "overflowed_at_ts",
+ buffer_limit_reached_timestamp_);
+ }
+}
+
+void TraceLog::WaitSamplingEventForTesting() {
+ if (!sampling_thread_)
+ return;
+ sampling_thread_->WaitSamplingEventForTesting();
+}
+
+void TraceLog::DeleteForTesting() {
+ DeleteTraceLogForTesting::Delete();
+}
+
+TraceEvent* TraceLog::GetEventByHandle(TraceEventHandle handle) {
+ return GetEventByHandleInternal(handle, NULL);
+}
+
+TraceEvent* TraceLog::GetEventByHandleInternal(TraceEventHandle handle,
+ OptionalAutoLock* lock) {
+ if (!handle.chunk_seq)
+ return NULL;
+
+ if (thread_local_event_buffer_.Get()) {
+ TraceEvent* trace_event =
+ thread_local_event_buffer_.Get()->GetEventByHandle(handle);
+ if (trace_event)
+ return trace_event;
+ }
+
+  // The event is no longer under the control of the thread local buffer.
+ // Try to get the event from the main buffer with a lock.
+ if (lock)
+ lock->EnsureAcquired();
+
+ if (thread_shared_chunk_ &&
+ handle.chunk_index == thread_shared_chunk_index_) {
+ return handle.chunk_seq == thread_shared_chunk_->seq()
+ ? thread_shared_chunk_->GetEventAt(handle.event_index)
+ : NULL;
+ }
+
+ return logged_events_->GetEventByHandle(handle);
+}
+
+void TraceLog::SetProcessID(int process_id) {
+ process_id_ = process_id;
+ // Create a FNV hash from the process ID for XORing.
+ // See http://isthe.com/chongo/tech/comp/fnv/ for algorithm details.
+ unsigned long long offset_basis = 14695981039346656037ull;
+ unsigned long long fnv_prime = 1099511628211ull;
+ unsigned long long pid = static_cast<unsigned long long>(process_id_);
+ process_id_hash_ = (offset_basis ^ pid) * fnv_prime;
+}
+
+void TraceLog::SetProcessSortIndex(int sort_index) {
+ AutoLock lock(lock_);
+ process_sort_index_ = sort_index;
+}
+
+void TraceLog::SetProcessName(const std::string& process_name) {
+ AutoLock lock(lock_);
+ process_name_ = process_name;
+}
+
+void TraceLog::UpdateProcessLabel(int label_id,
+ const std::string& current_label) {
+ if (!current_label.length())
+ return RemoveProcessLabel(label_id);
+
+ AutoLock lock(lock_);
+ process_labels_[label_id] = current_label;
+}
+
+void TraceLog::RemoveProcessLabel(int label_id) {
+ AutoLock lock(lock_);
+ base::hash_map<int, std::string>::iterator it =
+ process_labels_.find(label_id);
+ if (it == process_labels_.end())
+ return;
+
+ process_labels_.erase(it);
+}
+
+void TraceLog::SetThreadSortIndex(PlatformThreadId thread_id, int sort_index) {
+ AutoLock lock(lock_);
+ thread_sort_indices_[static_cast<int>(thread_id)] = sort_index;
+}
+
+void TraceLog::SetTimeOffset(TimeDelta offset) {
+ time_offset_ = offset;
+}
+
+size_t TraceLog::GetObserverCountForTest() const {
+ return enabled_state_observer_list_.size();
+}
+
+void TraceLog::SetCurrentThreadBlocksMessageLoop() {
+ thread_blocks_message_loop_.Set(true);
+ if (thread_local_event_buffer_.Get()) {
+ // This will flush the thread local buffer.
+ delete thread_local_event_buffer_.Get();
+ }
+}
+
+TraceBuffer* TraceLog::CreateTraceBuffer() {
+ InternalTraceOptions options = trace_options();
+ if (options & kInternalRecordContinuously)
+ return TraceBuffer::CreateTraceBufferRingBuffer(
+ kTraceEventRingBufferChunks);
+ else if ((options & kInternalEnableSampling) && mode_ == MONITORING_MODE)
+ return TraceBuffer::CreateTraceBufferRingBuffer(
+ kMonitorTraceEventBufferChunks);
+ else if (options & kInternalEchoToConsole)
+ return TraceBuffer::CreateTraceBufferRingBuffer(
+ kEchoToConsoleTraceEventBufferChunks);
+ else if (options & kInternalRecordAsMuchAsPossible)
+ return TraceBuffer::CreateTraceBufferVectorOfSize(
+ kTraceEventVectorBigBufferChunks);
+ return TraceBuffer::CreateTraceBufferVectorOfSize(
+ kTraceEventVectorBufferChunks);
+}
+
+void ConvertableToTraceFormat::EstimateTraceMemoryOverhead(
+ TraceEventMemoryOverhead* overhead) {
+ overhead->Add("ConvertableToTraceFormat(Unknown)", sizeof(*this));
+}
+
+} // namespace trace_event
+} // namespace base
+
+namespace trace_event_internal {
+
+ScopedTraceBinaryEfficient::ScopedTraceBinaryEfficient(
+ const char* category_group,
+ const char* name) {
+ // The single atom works because for now the category_group can only be "gpu".
+ DCHECK_EQ(strcmp(category_group, "gpu"), 0);
+ static TRACE_EVENT_API_ATOMIC_WORD atomic = 0;
+ INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO_CUSTOM_VARIABLES(
+ category_group, atomic, category_group_enabled_);
+ name_ = name;
+ if (*category_group_enabled_) {
+ event_handle_ =
+ TRACE_EVENT_API_ADD_TRACE_EVENT_WITH_THREAD_ID_AND_TIMESTAMP(
+ TRACE_EVENT_PHASE_COMPLETE, category_group_enabled_, name, kNoId,
+ kNoId, static_cast<int>(base::PlatformThread::CurrentId()),
+ base::TraceTicks::Now(), 0, NULL, NULL, NULL, NULL,
+ TRACE_EVENT_FLAG_NONE);
+ }
+}
+
+ScopedTraceBinaryEfficient::~ScopedTraceBinaryEfficient() {
+ if (*category_group_enabled_) {
+ TRACE_EVENT_API_UPDATE_TRACE_EVENT_DURATION(category_group_enabled_, name_,
+ event_handle_);
+ }
+}
+
+} // namespace trace_event_internal
diff --git a/base/trace_event/trace_log.h b/base/trace_event/trace_log.h
new file mode 100644
index 0000000..985fe473
--- /dev/null
+++ b/base/trace_event/trace_log.h
@@ -0,0 +1,471 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TRACE_EVENT_TRACE_LOG_H_
+#define BASE_TRACE_EVENT_TRACE_LOG_H_
+
+#include "base/trace_event/memory_dump_provider.h"
+#include "base/trace_event/trace_config.h"
+#include "base/trace_event/trace_event_impl.h"
+
+// Older style trace macros with explicit id and extra data
+// Only these macros result in publishing data to ETW as currently implemented.
+// TODO(georgesak): Update/replace these with new ETW macros.
+#define TRACE_EVENT_BEGIN_ETW(name, id, extra) \
+ base::trace_event::TraceLog::AddTraceEventEtw( \
+ TRACE_EVENT_PHASE_BEGIN, name, reinterpret_cast<const void*>(id), extra)
+
+#define TRACE_EVENT_END_ETW(name, id, extra) \
+ base::trace_event::TraceLog::AddTraceEventEtw( \
+ TRACE_EVENT_PHASE_END, name, reinterpret_cast<const void*>(id), extra)
+
+#define TRACE_EVENT_INSTANT_ETW(name, id, extra) \
+ base::trace_event::TraceLog::AddTraceEventEtw( \
+ TRACE_EVENT_PHASE_INSTANT, name, reinterpret_cast<const void*>(id), \
+ extra)
+
+template <typename Type>
+struct DefaultSingletonTraits;
+
+namespace base {
+
+class RefCountedString;
+
+namespace trace_event {
+
+class TraceBuffer;
+class TraceBufferChunk;
+class TraceEvent;
+class TraceEventMemoryOverhead;
+class TraceSamplingThread;
+
+struct BASE_EXPORT TraceLogStatus {
+ TraceLogStatus();
+ ~TraceLogStatus();
+ size_t event_capacity;
+ size_t event_count;
+};
+
+class BASE_EXPORT TraceLog : public MemoryDumpProvider {
+ public:
+ enum Mode {
+ DISABLED = 0,
+ RECORDING_MODE,
+ MONITORING_MODE,
+ };
+
+ // The pointer returned from GetCategoryGroupEnabledInternal() points to a
+ // value with zero or more of the following bits. Used in this class only.
+ // The TRACE_EVENT macros should only use the value as a bool.
+ // These values must be in sync with macro values in TraceEvent.h in Blink.
+ enum CategoryGroupEnabledFlags {
+ // Category group enabled for the recording mode.
+ ENABLED_FOR_RECORDING = 1 << 0,
+ // Category group enabled for the monitoring mode.
+ ENABLED_FOR_MONITORING = 1 << 1,
+ // Category group enabled by SetEventCallbackEnabled().
+ ENABLED_FOR_EVENT_CALLBACK = 1 << 2,
+ // Category group enabled to export events to ETW.
+ ENABLED_FOR_ETW_EXPORT = 1 << 3
+ };
+
+ static TraceLog* GetInstance();
+
+ // Get set of known category groups. This can change as new code paths are
+ // reached. The known category groups are inserted into |category_groups|.
+ void GetKnownCategoryGroups(std::vector<std::string>* category_groups);
+
+ // Retrieves a copy (for thread-safety) of the current TraceConfig.
+ TraceConfig GetCurrentTraceConfig() const;
+
+ // Initializes the thread-local event buffer, if not already initialized and
+ // if the current thread supports that (has a message loop).
+ void InitializeThreadLocalEventBufferIfSupported();
+
+ // Enables normal tracing (recording trace events in the trace buffer).
+ // See TraceConfig comments for details on how to control what categories
+ // will be traced. If tracing has already been enabled, |category_filter| will
+ // be merged into the current category filter.
+ void SetEnabled(const TraceConfig& trace_config, Mode mode);
+
+ // Disables normal tracing for all categories.
+ void SetDisabled();
+
+ bool IsEnabled() { return mode_ != DISABLED; }
+
+ // The number of times we have begun recording traces. If tracing is off,
+ // returns -1. If tracing is on, then it returns the number of times we have
+ // recorded a trace. By watching for this number to increment, you can
+ // passively discover when a new trace has begun. This is then used to
+ // implement the TRACE_EVENT_IS_NEW_TRACE() primitive.
+ int GetNumTracesRecorded();
+
+#if defined(OS_ANDROID)
+ void StartATrace();
+ void StopATrace();
+ void AddClockSyncMetadataEvent();
+#endif
+
+ // Enabled state listeners give a callback when tracing is enabled or
+ // disabled. This can be used to tie into other library's tracing systems
+ // on-demand.
+ class BASE_EXPORT EnabledStateObserver {
+ public:
+ virtual ~EnabledStateObserver() = default;
+
+ // Called just after the tracing system becomes enabled, outside of the
+ // |lock_|. TraceLog::IsEnabled() is true at this point.
+ virtual void OnTraceLogEnabled() = 0;
+
+ // Called just after the tracing system disables, outside of the |lock_|.
+ // TraceLog::IsEnabled() is false at this point.
+ virtual void OnTraceLogDisabled() = 0;
+ };
+ void AddEnabledStateObserver(EnabledStateObserver* listener);
+ void RemoveEnabledStateObserver(EnabledStateObserver* listener);
+ bool HasEnabledStateObserver(EnabledStateObserver* listener) const;
+
+ TraceLogStatus GetStatus() const;
+ bool BufferIsFull() const;
+
+ // Computes an estimate of the size of the TraceLog including all the retained
+ // objects.
+ void EstimateTraceMemoryOverhead(TraceEventMemoryOverhead* overhead);
+
+  // Not using base::Callback because it is limited to 7 parameters.
+ // Also, using primitive type allows directly passing callback from WebCore.
+ // WARNING: It is possible for the previously set callback to be called
+  // after a call to SetEventCallbackEnabled() that replaces it, or a call to
+ // SetEventCallbackDisabled() that disables the callback.
+ // This callback may be invoked on any thread.
+ // For TRACE_EVENT_PHASE_COMPLETE events, the client will still receive pairs
+ // of TRACE_EVENT_PHASE_BEGIN and TRACE_EVENT_PHASE_END events to keep the
+ // interface simple.
+ typedef void (*EventCallback)(TraceTicks timestamp,
+ char phase,
+ const unsigned char* category_group_enabled,
+ const char* name,
+ unsigned long long id,
+ int num_args,
+ const char* const arg_names[],
+ const unsigned char arg_types[],
+ const unsigned long long arg_values[],
+ unsigned int flags);
+
+ // Enable tracing for EventCallback.
+ void SetEventCallbackEnabled(const TraceConfig& trace_config,
+ EventCallback cb);
+ void SetEventCallbackDisabled();
+ void SetArgumentFilterPredicate(
+ const ArgumentFilterPredicate& argument_filter_predicate);
+
+ // Flush all collected events to the given output callback. The callback will
+ // be called one or more times either synchronously or asynchronously from
+ // the current thread with IPC-bite-size chunks. The string format is
+ // undefined. Use TraceResultBuffer to convert one or more trace strings to
+ // JSON. The callback can be null if the caller doesn't want any data.
+ // Due to the implementation of thread-local buffers, flush can't be
+ // done when tracing is enabled. If called when tracing is enabled, the
+ // callback will be called directly with (empty_string, false) to indicate
+ // the end of this unsuccessful flush. Flush does the serialization
+ // on the same thread if the caller doesn't set use_worker_thread explicitly.
+ typedef base::Callback<void(const scoped_refptr<base::RefCountedString>&,
+ bool has_more_events)> OutputCallback;
+ void Flush(const OutputCallback& cb, bool use_worker_thread = false);
+ void FlushButLeaveBufferIntact(const OutputCallback& flush_output_callback);
+
+ // Cancels tracing and discards collected data.
+ void CancelTracing(const OutputCallback& cb);
+
+ // Called by TRACE_EVENT* macros, don't call this directly.
+ // The name parameter is a category group for example:
+ // TRACE_EVENT0("renderer,webkit", "WebViewImpl::HandleInputEvent")
+ static const unsigned char* GetCategoryGroupEnabled(const char* name);
+ static const char* GetCategoryGroupName(
+ const unsigned char* category_group_enabled);
+
+ // Called by TRACE_EVENT* macros, don't call this directly.
+ // If |copy| is set, |name|, |arg_name1| and |arg_name2| will be deep copied
+ // into the event; see "Memory scoping note" and TRACE_EVENT_COPY_XXX above.
+ TraceEventHandle AddTraceEvent(
+ char phase,
+ const unsigned char* category_group_enabled,
+ const char* name,
+ unsigned long long id,
+ int num_args,
+ const char** arg_names,
+ const unsigned char* arg_types,
+ const unsigned long long* arg_values,
+ const scoped_refptr<ConvertableToTraceFormat>* convertable_values,
+ unsigned int flags);
+ TraceEventHandle AddTraceEventWithContextId(
+ char phase,
+ const unsigned char* category_group_enabled,
+ const char* name,
+ unsigned long long id,
+ unsigned long long context_id,
+ int num_args,
+ const char** arg_names,
+ const unsigned char* arg_types,
+ const unsigned long long* arg_values,
+ const scoped_refptr<ConvertableToTraceFormat>* convertable_values,
+ unsigned int flags);
+ TraceEventHandle AddTraceEventWithThreadIdAndTimestamp(
+ char phase,
+ const unsigned char* category_group_enabled,
+ const char* name,
+ unsigned long long id,
+ unsigned long long context_id,
+ int thread_id,
+ const TraceTicks& timestamp,
+ int num_args,
+ const char** arg_names,
+ const unsigned char* arg_types,
+ const unsigned long long* arg_values,
+ const scoped_refptr<ConvertableToTraceFormat>* convertable_values,
+ unsigned int flags);
+ static void AddTraceEventEtw(char phase,
+ const char* category_group,
+ const void* id,
+ const char* extra);
+ static void AddTraceEventEtw(char phase,
+ const char* category_group,
+ const void* id,
+ const std::string& extra);
+
+ void UpdateTraceEventDuration(const unsigned char* category_group_enabled,
+ const char* name,
+ TraceEventHandle handle);
+
+ // For every matching event, the callback will be called.
+ typedef base::Callback<void()> WatchEventCallback;
+ void SetWatchEvent(const std::string& category_name,
+ const std::string& event_name,
+ const WatchEventCallback& callback);
+ // Cancel the watch event. If tracing is enabled, this may race with the
+ // watch event notification firing.
+ void CancelWatchEvent();
+
+ int process_id() const { return process_id_; }
+
+ uint64 MangleEventId(uint64 id);
+
+ // Exposed for unittesting:
+
+ void WaitSamplingEventForTesting();
+
+ // Allows deleting our singleton instance.
+ static void DeleteForTesting();
+
+ // Allow tests to inspect TraceEvents.
+ TraceEvent* GetEventByHandle(TraceEventHandle handle);
+
+ void SetProcessID(int process_id);
+
+  // Process sort indices, if set, override the order in which a process will
+  // appear relative to other processes in the trace viewer. Processes are
+  // sorted first on their sort index, ascending, then by name, then tid.
+ void SetProcessSortIndex(int sort_index);
+
+ // Sets the name of the process.
+ void SetProcessName(const std::string& process_name);
+
+ // Processes can have labels in addition to their names. Use labels, for
+ // instance, to list out the web page titles that a process is handling.
+ void UpdateProcessLabel(int label_id, const std::string& current_label);
+ void RemoveProcessLabel(int label_id);
+
+  // Thread sort indices, if set, override the order in which a thread will
+  // appear within its process in the trace viewer. Threads are sorted first
+  // on their sort index, ascending, then by their name, and then tid.
+ void SetThreadSortIndex(PlatformThreadId thread_id, int sort_index);
+
+ // Allow setting an offset between the current TraceTicks time and the time
+ // that should be reported.
+ void SetTimeOffset(TimeDelta offset);
+
+ size_t GetObserverCountForTest() const;
+
+ // Call this method if the current thread may block the message loop to
+ // prevent the thread from using the thread-local buffer because the thread
+  // may not handle the flush request in time, causing loss of unflushed events.
+ void SetCurrentThreadBlocksMessageLoop();
+
+ private:
+ typedef unsigned int InternalTraceOptions;
+
+ FRIEND_TEST_ALL_PREFIXES(TraceEventTestFixture,
+ TraceBufferRingBufferGetReturnChunk);
+ FRIEND_TEST_ALL_PREFIXES(TraceEventTestFixture,
+ TraceBufferRingBufferHalfIteration);
+ FRIEND_TEST_ALL_PREFIXES(TraceEventTestFixture,
+ TraceBufferRingBufferFullIteration);
+ FRIEND_TEST_ALL_PREFIXES(TraceEventTestFixture, TraceBufferVectorReportFull);
+ FRIEND_TEST_ALL_PREFIXES(TraceEventTestFixture,
+ ConvertTraceConfigToInternalOptions);
+ FRIEND_TEST_ALL_PREFIXES(TraceEventTestFixture,
+ TraceRecordAsMuchAsPossibleMode);
+
+ // This allows constructor and destructor to be private and usable only
+ // by the Singleton class.
+ friend struct DefaultSingletonTraits<TraceLog>;
+
+ // MemoryDumpProvider implementation.
+ bool OnMemoryDump(ProcessMemoryDump* pmd) override;
+
+ // Enable/disable each category group based on the current mode_,
+ // category_filter_, event_callback_ and event_callback_category_filter_.
+ // Enable the category group in the enabled mode if category_filter_ matches
+ // the category group, or event_callback_ is not null and
+ // event_callback_category_filter_ matches the category group.
+ void UpdateCategoryGroupEnabledFlags();
+ void UpdateCategoryGroupEnabledFlag(size_t category_index);
+
+ // Configure synthetic delays based on the values set in the current
+ // trace config.
+ void UpdateSyntheticDelaysFromTraceConfig();
+
+ InternalTraceOptions GetInternalOptionsFromTraceConfig(
+ const TraceConfig& config);
+
+ class ThreadLocalEventBuffer;
+ class OptionalAutoLock;
+
+ TraceLog();
+ ~TraceLog() override;
+ const unsigned char* GetCategoryGroupEnabledInternal(const char* name);
+ void AddMetadataEventsWhileLocked();
+
+ InternalTraceOptions trace_options() const {
+ return static_cast<InternalTraceOptions>(
+ subtle::NoBarrier_Load(&trace_options_));
+ }
+
+ TraceBuffer* trace_buffer() const { return logged_events_.get(); }
+ TraceBuffer* CreateTraceBuffer();
+
+ std::string EventToConsoleMessage(unsigned char phase,
+ const TraceTicks& timestamp,
+ TraceEvent* trace_event);
+
+ TraceEvent* AddEventToThreadSharedChunkWhileLocked(TraceEventHandle* handle,
+ bool check_buffer_is_full);
+ void CheckIfBufferIsFullWhileLocked();
+ void SetDisabledWhileLocked();
+
+ TraceEvent* GetEventByHandleInternal(TraceEventHandle handle,
+ OptionalAutoLock* lock);
+
+ void FlushInternal(const OutputCallback& cb,
+ bool use_worker_thread,
+ bool discard_events);
+
+ // |generation| is used in the following callbacks to check if the callback
+ // is called for the flush of the current |logged_events_|.
+ void FlushCurrentThread(int generation, bool discard_events);
+ // Usually it runs on a different thread.
+ static void ConvertTraceEventsToTraceFormat(
+ scoped_ptr<TraceBuffer> logged_events,
+ const TraceLog::OutputCallback& flush_output_callback,
+ const ArgumentFilterPredicate& argument_filter_predicate);
+ void FinishFlush(int generation, bool discard_events);
+ void OnFlushTimeout(int generation, bool discard_events);
+
+ int generation() const {
+ return static_cast<int>(subtle::NoBarrier_Load(&generation_));
+ }
+ bool CheckGeneration(int generation) const {
+ return generation == this->generation();
+ }
+ void UseNextTraceBuffer();
+
+ TraceTicks OffsetNow() const { return OffsetTimestamp(TraceTicks::Now()); }
+ TraceTicks OffsetTimestamp(const TraceTicks& timestamp) const {
+ return timestamp - time_offset_;
+ }
+
+ // Internal representation of trace options since we store the currently used
+ // trace option as an AtomicWord.
+ static const InternalTraceOptions kInternalNone;
+ static const InternalTraceOptions kInternalRecordUntilFull;
+ static const InternalTraceOptions kInternalRecordContinuously;
+ static const InternalTraceOptions kInternalEchoToConsole;
+ static const InternalTraceOptions kInternalEnableSampling;
+ static const InternalTraceOptions kInternalRecordAsMuchAsPossible;
+ static const InternalTraceOptions kInternalEnableArgumentFilter;
+
+ // This lock protects TraceLog member accesses (except for members protected
+ // by thread_info_lock_) from arbitrary threads.
+ mutable Lock lock_;
+ // This lock protects accesses to thread_names_, thread_event_start_times_
+ // and thread_colors_.
+ Lock thread_info_lock_;
+ Mode mode_;
+ int num_traces_recorded_;
+ scoped_ptr<TraceBuffer> logged_events_;
+ subtle::AtomicWord /* EventCallback */ event_callback_;
+ bool dispatching_to_observer_list_;
+ std::vector<EnabledStateObserver*> enabled_state_observer_list_;
+
+ std::string process_name_;
+ base::hash_map<int, std::string> process_labels_;
+ int process_sort_index_;
+ base::hash_map<int, int> thread_sort_indices_;
+ base::hash_map<int, std::string> thread_names_;
+
+ // The following two maps are used only when ECHO_TO_CONSOLE.
+ base::hash_map<int, std::stack<TraceTicks>> thread_event_start_times_;
+ base::hash_map<std::string, int> thread_colors_;
+
+ TraceTicks buffer_limit_reached_timestamp_;
+
+ // XORed with TraceID to make it unlikely to collide with other processes.
+ unsigned long long process_id_hash_;
+
+ int process_id_;
+
+ TimeDelta time_offset_;
+
+ // Allow tests to wake up when certain events occur.
+ WatchEventCallback watch_event_callback_;
+ subtle::AtomicWord /* const unsigned char* */ watch_category_;
+ std::string watch_event_name_;
+
+ subtle::AtomicWord /* Options */ trace_options_;
+
+ // Sampling thread handles.
+ scoped_ptr<TraceSamplingThread> sampling_thread_;
+ PlatformThreadHandle sampling_thread_handle_;
+
+ TraceConfig trace_config_;
+ TraceConfig event_callback_trace_config_;
+
+ ThreadLocalPointer<ThreadLocalEventBuffer> thread_local_event_buffer_;
+ ThreadLocalBoolean thread_blocks_message_loop_;
+ ThreadLocalBoolean thread_is_in_trace_event_;
+
+ // Contains the message loops of threads that have had at least one event
+ // added into the local event buffer. Not using SingleThreadTaskRunner
+ // because we need to know the life time of the message loops.
+ hash_set<MessageLoop*> thread_message_loops_;
+
+ // For events which can't be added into the thread local buffer, e.g. events
+ // from threads without a message loop.
+ scoped_ptr<TraceBufferChunk> thread_shared_chunk_;
+ size_t thread_shared_chunk_index_;
+
+ // Set when asynchronous Flush is in progress.
+ OutputCallback flush_output_callback_;
+ scoped_refptr<SingleThreadTaskRunner> flush_task_runner_;
+ ArgumentFilterPredicate argument_filter_predicate_;
+ subtle::AtomicWord generation_;
+ bool use_worker_thread_;
+
+ DISALLOW_COPY_AND_ASSIGN(TraceLog);
+};
+
+} // namespace trace_event
+} // namespace base
+
+#endif // BASE_TRACE_EVENT_TRACE_LOG_H_
diff --git a/base/trace_event/trace_event_impl_constants.cc b/base/trace_event/trace_log_constants.cc
index b7f3b4c..cd2ff0d 100644
--- a/base/trace_event/trace_event_impl_constants.cc
+++ b/base/trace_event/trace_log_constants.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "base/trace_event/trace_event_impl.h"
+#include "base/trace_event/trace_log.h"
namespace base {
namespace trace_event {
diff --git a/base/trace_event/trace_sampling_thread.cc b/base/trace_event/trace_sampling_thread.cc
new file mode 100644
index 0000000..32ce7bd
--- /dev/null
+++ b/base/trace_event/trace_sampling_thread.cc
@@ -0,0 +1,101 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/trace_event/trace_sampling_thread.h"
+#include "base/trace_event/trace_event_impl.h"
+#include "base/trace_event/trace_log.h"
+
+namespace base {
+namespace trace_event {
+
+// Associates one sampling bucket (an atomic word that instrumented code
+// writes a combined "category\0name" pointer into) with the callback used
+// to sample it on each pass of the sampling thread.
+class TraceBucketData {
+ public:
+  TraceBucketData(base::subtle::AtomicWord* bucket,
+                  const char* name,
+                  TraceSampleCallback callback);
+  ~TraceBucketData();
+
+  // Word the instrumentation stores into; 0 means nothing recorded yet.
+  TRACE_EVENT_API_ATOMIC_WORD* bucket;
+  // Human-readable bucket label (not owned; must outlive this object).
+  const char* bucket_name;
+  // Invoked by GetSamples() once per sampling pass for this bucket.
+  TraceSampleCallback callback;
+};
+
+// The thread is not considered running until ThreadMain() starts; the
+// testing event is constructed with (manual_reset=false,
+// initially_signaled=false), i.e. auto-reset and unsignaled.
+TraceSamplingThread::TraceSamplingThread()
+    : thread_running_(false), waitable_event_for_testing_(false, false) {}
+
+TraceSamplingThread::~TraceSamplingThread() {}
+
+// Thread entry point: samples every registered bucket roughly once per
+// millisecond until Stop() sets the cancellation flag.
+void TraceSamplingThread::ThreadMain() {
+  PlatformThread::SetName("Sampling Thread");
+  thread_running_ = true;
+  const int kSamplingFrequencyMicroseconds = 1000;
+  while (!cancellation_flag_.IsSet()) {
+    PlatformThread::Sleep(
+        TimeDelta::FromMicroseconds(kSamplingFrequencyMicroseconds));
+    GetSamples();
+    // Lets WaitSamplingEventForTesting() observe that one full sampling
+    // pass has completed.
+    waitable_event_for_testing_.Signal();
+  }
+}
+
+// static
+// Default per-bucket sampler: atomically loads the bucket's combined
+// "category\0name" pointer and, if one has been recorded, emits a
+// TRACE_EVENT_PHASE_SAMPLE event for that category/name pair.
+void TraceSamplingThread::DefaultSamplingCallback(
+    TraceBucketData* bucket_data) {
+  TRACE_EVENT_API_ATOMIC_WORD category_and_name =
+      TRACE_EVENT_API_ATOMIC_LOAD(*bucket_data->bucket);
+  // A zero word means no sample has been stored in this bucket yet.
+  if (!category_and_name)
+    return;
+  const char* const combined =
+      reinterpret_cast<const char* const>(category_and_name);
+  const char* category_group;
+  const char* name;
+  ExtractCategoryAndName(combined, &category_group, &name);
+  TRACE_EVENT_API_ADD_TRACE_EVENT(
+      TRACE_EVENT_PHASE_SAMPLE,
+      TraceLog::GetCategoryGroupEnabled(category_group), name, 0, 0, NULL, NULL,
+      NULL, NULL, 0);
+}
+
+// Runs each registered bucket's callback once. Safe without locking
+// because buckets are only registered before the thread starts (see
+// RegisterSampleBucket).
+void TraceSamplingThread::GetSamples() {
+  for (size_t i = 0; i < sample_buckets_.size(); ++i) {
+    TraceBucketData* bucket_data = &sample_buckets_[i];
+    bucket_data->callback.Run(bucket_data);
+  }
+}
+
+// Adds a bucket to be sampled on every pass of ThreadMain().
+void TraceSamplingThread::RegisterSampleBucket(
+    TRACE_EVENT_API_ATOMIC_WORD* bucket,
+    const char* const name,
+    TraceSampleCallback callback) {
+  // Access to sample_buckets_ doesn't cause races with the sampling thread
+  // that uses the sample_buckets_, because it is guaranteed that
+  // RegisterSampleBucket is called before the sampling thread is created.
+  DCHECK(!thread_running_);
+  sample_buckets_.push_back(TraceBucketData(bucket, name, callback));
+}
+
+// static
+// Splits a combined "category\0name" string: the category is the first
+// NUL-terminated segment and the name begins immediately after its NUL.
+// Both out-pointers alias |combined|; no memory is allocated.
+void TraceSamplingThread::ExtractCategoryAndName(const char* combined,
+                                                 const char** category,
+                                                 const char** name) {
+  *category = combined;
+  *name = &combined[strlen(combined) + 1];
+}
+
+// Requests termination: ThreadMain() exits its loop after the current
+// sleep/sample iteration completes (the flag is checked once per pass).
+void TraceSamplingThread::Stop() {
+  cancellation_flag_.Set();
+}
+
+// Blocks the caller until the sampling thread signals the end of a
+// sampling pass. Test-only synchronization helper.
+void TraceSamplingThread::WaitSamplingEventForTesting() {
+  waitable_event_for_testing_.Wait();
+}
+
+// |bucket| and |name| are not owned and must outlive this object; the
+// callback is copied.
+TraceBucketData::TraceBucketData(base::subtle::AtomicWord* bucket,
+                                 const char* name,
+                                 TraceSampleCallback callback)
+    : bucket(bucket), bucket_name(name), callback(callback) {}
+
+TraceBucketData::~TraceBucketData() {}
+
+} // namespace trace_event
+} // namespace base
diff --git a/base/trace_event/trace_sampling_thread.h b/base/trace_event/trace_sampling_thread.h
new file mode 100644
index 0000000..f976a80
--- /dev/null
+++ b/base/trace_event/trace_sampling_thread.h
@@ -0,0 +1,54 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TRACE_EVENT_TRACE_SAMPLING_THREAD_H_
+#define BASE_TRACE_EVENT_TRACE_SAMPLING_THREAD_H_
+
+#include "base/synchronization/cancellation_flag.h"
+#include "base/synchronization/waitable_event.h"
+#include "base/trace_event/trace_event.h"
+
+namespace base {
+namespace trace_event {
+
+class TraceBucketData;
+typedef base::Callback<void(TraceBucketData*)> TraceSampleCallback;
+
+// This object must be created on the IO thread.
+//
+// Background thread that periodically samples a set of registered atomic
+// "buckets" (each holding a combined "category\0name" pointer written by
+// instrumentation) and turns each sample into a trace event. Buckets must
+// all be registered before the thread starts; the thread runs until
+// Stop() is called.
+class TraceSamplingThread : public PlatformThread::Delegate {
+ public:
+  TraceSamplingThread();
+  ~TraceSamplingThread() override;
+
+  // Implementation of PlatformThread::Delegate:
+  void ThreadMain() override;
+
+  // Default per-bucket sampler: emits a SAMPLE trace event for the
+  // bucket's current category/name, if any.
+  static void DefaultSamplingCallback(TraceBucketData* bucket_data);
+
+  // Asks ThreadMain() to exit after its current iteration.
+  void Stop();
+  // Blocks until one sampling pass completes. Test-only.
+  void WaitSamplingEventForTesting();
+
+ private:
+  // TraceLog owns this thread and registers the sample buckets.
+  friend class TraceLog;
+
+  // Runs every registered bucket's callback once.
+  void GetSamples();
+  // Not thread-safe. Once the ThreadMain has been called, this can no longer
+  // be called.
+  void RegisterSampleBucket(TRACE_EVENT_API_ATOMIC_WORD* bucket,
+                            const char* const name,
+                            TraceSampleCallback callback);
+  // Splits a combined "category\0name" into the two component parts.
+  static void ExtractCategoryAndName(const char* combined,
+                                     const char** category,
+                                     const char** name);
+  std::vector<TraceBucketData> sample_buckets_;
+  // Set by ThreadMain(); guards against late RegisterSampleBucket calls.
+  bool thread_running_;
+  // Set by Stop(); polled once per sampling pass in ThreadMain().
+  CancellationFlag cancellation_flag_;
+  // Signaled after each sampling pass; waited on only by tests.
+  WaitableEvent waitable_event_for_testing_;
+};
+
+} // namespace trace_event
+} // namespace base
+
+#endif // BASE_TRACE_EVENT_TRACE_SAMPLING_THREAD_H_