author:    caseq <caseq@chromium.org> (2015-03-26 08:56:41 -0700)
committer: Commit bot <commit-bot@chromium.org> (2015-03-26 15:57:16 +0000)
commit:    61b95998e904561aa31aa095982529543f2cbac5
tree:      b6659f689437464ea4041dd9e24704a3e2b123de /base/trace_event/trace_event_impl.cc
parent:    25e7eb79c60ee43d14684ec5da6304941c169d50
Re-land issue 750183008 (Parallelize trace messages serialization)
This re-lands https://codereview.chromium.org/750183008 (originally
by loislo@) with the output chunk size reduced from 10M down to 100K
(close to what it was originally). The Telemetry failures of the
original change on Windows were caused by a bug in websocket-client
(https://github.com/liris/websocket-client/issues/163) that made it
fragment the Python heap when receiving a large frame.
Original issue description:
Move serialization onto a worker thread. As a result, the IO thread
is able to keep sending messages to the browser. The original
implementation serialized on the IO thread and could not send the
messages because IPC had is_blocked_on_write_ = true and never got a
chance to check the actual state of the channel, so the messages
accumulated in output_queue. The serialized messages could also be
quite large and block the IO thread for a long time.
BUG=463572
TBR=dsinclair
(as he already l-g-t-m-d the original patch)
Review URL: https://codereview.chromium.org/1035783002
Cr-Commit-Position: refs/heads/master@{#322391}
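
The idea behind the change is that JSON serialization of the trace buffer is CPU-heavy, so doing it on the IO thread keeps that thread from draining its IPC output queue. Below is a minimal, standalone sketch of that offloading pattern; it uses plain std::async instead of Chromium's base::WorkerPool, and the names (SerializeChunk, the event vector) are invented for illustration rather than taken from the patch.

```cpp
// Standalone sketch (not Chromium code): hand expensive serialization to a
// worker so the calling ("IO") thread stays free to pump messages.
#include <future>
#include <iostream>
#include <string>
#include <vector>

// Stand-in for an expensive JSON conversion of buffered trace events.
std::string SerializeChunk(const std::vector<int>& events) {
  std::string json;
  for (size_t i = 0; i < events.size(); ++i) {
    if (!json.empty())
      json += ",\n";
    json += "{\"id\":" + std::to_string(events[i]) + "}";
  }
  return json;
}

int main() {
  std::vector<int> events = {1, 2, 3, 4, 5};

  // Offload the CPU-heavy serialization to a worker thread; the caller
  // returns immediately instead of blocking until the JSON string is built.
  std::future<std::string> pending =
      std::async(std::launch::async, SerializeChunk, events);

  // ... the IO thread would keep servicing its channel here ...

  // Later, collect the finished batch and hand it to the output path.
  std::cout << pending.get() << "\n";
  return 0;
}
```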
Diffstat (limited to 'base/trace_event/trace_event_impl.cc')
-rw-r--r-- | base/trace_event/trace_event_impl.cc | 31
1 file changed, 22 insertions, 9 deletions
diff --git a/base/trace_event/trace_event_impl.cc b/base/trace_event/trace_event_impl.cc
index 72b46f9..834f826e 100644
--- a/base/trace_event/trace_event_impl.cc
+++ b/base/trace_event/trace_event_impl.cc
@@ -30,6 +30,7 @@
 #include "base/third_party/dynamic_annotations/dynamic_annotations.h"
 #include "base/threading/platform_thread.h"
 #include "base/threading/thread_id_name_manager.h"
+#include "base/threading/worker_pool.h"
 #include "base/time/time.h"
 #include "base/trace_event/trace_event.h"
 #include "base/trace_event/trace_event_synthetic_delay.h"
@@ -73,7 +74,7 @@ const size_t kTraceEventVectorBigBufferChunks =
     512000000 / kTraceBufferChunkSize;
 const size_t kTraceEventVectorBufferChunks = 256000 / kTraceBufferChunkSize;
 const size_t kTraceEventRingBufferChunks = kTraceEventVectorBufferChunks / 4;
-const size_t kTraceEventBatchChunks = 1000 / kTraceBufferChunkSize;
+const size_t kTraceEventBufferSizeInBytes = 100 * 1024;
 // Can store results for 30 seconds with 1 ms sampling interval.
 const size_t kMonitorTraceEventBufferChunks = 30000 / kTraceBufferChunkSize;
 // ECHO_TO_CONSOLE needs a small buffer to hold the unfinished COMPLETE events.
@@ -1208,7 +1209,8 @@ TraceLog::TraceLog()
       event_callback_category_filter_(
           CategoryFilter::kDefaultCategoryFilterString),
       thread_shared_chunk_index_(0),
-      generation_(0) {
+      generation_(0),
+      use_worker_thread_(false) {
   // Trace is enabled or disabled on one thread while other threads are
   // accessing the enabled flag. We don't care whether edge-case events are
   // traced or not, so we allow races on the enabled flag to keep the trace
@@ -1681,7 +1683,9 @@ void TraceLog::SetEventCallbackDisabled() {
 //    - The message loop will be removed from thread_message_loops_;
 //      If this is the last message loop, finish the flush;
 // 4. If any thread hasn't finish its flush in time, finish the flush.
-void TraceLog::Flush(const TraceLog::OutputCallback& cb) {
+void TraceLog::Flush(const TraceLog::OutputCallback& cb,
+                     bool use_worker_thread) {
+  use_worker_thread_ = use_worker_thread;
   if (IsEnabled()) {
     // Can't flush when tracing is enabled because otherwise PostTask would
     //   - generate more trace events;
@@ -1735,6 +1739,7 @@ void TraceLog::Flush(const TraceLog::OutputCallback& cb) {
   FinishFlush(generation);
 }
 
+// Usually it runs on a different thread.
 void TraceLog::ConvertTraceEventsToTraceFormat(
     scoped_ptr<TraceBuffer> logged_events,
     const TraceLog::OutputCallback& flush_output_callback) {
@@ -1749,19 +1754,17 @@ void TraceLog::ConvertTraceEventsToTraceFormat(
     scoped_refptr<RefCountedString> json_events_str_ptr =
         new RefCountedString();
 
-    for (size_t i = 0; i < kTraceEventBatchChunks; ++i) {
+    while (json_events_str_ptr->size() < kTraceEventBufferSizeInBytes) {
       const TraceBufferChunk* chunk = logged_events->NextChunk();
-      if (!chunk) {
-        has_more_events = false;
+      has_more_events = chunk != NULL;
+      if (!chunk)
         break;
-      }
       for (size_t j = 0; j < chunk->size(); ++j) {
-        if (i > 0 || j > 0)
+        if (json_events_str_ptr->size())
           json_events_str_ptr->data().append(",\n");
         chunk->GetEventAt(j)->AppendAsJSON(&(json_events_str_ptr->data()));
       }
     }
-
     flush_output_callback.Run(json_events_str_ptr, has_more_events);
   } while (has_more_events);
 }
@@ -1785,6 +1788,16 @@ void TraceLog::FinishFlush(int generation) {
     flush_output_callback_.Reset();
   }
 
+  if (use_worker_thread_ &&
+      WorkerPool::PostTask(
+          FROM_HERE,
+          Bind(&TraceLog::ConvertTraceEventsToTraceFormat,
+               Passed(&previous_logged_events),
+               flush_output_callback),
+          true)) {
+    return;
+  }
+
   ConvertTraceEventsToTraceFormat(previous_logged_events.Pass(),
                                   flush_output_callback);
 }
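
The other half of the patch replaces the fixed chunk-count batching (kTraceEventBatchChunks) with a byte budget (kTraceEventBufferSizeInBytes = 100 * 1024): whole chunks are appended to the outgoing JSON string until the batch reaches roughly 100 KB, and the callback reports whether more batches follow. Here is a minimal sketch of that strategy; the names (Chunk, ConvertToBatches, batch_limit_bytes) are invented for illustration and are not the real TraceBuffer/TraceBufferChunk API.

```cpp
#include <cstddef>
#include <functional>
#include <iostream>
#include <string>
#include <vector>

// Hypothetical stand-in for a trace buffer: each chunk holds a few events
// that are already serialized as short JSON objects.
using Chunk = std::vector<std::string>;

void ConvertToBatches(const std::vector<Chunk>& chunks,
                      size_t batch_limit_bytes,
                      const std::function<void(const std::string&, bool)>& output) {
  size_t next_chunk = 0;
  bool has_more = true;
  do {
    std::string batch;
    // Mirror of the new loop: append whole chunks until the batch grows past
    // the byte limit or the buffer runs out, instead of always taking a
    // fixed number of chunks.
    while (batch.size() < batch_limit_bytes) {
      if (next_chunk >= chunks.size()) {
        has_more = false;
        break;
      }
      for (const std::string& event_json : chunks[next_chunk]) {
        if (!batch.empty())
          batch += ",\n";
        batch += event_json;
      }
      ++next_chunk;
    }
    // The callback runs at least once so the consumer learns the flush
    // finished even when there were no events at all.
    output(batch, has_more);
  } while (has_more);
}

int main() {
  std::vector<Chunk> chunks = {{"{\"a\":1}", "{\"a\":2}"}, {"{\"a\":3}"}};
  ConvertToBatches(chunks, 100 * 1024, [](const std::string& batch, bool more) {
    std::cout << "batch (" << batch.size() << " bytes, more=" << more << "):\n"
              << batch << "\n";
  });
  return 0;
}
```

Bounding batches by bytes rather than by chunk count keeps each flush callback payload at a predictable size, which is presumably what allowed the output chunk size to be brought back down to about 100K after the websocket-client fragmentation problem described above.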