summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--base/trace_event/memory_dump_manager.cc237
-rw-r--r--base/trace_event/memory_dump_manager.h63
-rw-r--r--base/trace_event/memory_dump_manager_unittest.cc117
-rw-r--r--base/trace_event/memory_dump_provider.h12
-rw-r--r--components/tracing/process_metrics_memory_dump_provider.cc3
5 files changed, 308 insertions, 124 deletions
diff --git a/base/trace_event/memory_dump_manager.cc b/base/trace_event/memory_dump_manager.cc
index a549202..c24a659 100644
--- a/base/trace_event/memory_dump_manager.cc
+++ b/base/trace_event/memory_dump_manager.cc
@@ -185,6 +185,36 @@ void MemoryDumpManager::RegisterDumpProvider(
MemoryDumpProvider* mdp,
const char* name,
const scoped_refptr<SingleThreadTaskRunner>& task_runner,
+ MemoryDumpProvider::Options options) {
+ options.dumps_on_single_thread_task_runner = true;
+ RegisterDumpProviderInternal(mdp, name, task_runner, options);
+}
+
+void MemoryDumpManager::RegisterDumpProvider(
+ MemoryDumpProvider* mdp,
+ const char* name,
+ const scoped_refptr<SingleThreadTaskRunner>& task_runner) {
+ // Set |dumps_on_single_thread_task_runner| to true because all providers
+  // without a task runner are run on the dump thread.
+ MemoryDumpProvider::Options options;
+ options.dumps_on_single_thread_task_runner = true;
+ RegisterDumpProviderInternal(mdp, name, task_runner, options);
+}
+
+void MemoryDumpManager::RegisterDumpProviderWithSequencedTaskRunner(
+ MemoryDumpProvider* mdp,
+ const char* name,
+ const scoped_refptr<SequencedTaskRunner>& task_runner,
+ MemoryDumpProvider::Options options) {
+ DCHECK(task_runner);
+ options.dumps_on_single_thread_task_runner = false;
+ RegisterDumpProviderInternal(mdp, name, task_runner, options);
+}
+
+void MemoryDumpManager::RegisterDumpProviderInternal(
+ MemoryDumpProvider* mdp,
+ const char* name,
+ const scoped_refptr<SequencedTaskRunner>& task_runner,
const MemoryDumpProvider::Options& options) {
if (dumper_registrations_ignored_for_testing_)
return;
@@ -205,13 +235,6 @@ void MemoryDumpManager::RegisterDumpProvider(
mdp->OnHeapProfilingEnabled(true);
}
-void MemoryDumpManager::RegisterDumpProvider(
- MemoryDumpProvider* mdp,
- const char* name,
- const scoped_refptr<SingleThreadTaskRunner>& task_runner) {
- RegisterDumpProvider(mdp, name, task_runner, MemoryDumpProvider::Options());
-}
-
void MemoryDumpManager::UnregisterDumpProvider(MemoryDumpProvider* mdp) {
UnregisterDumpProviderInternal(mdp, false /* delete_async */);
}
@@ -242,28 +265,29 @@ void MemoryDumpManager::UnregisterDumpProviderInternal(
if (take_mdp_ownership_and_delete_async) {
// The MDP will be deleted whenever the MDPInfo struct will, that is either:
// - At the end of this function, if no dump is in progress.
- // - In the prologue of the ContinueAsyncProcessDump().
+ // - Either in SetupNextMemoryDump() or InvokeOnMemoryDump() when MDPInfo is
+ // removed from |pending_dump_providers|.
DCHECK(!(*mdp_iter)->owned_dump_provider);
(*mdp_iter)->owned_dump_provider = std::move(owned_mdp);
} else if (subtle::NoBarrier_Load(&memory_tracing_enabled_)) {
// If you hit this DCHECK, your dump provider has a bug.
// Unregistration of a MemoryDumpProvider is safe only if:
- // - The MDP has specified a thread affinity (via task_runner()) AND
- // the unregistration happens on the same thread (so the MDP cannot
+ // - The MDP has specified a sequenced task runner affinity AND the
+ // unregistration happens on the same task runner. So that the MDP cannot
// unregister and be in the middle of a OnMemoryDump() at the same time.
- // - The MDP has NOT specified a thread affinity and its ownership is
+ // - The MDP has NOT specified a task runner affinity and its ownership is
// transferred via UnregisterAndDeleteDumpProviderSoon().
// In all the other cases, it is not possible to guarantee that the
// unregistration will not race with OnMemoryDump() calls.
DCHECK((*mdp_iter)->task_runner &&
- (*mdp_iter)->task_runner->BelongsToCurrentThread())
+ (*mdp_iter)->task_runner->RunsTasksOnCurrentThread())
<< "MemoryDumpProvider \"" << (*mdp_iter)->name << "\" attempted to "
<< "unregister itself in a racy way. Please file a crbug.";
}
// The MDPInfo instance can still be referenced by the
// |ProcessMemoryDumpAsyncState.pending_dump_providers|. For this reason
- // the MDPInfo is flagged as disabled. It will cause ContinueAsyncProcessDump
+ // the MDPInfo is flagged as disabled. It will cause InvokeOnMemoryDump()
// to just skip it, without actually invoking the |mdp|, which might be
// destroyed by the caller soon after this method returns.
(*mdp_iter)->disabled = true;
@@ -323,7 +347,7 @@ void MemoryDumpManager::CreateProcessDump(const MemoryDumpRequestArgs& args,
{
AutoLock lock(lock_);
// |dump_thread_| can be nullptr is tracing was disabled before reaching
- // here. ContinueAsyncProcessDump is robust enough to tolerate it and will
+ // here. SetupNextMemoryDump() is robust enough to tolerate it and will
// NACK the dump.
pmd_async_state.reset(new ProcessMemoryDumpAsyncState(
args, dump_providers_, session_state_, callback,
@@ -334,113 +358,134 @@ void MemoryDumpManager::CreateProcessDump(const MemoryDumpRequestArgs& args,
TRACE_ID_MANGLE(args.dump_guid),
TRACE_EVENT_FLAG_FLOW_OUT);
- // Start the thread hop. |dump_providers_| are kept sorted by thread, so
- // ContinueAsyncProcessDump will hop at most once per thread (w.r.t. thread
- // affinity specified by the MemoryDumpProvider(s) in RegisterDumpProvider()).
- ContinueAsyncProcessDump(pmd_async_state.release());
+ // Start the process dump. This involves task runner hops as specified by the
+ // MemoryDumpProvider(s) in RegisterDumpProvider()).
+ SetupNextMemoryDump(std::move(pmd_async_state));
}
-// At most one ContinueAsyncProcessDump() can be active at any time for a given
-// PMD, regardless of status of the |lock_|. |lock_| is used here purely to
-// ensure consistency w.r.t. (un)registrations of |dump_providers_|.
-// The linearization of dump providers' OnMemoryDump invocations is achieved by
-// means of subsequent PostTask(s).
-//
-// 1) Prologue:
-// - If this was the last hop, create a trace event, add it to the trace
-// and finalize (invoke callback).
-// - Check if we are on the right thread. If not hop and continue there.
-// - Check if the dump provider is disabled, if so skip the dump.
-// 2) Invoke the dump provider's OnMemoryDump() (unless skipped).
-// 3) Epilogue:
-// - Unregister the dump provider if it failed too many times consecutively.
-// - Pop() the MDP from the |pending_dump_providers| list, eventually
-// destroying the MDPInfo if that was unregistered in the meantime.
-void MemoryDumpManager::ContinueAsyncProcessDump(
- ProcessMemoryDumpAsyncState* owned_pmd_async_state) {
+// PostTask InvokeOnMemoryDump() to the dump provider's sequenced task runner. A
+// PostTask is always required for a generic SequencedTaskRunner to ensure that
+// no other task is running on it concurrently. SetupNextMemoryDump() and
+// InvokeOnMemoryDump() are called alternately, which linearizes the dump
+// provider's OnMemoryDump invocations.
+// At most one of either SetupNextMemoryDump() or InvokeOnMemoryDump() can be
+// active at any time for a given PMD, regardless of status of the |lock_|.
+// |lock_| is used in these functions purely to ensure consistency w.r.t.
+// (un)registrations of |dump_providers_|.
+void MemoryDumpManager::SetupNextMemoryDump(
+ scoped_ptr<ProcessMemoryDumpAsyncState> pmd_async_state) {
// Initalizes the ThreadLocalEventBuffer to guarantee that the TRACE_EVENTs
// in the PostTask below don't end up registering their own dump providers
// (for discounting trace memory overhead) while holding the |lock_|.
TraceLog::GetInstance()->InitializeThreadLocalEventBufferIfSupported();
+ // If this was the last hop, create a trace event, add it to the trace and
+ // finalize process dump (invoke callback).
+ if (pmd_async_state->pending_dump_providers.empty())
+ return FinalizeDumpAndAddToTrace(std::move(pmd_async_state));
+
+ // Read MemoryDumpProviderInfo thread safety considerations in
+ // memory_dump_manager.h when accessing |mdpinfo| fields.
+ MemoryDumpProviderInfo* mdpinfo =
+ pmd_async_state->pending_dump_providers.back().get();
+
+ // If the dump provider did not specify a task runner affinity, dump on
+ // |dump_thread_|. Note that |dump_thread_| might have been destroyed
+ // meanwhile.
+ SequencedTaskRunner* task_runner = mdpinfo->task_runner.get();
+ if (!task_runner) {
+ DCHECK(mdpinfo->options.dumps_on_single_thread_task_runner);
+ task_runner = pmd_async_state->dump_thread_task_runner.get();
+ if (!task_runner) {
+ // If tracing was disabled before reaching CreateProcessDump() the
+ // dump_thread_ would have been already torn down. Nack current dump and
+ // continue.
+ pmd_async_state->dump_successful = false;
+ pmd_async_state->pending_dump_providers.pop_back();
+ return SetupNextMemoryDump(std::move(pmd_async_state));
+ }
+ }
+
+ if (mdpinfo->options.dumps_on_single_thread_task_runner &&
+ task_runner->RunsTasksOnCurrentThread()) {
+ // If |dumps_on_single_thread_task_runner| is true then no PostTask is
+ // required if we are on the right thread.
+ return InvokeOnMemoryDump(pmd_async_state.release());
+ }
+
+ bool did_post_task = task_runner->PostTask(
+ FROM_HERE, Bind(&MemoryDumpManager::InvokeOnMemoryDump, Unretained(this),
+ Unretained(pmd_async_state.get())));
+
+ if (did_post_task) {
+    // Ownership is transferred to InvokeOnMemoryDump().
+ ignore_result(pmd_async_state.release());
+ return;
+ }
+
+ // PostTask usually fails only if the process or thread is shut down. So, the
+ // dump provider is disabled here. But, don't disable unbound dump providers.
+  // The utility thread is normally shut down when disabling the trace and
+ // getting here in this case is expected.
+ if (mdpinfo->task_runner) {
+ LOG(ERROR) << "Disabling MemoryDumpProvider \"" << mdpinfo->name
+ << "\". Failed to post task on the task runner provided.";
+
+ // A locked access is required to R/W |disabled| (for the
+ // UnregisterAndDeleteDumpProviderSoon() case).
+ AutoLock lock(lock_);
+ mdpinfo->disabled = true;
+ }
+
+ // PostTask failed. Ignore the dump provider and continue.
+ pmd_async_state->pending_dump_providers.pop_back();
+ SetupNextMemoryDump(std::move(pmd_async_state));
+}
+
+// This function is called on the right task runner for current MDP. It is
+// either the task runner specified by MDP or |dump_thread_task_runner| if the
+// MDP did not specify task runner. Invokes the dump provider's OnMemoryDump()
+// (unless disabled).
+void MemoryDumpManager::InvokeOnMemoryDump(
+ ProcessMemoryDumpAsyncState* owned_pmd_async_state) {
// In theory |owned_pmd_async_state| should be a scoped_ptr. The only reason
- // why it isn't is because of the corner case logic of |did_post_task| below,
- // which needs to take back the ownership of the |pmd_async_state| when a
- // thread goes away and consequently the PostTask() fails.
+ // why it isn't is because of the corner case logic of |did_post_task|
+ // above, which needs to take back the ownership of the |pmd_async_state| when
+ // the PostTask() fails.
// Unfortunately, PostTask() destroys the scoped_ptr arguments upon failure
// to prevent accidental leaks. Using a scoped_ptr would prevent us to to
// skip the hop and move on. Hence the manual naked -> scoped ptr juggling.
auto pmd_async_state = make_scoped_ptr(owned_pmd_async_state);
owned_pmd_async_state = nullptr;
- if (pmd_async_state->pending_dump_providers.empty())
- return FinalizeDumpAndAddToTrace(std::move(pmd_async_state));
-
// Read MemoryDumpProviderInfo thread safety considerations in
// memory_dump_manager.h when accessing |mdpinfo| fields.
MemoryDumpProviderInfo* mdpinfo =
pmd_async_state->pending_dump_providers.back().get();
- // If the dump provider did not specify a thread affinity, dump on
- // |dump_thread_|. Note that |dump_thread_| might have been Stop()-ed at this
- // point (if tracing was disabled in the meanwhile). In such case the
- // PostTask() below will fail, but |task_runner| should always be non-null.
- SingleThreadTaskRunner* task_runner = mdpinfo->task_runner.get();
- if (!task_runner)
- task_runner = pmd_async_state->dump_thread_task_runner.get();
-
- bool post_task_failed = false;
- if (!task_runner) {
- // If tracing was disabled before reaching CreateProcessDump() |task_runner|
- // will be null, as the dump_thread_ would have been already torn down.
- post_task_failed = true;
- pmd_async_state->dump_successful = false;
- } else if (!task_runner->BelongsToCurrentThread()) {
- // It's time to hop onto another thread.
- post_task_failed = !task_runner->PostTask(
- FROM_HERE, Bind(&MemoryDumpManager::ContinueAsyncProcessDump,
- Unretained(this), Unretained(pmd_async_state.get())));
- if (!post_task_failed) {
- // Ownership is tranferred to the next ContinueAsyncProcessDump().
- ignore_result(pmd_async_state.release());
- return;
- }
- }
+ DCHECK(!mdpinfo->task_runner ||
+ mdpinfo->task_runner->RunsTasksOnCurrentThread());
- // At this point either:
- // - The MDP has a task runner affinity and we are on the right thread.
- // - The MDP has a task runner affinity but the underlying thread is gone,
- // hence the above |post_task_failed| == true.
- // - The MDP does NOT have a task runner affinity. A locked access is required
- // to R/W |disabled| (for the UnregisterAndDeleteDumpProviderSoon() case).
bool should_dump;
- const char* disabled_reason = nullptr;
{
+ // A locked access is required to R/W |disabled| (for the
+ // UnregisterAndDeleteDumpProviderSoon() case).
AutoLock lock(lock_);
- if (!mdpinfo->disabled) {
- if (mdpinfo->consecutive_failures >= kMaxConsecutiveFailuresCount) {
- mdpinfo->disabled = true;
- disabled_reason = "Dump failed multiple times consecutively.";
- } else if (post_task_failed && mdpinfo->task_runner) {
- // Don't disable unbound dump providers. The utility thread is normally
- // shutdown when disabling the trace and getting here in this case is
- // expected.
- mdpinfo->disabled = true;
- disabled_reason = "The thread it was meant to dump onto is gone.";
- }
+
+ // Unregister the dump provider if it failed too many times consecutively.
+ if (!mdpinfo->disabled &&
+ mdpinfo->consecutive_failures >= kMaxConsecutiveFailuresCount) {
+ mdpinfo->disabled = true;
+ LOG(ERROR) << "Disabling MemoryDumpProvider \"" << mdpinfo->name
+ << "\". Dump failed multiple times consecutively.";
}
- should_dump = !mdpinfo->disabled && !post_task_failed;
+ should_dump = !mdpinfo->disabled;
} // AutoLock lock(lock_);
- if (disabled_reason) {
- LOG(ERROR) << "Disabling MemoryDumpProvider \"" << mdpinfo->name << "\". "
- << disabled_reason;
- }
-
if (should_dump) {
// Invoke the dump provider.
TRACE_EVENT_WITH_FLOW1(kTraceCategory,
- "MemoryDumpManager::ContinueAsyncProcessDump",
+ "MemoryDumpManager::InvokeOnMemoryDump",
TRACE_ID_MANGLE(pmd_async_state->req_args.dump_guid),
TRACE_EVENT_FLAG_FLOW_IN | TRACE_EVENT_FLAG_FLOW_OUT,
"dump_provider.name", mdpinfo->name);
@@ -455,10 +500,10 @@ void MemoryDumpManager::ContinueAsyncProcessDump(
bool dump_successful = mdpinfo->dump_provider->OnMemoryDump(args, pmd);
mdpinfo->consecutive_failures =
dump_successful ? 0 : mdpinfo->consecutive_failures + 1;
- } // if (!mdpinfo->disabled)
+ }
pmd_async_state->pending_dump_providers.pop_back();
- ContinueAsyncProcessDump(pmd_async_state.release());
+ SetupNextMemoryDump(std::move(pmd_async_state));
}
// static
@@ -612,7 +657,7 @@ void MemoryDumpManager::OnTraceLogDisabled() {
}
// Thread stops are blocking and must be performed outside of the |lock_|
- // or will deadlock (e.g., if ContinueAsyncProcessDump() tries to acquire it).
+ // or will deadlock (e.g., if SetupNextMemoryDump() tries to acquire it).
periodic_dump_timer_.Stop();
if (dump_thread)
dump_thread->Stop();
@@ -625,7 +670,7 @@ uint64_t MemoryDumpManager::GetTracingProcessId() const {
MemoryDumpManager::MemoryDumpProviderInfo::MemoryDumpProviderInfo(
MemoryDumpProvider* dump_provider,
const char* name,
- const scoped_refptr<SingleThreadTaskRunner>& task_runner,
+ const scoped_refptr<SequencedTaskRunner>& task_runner,
const MemoryDumpProvider::Options& options)
: dump_provider(dump_provider),
name(name),
diff --git a/base/trace_event/memory_dump_manager.h b/base/trace_event/memory_dump_manager.h
index b713079..e9b09f8 100644
--- a/base/trace_event/memory_dump_manager.h
+++ b/base/trace_event/memory_dump_manager.h
@@ -69,9 +69,9 @@ class BASE_EXPORT MemoryDumpManager : public TraceLog::EnabledStateObserver {
// - name: a friendly name (duplicates allowed). Used for debugging and
// run-time profiling of memory-infra internals. Must be a long-lived
// C string.
- // - task_runner: if non-null, all the calls to |mdp| will be
- // issued on the given thread. Otherwise, |mdp| should be able to
- // handle calls on arbitrary threads.
+ // - task_runner: either a SingleThreadTaskRunner or SequencedTaskRunner. All
+ // the calls to |mdp| will be run on the given |task_runner|. If passed
+ // null |mdp| should be able to handle calls on arbitrary threads.
// - options: extra optional arguments. See memory_dump_provider.h.
void RegisterDumpProvider(
MemoryDumpProvider* mdp,
@@ -81,7 +81,12 @@ class BASE_EXPORT MemoryDumpManager : public TraceLog::EnabledStateObserver {
MemoryDumpProvider* mdp,
const char* name,
const scoped_refptr<SingleThreadTaskRunner>& task_runner,
- const MemoryDumpProvider::Options& options);
+ MemoryDumpProvider::Options options);
+ void RegisterDumpProviderWithSequencedTaskRunner(
+ MemoryDumpProvider* mdp,
+ const char* name,
+ const scoped_refptr<SequencedTaskRunner>& task_runner,
+ MemoryDumpProvider::Options options);
void UnregisterDumpProvider(MemoryDumpProvider* mdp);
// Unregisters an unbound dump provider and takes care about its deletion
@@ -153,14 +158,15 @@ class BASE_EXPORT MemoryDumpManager : public TraceLog::EnabledStateObserver {
// inside ProcessMemoryDumpAsyncState is removed.
// - In most cases, the MDPInfo is destroyed within UnregisterDumpProvider().
// - If UnregisterDumpProvider() is called while a dump is in progress, the
- // MDPInfo is destroyed in the epilogue of ContinueAsyncProcessDump(), when
- // the copy inside ProcessMemoryDumpAsyncState is erase()-d.
+ // MDPInfo is destroyed in SetupNextMemoryDump() or InvokeOnMemoryDump(),
+ // when the copy inside ProcessMemoryDumpAsyncState is erase()-d.
// - The non-const fields of MemoryDumpProviderInfo are safe to access only
- // in the |task_runner| thread, unless the thread has been destroyed.
+ // on tasks running in the |task_runner|, unless the thread has been
+ // destroyed.
struct MemoryDumpProviderInfo
: public RefCountedThreadSafe<MemoryDumpProviderInfo> {
- // Define a total order based on the thread (i.e. |task_runner|) affinity,
- // so that all MDP belonging to the same thread are adjacent in the set.
+ // Define a total order based on the |task_runner| affinity, so that MDPs
+ // belonging to the same SequencedTaskRunner are adjacent in the set.
struct Comparator {
bool operator()(const scoped_refptr<MemoryDumpProviderInfo>& a,
const scoped_refptr<MemoryDumpProviderInfo>& b) const;
@@ -171,7 +177,7 @@ class BASE_EXPORT MemoryDumpManager : public TraceLog::EnabledStateObserver {
MemoryDumpProviderInfo(
MemoryDumpProvider* dump_provider,
const char* name,
- const scoped_refptr<SingleThreadTaskRunner>& task_runner,
+ const scoped_refptr<SequencedTaskRunner>& task_runner,
const MemoryDumpProvider::Options& options);
MemoryDumpProvider* const dump_provider;
@@ -183,9 +189,9 @@ class BASE_EXPORT MemoryDumpManager : public TraceLog::EnabledStateObserver {
// Human readable name, for debugging and testing. Not necessarily unique.
const char* const name;
- // The task_runner affinity. Can be nullptr, in which case the dump provider
+ // The task runner affinity. Can be nullptr, in which case the dump provider
// will be invoked on |dump_thread_|.
- const scoped_refptr<SingleThreadTaskRunner> task_runner;
+ const scoped_refptr<SequencedTaskRunner> task_runner;
// The |options| arg passed to RegisterDumpProvider().
const MemoryDumpProvider::Options options;
@@ -204,8 +210,9 @@ class BASE_EXPORT MemoryDumpManager : public TraceLog::EnabledStateObserver {
};
// Holds the state of a process memory dump that needs to be carried over
- // across threads in order to fulfil an asynchronous CreateProcessDump()
- // request. At any time exactly one thread owns a ProcessMemoryDumpAsyncState.
+ // across task runners in order to fulfil an asynchronous CreateProcessDump()
+ // request. At any time exactly one task runner owns a
+ // ProcessMemoryDumpAsyncState.
struct ProcessMemoryDumpAsyncState {
ProcessMemoryDumpAsyncState(
MemoryDumpRequestArgs req_args,
@@ -277,17 +284,30 @@ class BASE_EXPORT MemoryDumpManager : public TraceLog::EnabledStateObserver {
void CreateProcessDump(const MemoryDumpRequestArgs& args,
const MemoryDumpCallback& callback);
- // Continues the ProcessMemoryDump started by CreateProcessDump(), hopping
- // across threads as needed as specified by MDPs in RegisterDumpProvider().
- void ContinueAsyncProcessDump(
- ProcessMemoryDumpAsyncState* owned_pmd_async_state);
+ // Calls InvokeOnMemoryDump() for the next MDP on the task runner specified by
+  // the MDP during registration. On failure to do so, skips and continues to
+ // next MDP.
+ void SetupNextMemoryDump(
+ scoped_ptr<ProcessMemoryDumpAsyncState> pmd_async_state);
+
+ // Invokes OnMemoryDump() of the next MDP and calls SetupNextMemoryDump() at
+ // the end to continue the ProcessMemoryDump. Should be called on the MDP task
+ // runner.
+ void InvokeOnMemoryDump(ProcessMemoryDumpAsyncState* owned_pmd_async_state);
+
+  // Helper for the RegisterDumpProvider* functions.
+ void RegisterDumpProviderInternal(
+ MemoryDumpProvider* mdp,
+ const char* name,
+ const scoped_refptr<SequencedTaskRunner>& task_runner,
+ const MemoryDumpProvider::Options& options);
// Helper for the public UnregisterDumpProvider* functions.
void UnregisterDumpProviderInternal(MemoryDumpProvider* mdp,
bool take_mdp_ownership_and_delete_async);
- // An ordererd set of registered MemoryDumpProviderInfo(s), sorted by thread
- // affinity (MDPs belonging to the same thread are adjacent).
+  // An ordered set of registered MemoryDumpProviderInfo(s), sorted by task
+ // runner affinity (MDPs belonging to the same task runners are adjacent).
MemoryDumpProviderInfo::OrderedSet dump_providers_;
// Shared among all the PMDs to keep state scoped to the tracing session.
@@ -309,7 +329,8 @@ class BASE_EXPORT MemoryDumpManager : public TraceLog::EnabledStateObserver {
// For time-triggered periodic dumps.
RepeatingTimer periodic_dump_timer_;
- // Thread used for MemoryDumpProviders which don't specify a thread affinity.
+ // Thread used for MemoryDumpProviders which don't specify a task runner
+ // affinity.
scoped_ptr<Thread> dump_thread_;
// The unique id of the child process. This is created only for tracing and is
diff --git a/base/trace_event/memory_dump_manager_unittest.cc b/base/trace_event/memory_dump_manager_unittest.cc
index 89a6e3d..138ba69 100644
--- a/base/trace_event/memory_dump_manager_unittest.cc
+++ b/base/trace_event/memory_dump_manager_unittest.cc
@@ -19,6 +19,7 @@
#include "base/test/trace_event_analyzer.h"
#include "base/thread_task_runner_handle.h"
#include "base/threading/platform_thread.h"
+#include "base/threading/sequenced_worker_pool.h"
#include "base/threading/thread.h"
#include "base/trace_event/memory_dump_provider.h"
#include "base/trace_event/process_memory_dump.h"
@@ -50,18 +51,44 @@ namespace {
void RegisterDumpProvider(
MemoryDumpProvider* mdp,
- const scoped_refptr<base::SingleThreadTaskRunner>& task_runner,
- const MemoryDumpProvider::Options& options) {
+ const scoped_refptr<base::SequencedTaskRunner>& task_runner,
+ const MemoryDumpProvider::Options& options,
+ bool dumps_on_single_thread_task_runner) {
MemoryDumpManager* mdm = MemoryDumpManager::GetInstance();
mdm->set_dumper_registrations_ignored_for_testing(false);
- mdm->RegisterDumpProvider(mdp, "TestDumpProvider", task_runner, options);
+ const char* kMDPName = "TestDumpProvider";
+ if (dumps_on_single_thread_task_runner) {
+ scoped_refptr<base::SingleThreadTaskRunner> single_thread_task_runner =
+ static_cast<base::SingleThreadTaskRunner*>(task_runner.get());
+ mdm->RegisterDumpProvider(mdp, kMDPName,
+ std::move(single_thread_task_runner), options);
+ } else {
+ mdm->RegisterDumpProviderWithSequencedTaskRunner(mdp, kMDPName, task_runner,
+ options);
+ }
mdm->set_dumper_registrations_ignored_for_testing(true);
}
+void RegisterDumpProvider(
+ MemoryDumpProvider* mdp,
+ const scoped_refptr<base::SingleThreadTaskRunner>& task_runner,
+ const MemoryDumpProvider::Options& options) {
+ RegisterDumpProvider(mdp, task_runner, options,
+ true /* dumps_on_single_thread_task_runner */);
+}
+
void RegisterDumpProvider(MemoryDumpProvider* mdp) {
RegisterDumpProvider(mdp, nullptr, MemoryDumpProvider::Options());
}
+void RegisterDumpProviderWithSequencedTaskRunner(
+ MemoryDumpProvider* mdp,
+ const scoped_refptr<base::SequencedTaskRunner>& task_runner,
+ const MemoryDumpProvider::Options& options) {
+ RegisterDumpProvider(mdp, task_runner, options,
+ false /* dumps_on_single_thread_task_runner */);
+}
+
void OnTraceDataCollected(Closure quit_closure,
trace_event::TraceResultBuffer* buffer,
const scoped_refptr<RefCountedString>& json,
@@ -111,6 +138,46 @@ class MockMemoryDumpProvider : public MemoryDumpProvider {
bool enable_mock_destructor;
};
+class TestSequencedTaskRunner : public SequencedTaskRunner {
+ public:
+ TestSequencedTaskRunner()
+ : worker_pool_(
+ new SequencedWorkerPool(2 /* max_threads */, "Test Task Runner")),
+ enabled_(true),
+ num_of_post_tasks_(0) {}
+
+ void set_enabled(bool value) { enabled_ = value; }
+ unsigned no_of_post_tasks() const { return num_of_post_tasks_; }
+
+ bool PostNonNestableDelayedTask(const tracked_objects::Location& from_here,
+ const Closure& task,
+ TimeDelta delay) override {
+ NOTREACHED();
+ return false;
+ }
+
+ bool PostDelayedTask(const tracked_objects::Location& from_here,
+ const Closure& task,
+ TimeDelta delay) override {
+ num_of_post_tasks_++;
+ if (enabled_)
+ return worker_pool_->PostSequencedWorkerTask(token_, from_here, task);
+ return false;
+ }
+
+ bool RunsTasksOnCurrentThread() const override {
+ return worker_pool_->IsRunningSequenceOnCurrentThread(token_);
+ }
+
+ private:
+ ~TestSequencedTaskRunner() override {}
+
+ scoped_refptr<SequencedWorkerPool> worker_pool_;
+ const SequencedWorkerPool::SequenceToken token_;
+ bool enabled_;
+ unsigned num_of_post_tasks_;
+};
+
class MemoryDumpManagerTest : public testing::Test {
public:
MemoryDumpManagerTest() : testing::Test(), kDefaultOptions() {}
@@ -442,6 +509,50 @@ TEST_F(MemoryDumpManagerTest, RespectTaskRunnerAffinity) {
DisableTracing();
}
+// Check that the memory dump calls are always posted on task runner for
+// SequencedTaskRunner case and that the dump provider gets disabled when
+// PostTask fails, but the dump still succeeds.
+TEST_F(MemoryDumpManagerTest, PostTaskForSequencedTaskRunner) {
+ InitializeMemoryDumpManager(false /* is_coordinator */);
+ std::vector<MockMemoryDumpProvider> mdps(3);
+ scoped_refptr<TestSequencedTaskRunner> task_runner1(
+ make_scoped_refptr(new TestSequencedTaskRunner()));
+ scoped_refptr<TestSequencedTaskRunner> task_runner2(
+ make_scoped_refptr(new TestSequencedTaskRunner()));
+ RegisterDumpProviderWithSequencedTaskRunner(&mdps[0], task_runner1,
+ kDefaultOptions);
+ RegisterDumpProviderWithSequencedTaskRunner(&mdps[1], task_runner2,
+ kDefaultOptions);
+ RegisterDumpProviderWithSequencedTaskRunner(&mdps[2], task_runner2,
+ kDefaultOptions);
+ // |mdps[0]| should be disabled permanently after first dump.
+ EXPECT_CALL(mdps[0], OnMemoryDump(_, _)).Times(0);
+ EXPECT_CALL(mdps[1], OnMemoryDump(_, _)).Times(2);
+ EXPECT_CALL(mdps[2], OnMemoryDump(_, _)).Times(2);
+ EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(2);
+
+ EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);
+
+ task_runner1->set_enabled(false);
+ last_callback_success_ = false;
+ RequestGlobalDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
+ MemoryDumpLevelOfDetail::DETAILED);
+ // Tasks should be individually posted even if |mdps[1]| and |mdps[2]| belong
+ // to same task runner.
+ EXPECT_EQ(1u, task_runner1->no_of_post_tasks());
+ EXPECT_EQ(2u, task_runner2->no_of_post_tasks());
+ EXPECT_TRUE(last_callback_success_);
+
+ task_runner1->set_enabled(true);
+ last_callback_success_ = false;
+ RequestGlobalDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
+ MemoryDumpLevelOfDetail::DETAILED);
+ EXPECT_EQ(2u, task_runner1->no_of_post_tasks());
+ EXPECT_EQ(4u, task_runner2->no_of_post_tasks());
+ EXPECT_TRUE(last_callback_success_);
+ DisableTracing();
+}
+
// Checks that providers get disabled after 3 consecutive failures, but not
// otherwise (e.g., if interleaved).
TEST_F(MemoryDumpManagerTest, DisableFailingDumpers) {
diff --git a/base/trace_event/memory_dump_provider.h b/base/trace_event/memory_dump_provider.h
index f03fcbd..79ab793 100644
--- a/base/trace_event/memory_dump_provider.h
+++ b/base/trace_event/memory_dump_provider.h
@@ -26,14 +26,20 @@ class BASE_EXPORT MemoryDumpProvider {
public:
// Optional arguments for MemoryDumpManager::RegisterDumpProvider().
struct Options {
- Options() : target_pid(kNullProcessId) {}
- explicit Options(ProcessId target_pid) : target_pid(target_pid) {}
+ Options()
+ : target_pid(kNullProcessId),
+ dumps_on_single_thread_task_runner(false) {}
// If the dump provider generates dumps on behalf of another process,
- // |target_process| contains the pid of that process.
+ // |target_pid| contains the pid of that process.
// The default value is kNullProcessId, which means that the dump provider
// generates dumps for the current process.
ProcessId target_pid;
+
+ // |dumps_on_single_thread_task_runner| is true if the dump provider runs on
+ // a SingleThreadTaskRunner, which is usually the case. It is faster to run
+ // all providers that run on the same thread together without thread hops.
+ bool dumps_on_single_thread_task_runner;
};
virtual ~MemoryDumpProvider() {}
diff --git a/components/tracing/process_metrics_memory_dump_provider.cc b/components/tracing/process_metrics_memory_dump_provider.cc
index 141764b..e299a79 100644
--- a/components/tracing/process_metrics_memory_dump_provider.cc
+++ b/components/tracing/process_metrics_memory_dump_provider.cc
@@ -207,7 +207,8 @@ void ProcessMetricsMemoryDumpProvider::RegisterForProcess(
base::ProcessId process) {
scoped_ptr<ProcessMetricsMemoryDumpProvider> metrics_provider(
new ProcessMetricsMemoryDumpProvider(process));
- base::trace_event::MemoryDumpProvider::Options options(process);
+ base::trace_event::MemoryDumpProvider::Options options;
+ options.target_pid = process;
base::trace_event::MemoryDumpManager::GetInstance()->RegisterDumpProvider(
metrics_provider.get(), "ProcessMemoryMetrics", nullptr, options);
bool did_insert =