summaryrefslogtreecommitdiffstats
path: root/base/trace_event/memory_dump_manager.cc
diff options
context:
space:
mode:
Diffstat (limited to 'base/trace_event/memory_dump_manager.cc')
-rw-r--r--base/trace_event/memory_dump_manager.cc237
1 files changed, 141 insertions, 96 deletions
diff --git a/base/trace_event/memory_dump_manager.cc b/base/trace_event/memory_dump_manager.cc
index a549202..c24a659 100644
--- a/base/trace_event/memory_dump_manager.cc
+++ b/base/trace_event/memory_dump_manager.cc
@@ -185,6 +185,36 @@ void MemoryDumpManager::RegisterDumpProvider(
MemoryDumpProvider* mdp,
const char* name,
const scoped_refptr<SingleThreadTaskRunner>& task_runner,
+ MemoryDumpProvider::Options options) {
+ options.dumps_on_single_thread_task_runner = true;
+ RegisterDumpProviderInternal(mdp, name, task_runner, options);
+}
+
+void MemoryDumpManager::RegisterDumpProvider(
+ MemoryDumpProvider* mdp,
+ const char* name,
+ const scoped_refptr<SingleThreadTaskRunner>& task_runner) {
+ // Set |dumps_on_single_thread_task_runner| to true because all providers
+ // without task runner are run on dump thread.
+ MemoryDumpProvider::Options options;
+ options.dumps_on_single_thread_task_runner = true;
+ RegisterDumpProviderInternal(mdp, name, task_runner, options);
+}
+
+void MemoryDumpManager::RegisterDumpProviderWithSequencedTaskRunner(
+ MemoryDumpProvider* mdp,
+ const char* name,
+ const scoped_refptr<SequencedTaskRunner>& task_runner,
+ MemoryDumpProvider::Options options) {
+ DCHECK(task_runner);
+ options.dumps_on_single_thread_task_runner = false;
+ RegisterDumpProviderInternal(mdp, name, task_runner, options);
+}
+
+void MemoryDumpManager::RegisterDumpProviderInternal(
+ MemoryDumpProvider* mdp,
+ const char* name,
+ const scoped_refptr<SequencedTaskRunner>& task_runner,
const MemoryDumpProvider::Options& options) {
if (dumper_registrations_ignored_for_testing_)
return;
@@ -205,13 +235,6 @@ void MemoryDumpManager::RegisterDumpProvider(
mdp->OnHeapProfilingEnabled(true);
}
-void MemoryDumpManager::RegisterDumpProvider(
- MemoryDumpProvider* mdp,
- const char* name,
- const scoped_refptr<SingleThreadTaskRunner>& task_runner) {
- RegisterDumpProvider(mdp, name, task_runner, MemoryDumpProvider::Options());
-}
-
void MemoryDumpManager::UnregisterDumpProvider(MemoryDumpProvider* mdp) {
UnregisterDumpProviderInternal(mdp, false /* delete_async */);
}
@@ -242,28 +265,29 @@ void MemoryDumpManager::UnregisterDumpProviderInternal(
if (take_mdp_ownership_and_delete_async) {
// The MDP will be deleted whenever the MDPInfo struct will, that is either:
// - At the end of this function, if no dump is in progress.
- // - In the prologue of the ContinueAsyncProcessDump().
+ // - Either in SetupNextMemoryDump() or InvokeOnMemoryDump() when MDPInfo is
+ // removed from |pending_dump_providers|.
DCHECK(!(*mdp_iter)->owned_dump_provider);
(*mdp_iter)->owned_dump_provider = std::move(owned_mdp);
} else if (subtle::NoBarrier_Load(&memory_tracing_enabled_)) {
// If you hit this DCHECK, your dump provider has a bug.
// Unregistration of a MemoryDumpProvider is safe only if:
- // - The MDP has specified a thread affinity (via task_runner()) AND
- // the unregistration happens on the same thread (so the MDP cannot
+ // - The MDP has specified a sequenced task runner affinity AND the
+  //     unregistration happens on the same task runner, so that the MDP cannot
// unregister and be in the middle of a OnMemoryDump() at the same time.
- // - The MDP has NOT specified a thread affinity and its ownership is
+ // - The MDP has NOT specified a task runner affinity and its ownership is
// transferred via UnregisterAndDeleteDumpProviderSoon().
// In all the other cases, it is not possible to guarantee that the
// unregistration will not race with OnMemoryDump() calls.
DCHECK((*mdp_iter)->task_runner &&
- (*mdp_iter)->task_runner->BelongsToCurrentThread())
+ (*mdp_iter)->task_runner->RunsTasksOnCurrentThread())
<< "MemoryDumpProvider \"" << (*mdp_iter)->name << "\" attempted to "
<< "unregister itself in a racy way. Please file a crbug.";
}
// The MDPInfo instance can still be referenced by the
// |ProcessMemoryDumpAsyncState.pending_dump_providers|. For this reason
- // the MDPInfo is flagged as disabled. It will cause ContinueAsyncProcessDump
+ // the MDPInfo is flagged as disabled. It will cause InvokeOnMemoryDump()
// to just skip it, without actually invoking the |mdp|, which might be
// destroyed by the caller soon after this method returns.
(*mdp_iter)->disabled = true;
@@ -323,7 +347,7 @@ void MemoryDumpManager::CreateProcessDump(const MemoryDumpRequestArgs& args,
{
AutoLock lock(lock_);
// |dump_thread_| can be nullptr if tracing was disabled before reaching
- // here. ContinueAsyncProcessDump is robust enough to tolerate it and will
+ // here. SetupNextMemoryDump() is robust enough to tolerate it and will
// NACK the dump.
pmd_async_state.reset(new ProcessMemoryDumpAsyncState(
args, dump_providers_, session_state_, callback,
@@ -334,113 +358,134 @@ void MemoryDumpManager::CreateProcessDump(const MemoryDumpRequestArgs& args,
TRACE_ID_MANGLE(args.dump_guid),
TRACE_EVENT_FLAG_FLOW_OUT);
- // Start the thread hop. |dump_providers_| are kept sorted by thread, so
- // ContinueAsyncProcessDump will hop at most once per thread (w.r.t. thread
- // affinity specified by the MemoryDumpProvider(s) in RegisterDumpProvider()).
- ContinueAsyncProcessDump(pmd_async_state.release());
+ // Start the process dump. This involves task runner hops as specified by the
+ // MemoryDumpProvider(s) in RegisterDumpProvider().
+ SetupNextMemoryDump(std::move(pmd_async_state));
}
-// At most one ContinueAsyncProcessDump() can be active at any time for a given
-// PMD, regardless of status of the |lock_|. |lock_| is used here purely to
-// ensure consistency w.r.t. (un)registrations of |dump_providers_|.
-// The linearization of dump providers' OnMemoryDump invocations is achieved by
-// means of subsequent PostTask(s).
-//
-// 1) Prologue:
-// - If this was the last hop, create a trace event, add it to the trace
-// and finalize (invoke callback).
-// - Check if we are on the right thread. If not hop and continue there.
-// - Check if the dump provider is disabled, if so skip the dump.
-// 2) Invoke the dump provider's OnMemoryDump() (unless skipped).
-// 3) Epilogue:
-// - Unregister the dump provider if it failed too many times consecutively.
-// - Pop() the MDP from the |pending_dump_providers| list, eventually
-// destroying the MDPInfo if that was unregistered in the meantime.
-void MemoryDumpManager::ContinueAsyncProcessDump(
- ProcessMemoryDumpAsyncState* owned_pmd_async_state) {
+// PostTask InvokeOnMemoryDump() to the dump provider's sequenced task runner. A
+// PostTask is always required for a generic SequencedTaskRunner to ensure that
+// no other task is running on it concurrently. SetupNextMemoryDump() and
+// InvokeOnMemoryDump() are called alternately, which linearizes the dump
+// provider's OnMemoryDump invocations.
+// At most one of either SetupNextMemoryDump() or InvokeOnMemoryDump() can be
+// active at any time for a given PMD, regardless of status of the |lock_|.
+// |lock_| is used in these functions purely to ensure consistency w.r.t.
+// (un)registrations of |dump_providers_|.
+void MemoryDumpManager::SetupNextMemoryDump(
+ scoped_ptr<ProcessMemoryDumpAsyncState> pmd_async_state) {
// Initializes the ThreadLocalEventBuffer to guarantee that the TRACE_EVENTs
// in the PostTask below don't end up registering their own dump providers
// (for discounting trace memory overhead) while holding the |lock_|.
TraceLog::GetInstance()->InitializeThreadLocalEventBufferIfSupported();
+ // If this was the last hop, create a trace event, add it to the trace and
+ // finalize process dump (invoke callback).
+ if (pmd_async_state->pending_dump_providers.empty())
+ return FinalizeDumpAndAddToTrace(std::move(pmd_async_state));
+
+ // Read MemoryDumpProviderInfo thread safety considerations in
+ // memory_dump_manager.h when accessing |mdpinfo| fields.
+ MemoryDumpProviderInfo* mdpinfo =
+ pmd_async_state->pending_dump_providers.back().get();
+
+ // If the dump provider did not specify a task runner affinity, dump on
+ // |dump_thread_|. Note that |dump_thread_| might have been destroyed
+ // meanwhile.
+ SequencedTaskRunner* task_runner = mdpinfo->task_runner.get();
+ if (!task_runner) {
+ DCHECK(mdpinfo->options.dumps_on_single_thread_task_runner);
+ task_runner = pmd_async_state->dump_thread_task_runner.get();
+ if (!task_runner) {
+ // If tracing was disabled before reaching CreateProcessDump() the
+ // dump_thread_ would have been already torn down. Nack current dump and
+ // continue.
+ pmd_async_state->dump_successful = false;
+ pmd_async_state->pending_dump_providers.pop_back();
+ return SetupNextMemoryDump(std::move(pmd_async_state));
+ }
+ }
+
+ if (mdpinfo->options.dumps_on_single_thread_task_runner &&
+ task_runner->RunsTasksOnCurrentThread()) {
+ // If |dumps_on_single_thread_task_runner| is true then no PostTask is
+ // required if we are on the right thread.
+ return InvokeOnMemoryDump(pmd_async_state.release());
+ }
+
+ bool did_post_task = task_runner->PostTask(
+ FROM_HERE, Bind(&MemoryDumpManager::InvokeOnMemoryDump, Unretained(this),
+ Unretained(pmd_async_state.get())));
+
+ if (did_post_task) {
+    // Ownership is transferred to InvokeOnMemoryDump().
+ ignore_result(pmd_async_state.release());
+ return;
+ }
+
+ // PostTask usually fails only if the process or thread is shut down. So, the
+ // dump provider is disabled here. But, don't disable unbound dump providers.
+ // The utility thread is normally shutdown when disabling the trace and
+ // getting here in this case is expected.
+ if (mdpinfo->task_runner) {
+ LOG(ERROR) << "Disabling MemoryDumpProvider \"" << mdpinfo->name
+ << "\". Failed to post task on the task runner provided.";
+
+ // A locked access is required to R/W |disabled| (for the
+ // UnregisterAndDeleteDumpProviderSoon() case).
+ AutoLock lock(lock_);
+ mdpinfo->disabled = true;
+ }
+
+ // PostTask failed. Ignore the dump provider and continue.
+ pmd_async_state->pending_dump_providers.pop_back();
+ SetupNextMemoryDump(std::move(pmd_async_state));
+}
+
+// This function is called on the right task runner for current MDP. It is
+// either the task runner specified by MDP or |dump_thread_task_runner| if the
+// MDP did not specify task runner. Invokes the dump provider's OnMemoryDump()
+// (unless disabled).
+void MemoryDumpManager::InvokeOnMemoryDump(
+ ProcessMemoryDumpAsyncState* owned_pmd_async_state) {
// In theory |owned_pmd_async_state| should be a scoped_ptr. The only reason
- // why it isn't is because of the corner case logic of |did_post_task| below,
- // which needs to take back the ownership of the |pmd_async_state| when a
- // thread goes away and consequently the PostTask() fails.
+ // why it isn't is because of the corner case logic of |did_post_task|
+ // above, which needs to take back the ownership of the |pmd_async_state| when
+ // the PostTask() fails.
// Unfortunately, PostTask() destroys the scoped_ptr arguments upon failure
// to prevent accidental leaks. Using a scoped_ptr would prevent us from
// skipping the hop and moving on. Hence the manual naked -> scoped ptr juggling.
auto pmd_async_state = make_scoped_ptr(owned_pmd_async_state);
owned_pmd_async_state = nullptr;
- if (pmd_async_state->pending_dump_providers.empty())
- return FinalizeDumpAndAddToTrace(std::move(pmd_async_state));
-
// Read MemoryDumpProviderInfo thread safety considerations in
// memory_dump_manager.h when accessing |mdpinfo| fields.
MemoryDumpProviderInfo* mdpinfo =
pmd_async_state->pending_dump_providers.back().get();
- // If the dump provider did not specify a thread affinity, dump on
- // |dump_thread_|. Note that |dump_thread_| might have been Stop()-ed at this
- // point (if tracing was disabled in the meanwhile). In such case the
- // PostTask() below will fail, but |task_runner| should always be non-null.
- SingleThreadTaskRunner* task_runner = mdpinfo->task_runner.get();
- if (!task_runner)
- task_runner = pmd_async_state->dump_thread_task_runner.get();
-
- bool post_task_failed = false;
- if (!task_runner) {
- // If tracing was disabled before reaching CreateProcessDump() |task_runner|
- // will be null, as the dump_thread_ would have been already torn down.
- post_task_failed = true;
- pmd_async_state->dump_successful = false;
- } else if (!task_runner->BelongsToCurrentThread()) {
- // It's time to hop onto another thread.
- post_task_failed = !task_runner->PostTask(
- FROM_HERE, Bind(&MemoryDumpManager::ContinueAsyncProcessDump,
- Unretained(this), Unretained(pmd_async_state.get())));
- if (!post_task_failed) {
- // Ownership is tranferred to the next ContinueAsyncProcessDump().
- ignore_result(pmd_async_state.release());
- return;
- }
- }
+ DCHECK(!mdpinfo->task_runner ||
+ mdpinfo->task_runner->RunsTasksOnCurrentThread());
- // At this point either:
- // - The MDP has a task runner affinity and we are on the right thread.
- // - The MDP has a task runner affinity but the underlying thread is gone,
- // hence the above |post_task_failed| == true.
- // - The MDP does NOT have a task runner affinity. A locked access is required
- // to R/W |disabled| (for the UnregisterAndDeleteDumpProviderSoon() case).
bool should_dump;
- const char* disabled_reason = nullptr;
{
+ // A locked access is required to R/W |disabled| (for the
+ // UnregisterAndDeleteDumpProviderSoon() case).
AutoLock lock(lock_);
- if (!mdpinfo->disabled) {
- if (mdpinfo->consecutive_failures >= kMaxConsecutiveFailuresCount) {
- mdpinfo->disabled = true;
- disabled_reason = "Dump failed multiple times consecutively.";
- } else if (post_task_failed && mdpinfo->task_runner) {
- // Don't disable unbound dump providers. The utility thread is normally
- // shutdown when disabling the trace and getting here in this case is
- // expected.
- mdpinfo->disabled = true;
- disabled_reason = "The thread it was meant to dump onto is gone.";
- }
+
+ // Unregister the dump provider if it failed too many times consecutively.
+ if (!mdpinfo->disabled &&
+ mdpinfo->consecutive_failures >= kMaxConsecutiveFailuresCount) {
+ mdpinfo->disabled = true;
+ LOG(ERROR) << "Disabling MemoryDumpProvider \"" << mdpinfo->name
+ << "\". Dump failed multiple times consecutively.";
}
- should_dump = !mdpinfo->disabled && !post_task_failed;
+ should_dump = !mdpinfo->disabled;
} // AutoLock lock(lock_);
- if (disabled_reason) {
- LOG(ERROR) << "Disabling MemoryDumpProvider \"" << mdpinfo->name << "\". "
- << disabled_reason;
- }
-
if (should_dump) {
// Invoke the dump provider.
TRACE_EVENT_WITH_FLOW1(kTraceCategory,
- "MemoryDumpManager::ContinueAsyncProcessDump",
+ "MemoryDumpManager::InvokeOnMemoryDump",
TRACE_ID_MANGLE(pmd_async_state->req_args.dump_guid),
TRACE_EVENT_FLAG_FLOW_IN | TRACE_EVENT_FLAG_FLOW_OUT,
"dump_provider.name", mdpinfo->name);
@@ -455,10 +500,10 @@ void MemoryDumpManager::ContinueAsyncProcessDump(
bool dump_successful = mdpinfo->dump_provider->OnMemoryDump(args, pmd);
mdpinfo->consecutive_failures =
dump_successful ? 0 : mdpinfo->consecutive_failures + 1;
- } // if (!mdpinfo->disabled)
+ }
pmd_async_state->pending_dump_providers.pop_back();
- ContinueAsyncProcessDump(pmd_async_state.release());
+ SetupNextMemoryDump(std::move(pmd_async_state));
}
// static
@@ -612,7 +657,7 @@ void MemoryDumpManager::OnTraceLogDisabled() {
}
// Thread stops are blocking and must be performed outside of the |lock_|
- // or will deadlock (e.g., if ContinueAsyncProcessDump() tries to acquire it).
+ // or will deadlock (e.g., if SetupNextMemoryDump() tries to acquire it).
periodic_dump_timer_.Stop();
if (dump_thread)
dump_thread->Stop();
@@ -625,7 +670,7 @@ uint64_t MemoryDumpManager::GetTracingProcessId() const {
MemoryDumpManager::MemoryDumpProviderInfo::MemoryDumpProviderInfo(
MemoryDumpProvider* dump_provider,
const char* name,
- const scoped_refptr<SingleThreadTaskRunner>& task_runner,
+ const scoped_refptr<SequencedTaskRunner>& task_runner,
const MemoryDumpProvider::Options& options)
: dump_provider(dump_provider),
name(name),