author     ruuda <ruuda@google.com>              2015-08-24 06:25:22 -0700
committer  Commit bot <commit-bot@chromium.org>  2015-08-24 13:26:18 +0000
commit     535f0876943ebd615b8a736cc96ecbae155a7919 (patch)
tree       e73352652b0b18f590bff411668866701062d915 /base
parent     7b4c1f468e3198cf127527ddbc40e1802238328a (diff)
Allow unregistering MemoryDumpProviders during dump
Previously, if a memory dump provider unregistered itself while a dump was in
progress, the dump was aborted. While the scenario is unlikely, it was
triggered in some tests (https://crbug.com/522009).

Instead of unregistering immediately, we now set a flag and skip the provider
during the dump if it has been unregistered. At the end of every dump, we
remove all unregistered providers. When the registration includes a task
runner, the refptr can keep the task runner alive until the next dump, even
when the provider has unregistered itself.

BUG=518823,522009,522165

Review URL: https://codereview.chromium.org/1289793007

Cr-Commit-Position: refs/heads/master@{#345057}
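The pattern this CL describes (flag on unregister, skip and sweep during the dump)
can be sketched outside Chromium roughly as below. This is a simplified, hypothetical
illustration, not the code in this CL: it uses std::mutex and a plain std::set of a
made-up Entry struct in place of base::Lock and MemoryDumpProviderInfoSet, ignores
task-runner affinity and consecutive-failure tracking, and sweeps flagged entries
inside the dump loop rather than as each provider is visited.

#include <iostream>
#include <mutex>
#include <set>
#include <string>
#include <utility>

// A provider that reports its memory usage on request.
class Provider {
 public:
  explicit Provider(std::string name) : name_(std::move(name)) {}
  void OnDump() const { std::cout << "dumping " << name_ << "\n"; }

 private:
  std::string name_;
};

// Minimal dump manager using the deferred-unregistration pattern.
class DumpManager {
 public:
  void Register(Provider* p) {
    std::lock_guard<std::mutex> lock(mutex_);
    auto result = providers_.insert(Entry{p, false});
    // If a stale entry (still flagged unregistered) is present, replace it,
    // analogous to the re-registration handling added to RegisterDumpProvider.
    if (!result.second) {
      providers_.erase(result.first);
      providers_.insert(Entry{p, false});
    }
  }

  void Unregister(Provider* p) {
    std::lock_guard<std::mutex> lock(mutex_);
    // Do not erase immediately: a dump may still be iterating over the set.
    // Only flag the entry; it is erased by the next dump.
    auto it = providers_.find(Entry{p, false});
    if (it != providers_.end())
      it->unregistered = true;
  }

  void Dump() {
    std::lock_guard<std::mutex> lock(mutex_);
    for (auto it = providers_.begin(); it != providers_.end();) {
      if (it->unregistered) {
        // Skip and remove entries whose provider unregistered earlier.
        it = providers_.erase(it);
        continue;
      }
      it->provider->OnDump();
      ++it;
    }
  }

 private:
  struct Entry {
    Provider* provider;
    // mutable so the flag can be flipped on a set element without affecting
    // its ordering, as with the flags in MemoryDumpProviderInfo.
    mutable bool unregistered;
    bool operator<(const Entry& other) const {
      return provider < other.provider;
    }
  };

  std::mutex mutex_;
  std::set<Entry> providers_;
};

int main() {
  Provider a("a"), b("b");
  DumpManager mdm;
  mdm.Register(&a);
  mdm.Register(&b);
  mdm.Unregister(&b);  // Flagged, not erased.
  mdm.Dump();          // Dumps only "a" and drops b's stale entry.
  mdm.Register(&b);    // Re-registration yields a fresh, un-flagged entry.
  mdm.Dump();          // Dumps both "a" and "b".
  return 0;
}

The key point of the design is the mutable unregistered flag: entries stay in place,
so iterators held by an in-progress dump remain valid, and they are physically erased
only under the lock once the dump logic reaches them.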
Diffstat (limited to 'base')
-rw-r--r--  base/trace_event/memory_dump_manager.cc            44
-rw-r--r--  base/trace_event/memory_dump_manager.h             11
-rw-r--r--  base/trace_event/memory_dump_manager_unittest.cc   106
3 files changed, 133 insertions, 28 deletions
diff --git a/base/trace_event/memory_dump_manager.cc b/base/trace_event/memory_dump_manager.cc
index bafd121..f6e0239 100644
--- a/base/trace_event/memory_dump_manager.cc
+++ b/base/trace_event/memory_dump_manager.cc
@@ -93,8 +93,7 @@ void MemoryDumpManager::SetInstanceForTesting(MemoryDumpManager* instance) {
}
MemoryDumpManager::MemoryDumpManager()
- : did_unregister_dump_provider_(false),
- delegate_(nullptr),
+ : delegate_(nullptr),
memory_tracing_enabled_(0),
tracing_process_id_(kInvalidTracingProcessId),
system_allocator_pool_name_(nullptr),
@@ -152,7 +151,16 @@ void MemoryDumpManager::RegisterDumpProvider(
const scoped_refptr<SingleThreadTaskRunner>& task_runner) {
MemoryDumpProviderInfo mdp_info(mdp, task_runner);
AutoLock lock(lock_);
- dump_providers_.insert(mdp_info);
+ auto iter_new = dump_providers_.insert(mdp_info);
+
+ // If there was a previous entry, replace it with the new one. This is to deal
+ // with the case where a dump provider unregisters itself and then re-
+ // registers before a memory dump happens, so its entry was still in the
+ // collection but flagged |unregistered|.
+ if (!iter_new.second) {
+ dump_providers_.erase(iter_new.first);
+ dump_providers_.insert(mdp_info);
+ }
}
void MemoryDumpManager::RegisterDumpProvider(MemoryDumpProvider* mdp) {
@@ -183,8 +191,7 @@ void MemoryDumpManager::UnregisterDumpProvider(MemoryDumpProvider* mdp) {
<< "The MemoryDumpProvider attempted to unregister itself in a racy way. "
<< "Please file a crbug.";
- dump_providers_.erase(mdp_iter);
- did_unregister_dump_provider_ = true;
+ mdp_iter->unregistered = true;
}
void MemoryDumpManager::RequestGlobalDump(MemoryDumpType dump_type,
@@ -228,7 +235,6 @@ void MemoryDumpManager::CreateProcessDump(const MemoryDumpRequestArgs& args,
scoped_ptr<ProcessMemoryDumpAsyncState> pmd_async_state;
{
AutoLock lock(lock_);
- did_unregister_dump_provider_ = false;
pmd_async_state.reset(new ProcessMemoryDumpAsyncState(
args, dump_providers_.begin(), session_state_, callback));
}
@@ -268,18 +274,10 @@ void MemoryDumpManager::ContinueAsyncProcessDump(
bool skip_dump = false;
{
AutoLock lock(lock_);
- // In the unlikely event that a dump provider was unregistered while
- // dumping, abort the dump, as that would make |next_dump_provider| invalid.
- // Registration, on the other hand, is safe as per std::set<> contract.
- if (did_unregister_dump_provider_) {
- return AbortDumpLocked(pmd_async_state->callback,
- pmd_async_state->task_runner,
- pmd_async_state->req_args.dump_guid);
- }
- auto* mdp_info = &*pmd_async_state->next_dump_provider;
+ auto mdp_info = pmd_async_state->next_dump_provider;
mdp = mdp_info->dump_provider;
- if (mdp_info->disabled) {
+ if (mdp_info->disabled || mdp_info->unregistered) {
skip_dump = true;
} else if (mdp_info->task_runner &&
!mdp_info->task_runner->BelongsToCurrentThread()) {
@@ -318,12 +316,7 @@ void MemoryDumpManager::ContinueAsyncProcessDump(
{
AutoLock lock(lock_);
- if (did_unregister_dump_provider_) {
- return AbortDumpLocked(pmd_async_state->callback,
- pmd_async_state->task_runner,
- pmd_async_state->req_args.dump_guid);
- }
- auto* mdp_info = &*pmd_async_state->next_dump_provider;
+ auto mdp_info = pmd_async_state->next_dump_provider;
if (dump_successful) {
mdp_info->consecutive_failures = 0;
} else if (!skip_dump) {
@@ -334,6 +327,9 @@ void MemoryDumpManager::ContinueAsyncProcessDump(
}
++pmd_async_state->next_dump_provider;
finalize = pmd_async_state->next_dump_provider == dump_providers_.end();
+
+ if (mdp_info->unregistered)
+ dump_providers_.erase(mdp_info);
}
if (!skip_dump && !dump_successful) {
@@ -454,8 +450,8 @@ MemoryDumpManager::MemoryDumpProviderInfo::MemoryDumpProviderInfo(
: dump_provider(dump_provider),
task_runner(task_runner),
consecutive_failures(0),
- disabled(false) {
-}
+ disabled(false),
+ unregistered(false) {}
MemoryDumpManager::MemoryDumpProviderInfo::~MemoryDumpProviderInfo() {
}
diff --git a/base/trace_event/memory_dump_manager.h b/base/trace_event/memory_dump_manager.h
index 5c75e4e..fc32270 100644
--- a/base/trace_event/memory_dump_manager.h
+++ b/base/trace_event/memory_dump_manager.h
@@ -103,6 +103,8 @@ class BASE_EXPORT MemoryDumpManager : public TraceLog::EnabledStateObserver {
friend class MemoryDumpManagerDelegate;
friend class MemoryDumpManagerTest;
FRIEND_TEST_ALL_PREFIXES(MemoryDumpManagerTest, DisableFailingDumpers);
+ FRIEND_TEST_ALL_PREFIXES(MemoryDumpManagerTest,
+ UnregisterDumperFromThreadWhileDumping);
// Descriptor struct used to hold information about registered MDPs. It is
// deliberately copyable, in order to allow it to be used as std::set value.
@@ -123,6 +125,11 @@ class BASE_EXPORT MemoryDumpManager : public TraceLog::EnabledStateObserver {
// as can be safely changed without impacting the order within the set.
mutable int consecutive_failures;
mutable bool disabled;
+
+ // When a dump provider unregisters, it is flagged as |unregistered| and it
+ // is removed only upon the next memory dump. This is to avoid altering the
+ // |dump_providers_| collection while a dump is in progress.
+ mutable bool unregistered;
};
using MemoryDumpProviderInfoSet = std::set<MemoryDumpProviderInfo>;
@@ -189,10 +196,6 @@ class BASE_EXPORT MemoryDumpManager : public TraceLog::EnabledStateObserver {
// affinity (MDPs belonging to the same thread are adjacent).
MemoryDumpProviderInfoSet dump_providers_;
- // Flag used to signal that some provider was removed from |dump_providers_|
- // and therefore the current memory dump (if any) should be aborted.
- bool did_unregister_dump_provider_;
-
// Shared among all the PMDs to keep state scoped to the tracing session.
scoped_refptr<MemoryDumpSessionState> session_state_;
diff --git a/base/trace_event/memory_dump_manager_unittest.cc b/base/trace_event/memory_dump_manager_unittest.cc
index 9835415..6fb8c32 100644
--- a/base/trace_event/memory_dump_manager_unittest.cc
+++ b/base/trace_event/memory_dump_manager_unittest.cc
@@ -8,6 +8,7 @@
#include "base/memory/scoped_vector.h"
#include "base/message_loop/message_loop.h"
#include "base/run_loop.h"
+#include "base/test/test_io_thread.h"
#include "base/thread_task_runner_handle.h"
#include "base/threading/thread.h"
#include "base/trace_event/memory_dump_provider.h"
@@ -16,6 +17,7 @@
#include "testing/gtest/include/gtest/gtest.h"
using testing::_;
+using testing::AtMost;
using testing::Between;
using testing::Invoke;
using testing::Return;
@@ -282,6 +284,55 @@ TEST_F(MemoryDumpManagerTest, MultipleDumpers) {
DisableTracing();
}
+// Verify that whether OnMemoryDump is called depends only on the current
+// registration state and not on previous registrations and dumps.
+TEST_F(MemoryDumpManagerTest, RegistrationConsistency) {
+ MockDumpProvider mdp;
+
+ mdm_->RegisterDumpProvider(&mdp);
+
+ {
+ EXPECT_CALL(mdp, OnMemoryDump(_, _)).Times(1);
+ EnableTracing(kTraceCategory);
+ mdm_->RequestGlobalDump(MemoryDumpType::EXPLICITLY_TRIGGERED,
+ high_detail_args);
+ DisableTracing();
+ }
+
+ mdm_->UnregisterDumpProvider(&mdp);
+
+ {
+ EXPECT_CALL(mdp, OnMemoryDump(_, _)).Times(0);
+ EnableTracing(kTraceCategory);
+ mdm_->RequestGlobalDump(MemoryDumpType::EXPLICITLY_TRIGGERED,
+ high_detail_args);
+ DisableTracing();
+ }
+
+ mdm_->RegisterDumpProvider(&mdp);
+ mdm_->UnregisterDumpProvider(&mdp);
+
+ {
+ EXPECT_CALL(mdp, OnMemoryDump(_, _)).Times(0);
+ EnableTracing(kTraceCategory);
+ mdm_->RequestGlobalDump(MemoryDumpType::EXPLICITLY_TRIGGERED,
+ high_detail_args);
+ DisableTracing();
+ }
+
+ mdm_->RegisterDumpProvider(&mdp);
+ mdm_->UnregisterDumpProvider(&mdp);
+ mdm_->RegisterDumpProvider(&mdp);
+
+ {
+ EXPECT_CALL(mdp, OnMemoryDump(_, _)).Times(1);
+ EnableTracing(kTraceCategory);
+ mdm_->RequestGlobalDump(MemoryDumpType::EXPLICITLY_TRIGGERED,
+ high_detail_args);
+ DisableTracing();
+ }
+}
+
// Checks that the MemoryDumpManager respects the thread affinity when a
// MemoryDumpProvider specifies a task_runner(). The test starts creating 8
// threads and registering a MemoryDumpProvider on each of them. At each
@@ -433,6 +484,61 @@ TEST_F(MemoryDumpManagerTest, UnregisterDumperWhileDumping) {
DisableTracing();
}
+// Verify that the dump does not abort when unregistering a provider while
+// dumping from a different thread than the dumping thread.
+TEST_F(MemoryDumpManagerTest, UnregisterDumperFromThreadWhileDumping) {
+ ScopedVector<TestIOThread> threads;
+ ScopedVector<MockDumpProvider> mdps;
+
+ for (int i = 0; i < 2; i++) {
+ threads.push_back(new TestIOThread(TestIOThread::kAutoStart));
+ mdps.push_back(new MockDumpProvider(threads.back()->task_runner()));
+ mdm_->RegisterDumpProvider(mdps.back(), threads.back()->task_runner());
+ }
+
+ int on_memory_dump_call_count = 0;
+ RunLoop run_loop;
+
+ // When OnMemoryDump is called on either of the dump providers, it will
+ // unregister the other one.
+ for (MockDumpProvider* mdp : mdps) {
+ int other_idx = (mdps.front() == mdp);
+ TestIOThread* other_thread = threads[other_idx];
+ MockDumpProvider* other_mdp = mdps[other_idx];
+ auto on_dump = [this, other_thread, other_mdp, &on_memory_dump_call_count](
+ const MemoryDumpArgs& args, ProcessMemoryDump* pmd) {
+ other_thread->PostTaskAndWait(
+ FROM_HERE, base::Bind(&MemoryDumpManager::UnregisterDumpProvider,
+ base::Unretained(&*mdm_), other_mdp));
+ on_memory_dump_call_count++;
+ return true;
+ };
+
+ // OnMemoryDump is called once for the provider that dumps first, and zero
+ // times for the other provider.
+ EXPECT_CALL(*mdp, OnMemoryDump(_, _))
+ .Times(AtMost(1))
+ .WillOnce(Invoke(on_dump));
+ }
+
+ last_callback_success_ = false;
+ MemoryDumpCallback callback =
+ Bind(&MemoryDumpManagerTest::DumpCallbackAdapter, Unretained(this),
+ MessageLoop::current()->task_runner(), run_loop.QuitClosure());
+
+ EnableTracing(kTraceCategory);
+ MemoryDumpRequestArgs request_args = {0, MemoryDumpType::EXPLICITLY_TRIGGERED,
+ high_detail_args};
+ mdm_->CreateProcessDump(request_args, callback);
+
+ run_loop.Run();
+
+ ASSERT_EQ(1, on_memory_dump_call_count);
+ ASSERT_EQ(true, last_callback_success_);
+
+ DisableTracing();
+}
+
// Ensures that a NACK callback is invoked if RequestGlobalDump is called when
// tracing is not enabled.
TEST_F(MemoryDumpManagerTest, CallbackCalledOnFailure) {