diff options
author    erikchen <erikchen@chromium.org>       2015-03-13 15:57:37 -0700
committer Commit bot <commit-bot@chromium.org>   2015-03-13 22:58:22 +0000
commit    19249bcc5ac81a3dfd5e38be10d56b2ea434162c (patch)
tree      f09e666868bd0f64f71929783f376af72a554a90 /base/threading
parent    d8bdbf385ad0b4bee03095d1737bdcf51672f675 (diff)
download  chromium_src-19249bcc5ac81a3dfd5e38be10d56b2ea434162c.zip
          chromium_src-19249bcc5ac81a3dfd5e38be10d56b2ea434162c.tar.gz
          chromium_src-19249bcc5ac81a3dfd5e38be10d56b2ea434162c.tar.bz2
Base: Fix another deadlock during shutdown of SequencedWorkerPool.
This CL is very similar to https://codereview.chromium.org/956583004/. There is
yet another code path that causes SequencedWorkerPool to destroy some tasks
without releasing its global lock.
BUG=465271
Review URL: https://codereview.chromium.org/994483005
Cr-Commit-Position: refs/heads/master@{#320600}
Diffstat (limited to 'base/threading')
 base/threading/sequenced_worker_pool.cc          (-rw-r--r--) | 14 ++++++++++++++
 base/threading/sequenced_worker_pool_unittest.cc (-rw-r--r--) | 31 +++++++++++++++++++++++++++++++
 2 files changed, 45 insertions(+), 0 deletions(-)
diff --git a/base/threading/sequenced_worker_pool.cc b/base/threading/sequenced_worker_pool.cc
index f20d997..52b178b 100644
--- a/base/threading/sequenced_worker_pool.cc
+++ b/base/threading/sequenced_worker_pool.cc
@@ -803,6 +803,20 @@ void SequencedWorkerPool::Inner::ThreadLoop(Worker* this_worker) {
         delete_these_outside_lock.clear();
         break;
       }
+
+      // No work was found, but there are tasks that need deletion. The
+      // deletion must happen outside of the lock.
+      if (delete_these_outside_lock.size()) {
+        AutoUnlock unlock(lock_);
+        delete_these_outside_lock.clear();
+
+        // Since the lock has been released, |status| may no longer be
+        // accurate. It might read GET_WORK_WAIT even if there are tasks
+        // ready to perform work. Jump to the top of the loop to recalculate
+        // |status|.
+        continue;
+      }
+
       waiting_thread_count_++;
       switch (status) {
diff --git a/base/threading/sequenced_worker_pool_unittest.cc b/base/threading/sequenced_worker_pool_unittest.cc
index c132731..5d0880c 100644
--- a/base/threading/sequenced_worker_pool_unittest.cc
+++ b/base/threading/sequenced_worker_pool_unittest.cc
@@ -148,6 +148,17 @@ class TestTracker : public base::RefCountedThreadSafe<TestTracker> {
         FROM_HERE, reposting_task, SequencedWorkerPool::SKIP_ON_SHUTDOWN);
   }
 
+  // This task reposts itself back onto the SequencedWorkerPool before it
+  // finishes running.
+  void PostRepostingBlockingTask(
+      const scoped_refptr<SequencedWorkerPool>& pool,
+      const SequencedWorkerPool::SequenceToken& token) {
+    Closure reposting_task =
+        base::Bind(&TestTracker::PostRepostingBlockingTask, this, pool, token);
+    pool->PostSequencedWorkerTaskWithShutdownBehavior(token,
+        FROM_HERE, reposting_task, SequencedWorkerPool::BLOCK_SHUTDOWN);
+  }
+
   // Waits until the given number of tasks have started executing.
   void WaitUntilTasksBlocked(size_t count) {
     {
@@ -795,6 +806,26 @@ TEST_F(SequencedWorkerPoolTest, AvoidsDeadlockOnShutdown) {
   pool()->Shutdown();
 }
 
+// Similar to the test AvoidsDeadlockOnShutdown, but there are now also
+// sequenced, blocking tasks in the queue during shutdown.
+TEST_F(SequencedWorkerPoolTest,
+       AvoidsDeadlockOnShutdownWithSequencedBlockingTasks) {
+  const std::string sequence_token_name("name");
+  for (int i = 0; i < 4; ++i) {
+    scoped_refptr<DestructionDeadlockChecker> checker(
+        new DestructionDeadlockChecker(pool()));
+    tracker()->PostRepostingTask(pool(), checker);
+
+    SequencedWorkerPool::SequenceToken token1 =
+        pool()->GetNamedSequenceToken(sequence_token_name);
+    tracker()->PostRepostingBlockingTask(pool(), token1);
+  }
+
+  // Shutting down the pool should destroy the DestructionDeadlockCheckers,
+  // which in turn should not deadlock in their destructors.
+  pool()->Shutdown();
+}
+
 // Verify that FlushForTesting works as intended.
 TEST_F(SequencedWorkerPoolTest, FlushForTesting) {
   // Should be fine to call on a new instance.