summaryrefslogtreecommitdiffstats
path: root/cc
diff options
context:
space:
mode:
authorreveman@chromium.org <reveman@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>2013-03-09 14:26:58 +0000
committerreveman@chromium.org <reveman@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>2013-03-09 14:26:58 +0000
commit8f8bd29f1b5049ef141b409f2ddccdd940e2cadc (patch)
treea61b546c7d0379c6d963c7f4b61c966d34192a35 /cc
parent521475b3fa3e5333a022854a70844a4a46c84d5a (diff)
downloadchromium_src-8f8bd29f1b5049ef141b409f2ddccdd940e2cadc.zip
chromium_src-8f8bd29f1b5049ef141b409f2ddccdd940e2cadc.tar.gz
chromium_src-8f8bd29f1b5049ef141b409f2ddccdd940e2cadc.tar.bz2
cc: Implement raster worker thread pool using shared queue.
This is an implementation of the worker pool using as shared queue instead of using message loops. It improves efficiency and load balancing between threads significantly. This changes the behavior of cheap tasks a bit. Cheap tasks are now shared between compositor thread and worker threads. Compositor thread will run cheap tasks until time limit is reached. Worker threads can always run cheap tasks. The time limit decision for cheap tasks is moved to the consumer where this decision can be properly made. BUG=178239 Review URL: https://chromiumcodereview.appspot.com/12398002 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@187145 0039d316-1c4b-4281-b951-d872f2087c98
Diffstat (limited to 'cc')
-rw-r--r--cc/raster_worker_pool.cc28
-rw-r--r--cc/rendering_stats.cc23
-rw-r--r--cc/rendering_stats.h7
-rw-r--r--cc/scoped_ptr_deque.h7
-rw-r--r--cc/tile_manager.cc31
-rw-r--r--cc/tile_manager.h3
-rw-r--r--cc/worker_pool.cc700
-rw-r--r--cc/worker_pool.h142
8 files changed, 576 insertions, 365 deletions
diff --git a/cc/raster_worker_pool.cc b/cc/raster_worker_pool.cc
index e0210e6..316b962 100644
--- a/cc/raster_worker_pool.cc
+++ b/cc/raster_worker_pool.cc
@@ -13,33 +13,44 @@ namespace {
class RasterWorkerPoolTaskImpl : public internal::WorkerPoolTask {
public:
RasterWorkerPoolTaskImpl(PicturePileImpl* picture_pile,
+ bool is_cheap,
const RasterWorkerPool::RasterCallback& task,
const base::Closure& reply)
: internal::WorkerPoolTask(reply),
picture_pile_(picture_pile),
+ is_cheap_(is_cheap),
task_(task) {
DCHECK(picture_pile_);
}
+ virtual bool IsCheap() OVERRIDE { return is_cheap_; }
+
virtual void WillRunOnThread(unsigned thread_index) OVERRIDE {
picture_pile_ = picture_pile_->GetCloneForDrawingOnThread(thread_index);
}
virtual void Run(RenderingStats* rendering_stats) OVERRIDE {
task_.Run(picture_pile_.get(), rendering_stats);
- base::subtle::Release_Store(&completed_, 1);
}
private:
scoped_refptr<PicturePileImpl> picture_pile_;
+ bool is_cheap_;
RasterWorkerPool::RasterCallback task_;
};
+const char* kWorkerThreadNamePrefix = "CompositorRaster";
+
+const int kCheckForCompletedTasksDelayMs = 6;
+
} // namespace
RasterWorkerPool::RasterWorkerPool(
- WorkerPoolClient* client, size_t num_threads)
- : WorkerPool(client, num_threads) {
+ WorkerPoolClient* client, size_t num_threads) : WorkerPool(
+ client,
+ num_threads,
+ base::TimeDelta::FromMilliseconds(kCheckForCompletedTasksDelayMs),
+ kWorkerThreadNamePrefix) {
}
RasterWorkerPool::~RasterWorkerPool() {
@@ -49,12 +60,11 @@ void RasterWorkerPool::PostRasterTaskAndReply(PicturePileImpl* picture_pile,
bool is_cheap,
const RasterCallback& task,
const base::Closure& reply) {
- PostTask(
- make_scoped_ptr(new RasterWorkerPoolTaskImpl(
- picture_pile,
- task,
- reply)).PassAs<internal::WorkerPoolTask>(),
- is_cheap);
+ PostTask(make_scoped_ptr(new RasterWorkerPoolTaskImpl(
+ picture_pile,
+ is_cheap,
+ task,
+ reply)).PassAs<internal::WorkerPoolTask>());
}
} // namespace cc
diff --git a/cc/rendering_stats.cc b/cc/rendering_stats.cc
index 2295217..e62f0cf 100644
--- a/cc/rendering_stats.cc
+++ b/cc/rendering_stats.cc
@@ -52,4 +52,27 @@ void RenderingStats::EnumerateFields(Enumerator* enumerator) const {
totalImageGatheringTime.InSecondsF());
}
+void RenderingStats::Add(const RenderingStats& other) {
+ numAnimationFrames += other.numAnimationFrames;
+ numFramesSentToScreen += other.numFramesSentToScreen;
+ droppedFrameCount += other.droppedFrameCount;
+ totalPaintTime += other.totalPaintTime;
+ totalRasterizeTime += other.totalRasterizeTime;
+ totalRasterizeTimeForNowBinsOnPendingTree +=
+ other.totalRasterizeTimeForNowBinsOnPendingTree;
+ totalCommitTime += other.totalCommitTime;
+ totalCommitCount += other.totalCommitCount;
+ totalPixelsPainted += other.totalPixelsPainted;
+ totalPixelsRasterized += other.totalPixelsRasterized;
+ numImplThreadScrolls += other.numImplThreadScrolls;
+ numMainThreadScrolls += other.numMainThreadScrolls;
+ numLayersDrawn += other.numLayersDrawn;
+ numMissingTiles += other.numMissingTiles;
+ totalDeferredImageDecodeCount += other.totalDeferredImageDecodeCount;
+ totalDeferredImageCacheHitCount += other.totalDeferredImageCacheHitCount;
+ totalImageGatheringCount += other.totalImageGatheringCount;
+ totalDeferredImageDecodeTime += other.totalDeferredImageDecodeTime;
+ totalImageGatheringTime += other.totalImageGatheringTime;
+}
+
} // namespace cc
diff --git a/cc/rendering_stats.h b/cc/rendering_stats.h
index 193c985..f0f76929 100644
--- a/cc/rendering_stats.h
+++ b/cc/rendering_stats.h
@@ -32,8 +32,8 @@ struct CC_EXPORT RenderingStats {
int64 totalImageGatheringCount;
base::TimeDelta totalDeferredImageDecodeTime;
base::TimeDelta totalImageGatheringTime;
- // Note: when adding new members, please remember to update enumerateFields
- // in rendering_stats.cc.
+ // Note: when adding new members, please remember to update EnumerateFields
+ // and Add in rendering_stats.cc.
RenderingStats();
@@ -55,6 +55,9 @@ struct CC_EXPORT RenderingStats {
// Outputs the fields in this structure to the provided enumerator.
void EnumerateFields(Enumerator* enumerator) const;
+
+ // Add fields of |other| to the fields in this structure.
+ void Add(const RenderingStats& other);
};
} // namespace cc
diff --git a/cc/scoped_ptr_deque.h b/cc/scoped_ptr_deque.h
index f5eddea..52aa40d 100644
--- a/cc/scoped_ptr_deque.h
+++ b/cc/scoped_ptr_deque.h
@@ -97,6 +97,13 @@ class ScopedPtrDeque {
data_.insert(position, item.release());
}
+ scoped_ptr<T> take(iterator position) {
+ DCHECK(position <= end());
+ scoped_ptr<T> ret(*position);
+ data_.erase(position);
+ return ret.Pass();
+ }
+
void swap(iterator a, iterator b) {
DCHECK(a < end());
DCHECK(b < end());
diff --git a/cc/tile_manager.cc b/cc/tile_manager.cc
index 963dd9a..e4daf8a 100644
--- a/cc/tile_manager.cc
+++ b/cc/tile_manager.cc
@@ -39,6 +39,16 @@ const int kMaxPendingUploadBytes = 100 * 1024 * 1024;
const int kMaxPendingUploads = 1000;
#endif
+#if defined(OS_ANDROID)
+const int kMaxNumPendingTasksPerThread = 8;
+#else
+const int kMaxNumPendingTasksPerThread = 40;
+#endif
+
+// Limit for time spent running cheap tasks during a single frame.
+// TODO(skyostil): Determine this limit more dynamically.
+const int kRunCheapTasksTimeMs = 6;
+
// Determine bin based on three categories of tiles: things we need now,
// things we need soon, and eventually.
inline TileManagerBin BinFromTilePriority(const TilePriority& prio) {
@@ -198,7 +208,9 @@ TileManager::TileManager(
use_color_estimator_(use_color_estimator),
allow_cheap_tasks_(true),
did_schedule_cheap_tasks_(false),
- prediction_benchmarking_(prediction_benchmarking) {
+ prediction_benchmarking_(prediction_benchmarking),
+ pending_tasks_(0),
+ max_pending_tasks_(kMaxNumPendingTasksPerThread * num_raster_threads) {
for (int i = 0; i < NUM_STATES; ++i) {
for (int j = 0; j < NUM_TREES; ++j) {
for (int k = 0; k < NUM_BINS; ++k)
@@ -667,7 +679,7 @@ void TileManager::FreeResourcesForTile(Tile* tile) {
}
bool TileManager::CanDispatchRasterTask(Tile* tile) const {
- if (raster_worker_pool_->IsBusy())
+ if (pending_tasks_ >= max_pending_tasks_)
return false;
size_t new_bytes_pending = bytes_pending_upload_;
new_bytes_pending += tile->bytes_consumed_if_allocated();
@@ -778,7 +790,7 @@ void TileManager::DispatchImageDecodeTasksForTile(Tile* tile) {
rendering_stats_.totalDeferredImageCacheHitCount++;
pending_pixel_refs.erase(it++);
} else {
- if (raster_worker_pool_->IsBusy())
+ if (pending_tasks_ >= max_pending_tasks_)
return;
DispatchOneImageDecodeTask(tile, *it);
++it;
@@ -800,12 +812,14 @@ void TileManager::DispatchOneImageDecodeTask(
base::Unretained(this),
tile,
pixel_ref_id));
+ pending_tasks_++;
}
void TileManager::OnImageDecodeTaskCompleted(
scoped_refptr<Tile> tile, uint32_t pixel_ref_id) {
TRACE_EVENT0("cc", "TileManager::OnImageDecodeTaskCompleted");
pending_decode_tasks_.erase(pixel_ref_id);
+ pending_tasks_--;
for (TileList::iterator it = tiles_with_image_decoding_tasks_.begin();
it != tiles_with_image_decoding_tasks_.end(); ++it) {
@@ -859,7 +873,14 @@ void TileManager::DispatchOneRasterTask(scoped_refptr<Tile> tile) {
tile,
base::Passed(&resource),
manage_tiles_call_count_));
- did_schedule_cheap_tasks_ |= (allow_cheap_tasks_ && is_cheap_to_raster);
+ if ((allow_cheap_tasks_ && is_cheap_to_raster) &&
+ !did_schedule_cheap_tasks_) {
+ raster_worker_pool_->SetRunCheapTasksTimeLimit(
+ base::TimeTicks::Now() +
+ base::TimeDelta::FromMilliseconds(kRunCheapTasksTimeMs));
+ did_schedule_cheap_tasks_ = true;
+ }
+ pending_tasks_++;
}
TileManager::RasterTaskMetadata TileManager::GetRasterTaskMetadata(
@@ -880,6 +901,8 @@ void TileManager::OnRasterTaskCompleted(
int manage_tiles_call_count_when_dispatched) {
TRACE_EVENT0("cc", "TileManager::OnRasterTaskCompleted");
+ pending_tasks_--;
+
// Release raster resources.
resource_pool_->resource_provider()->UnmapPixelBuffer(resource->id());
diff --git a/cc/tile_manager.h b/cc/tile_manager.h
index 095f9a8..dd7e90b 100644
--- a/cc/tile_manager.h
+++ b/cc/tile_manager.h
@@ -249,6 +249,9 @@ class CC_EXPORT TileManager : public WorkerPoolClient {
int raster_state_count_[NUM_STATES][NUM_TREES][NUM_BINS];
bool prediction_benchmarking_;
+ size_t pending_tasks_;
+ size_t max_pending_tasks_;
+
DISALLOW_COPY_AND_ASSIGN(TileManager);
};
diff --git a/cc/worker_pool.cc b/cc/worker_pool.cc
index 88746e2..6624bf3 100644
--- a/cc/worker_pool.cc
+++ b/cc/worker_pool.cc
@@ -4,12 +4,12 @@
#include "cc/worker_pool.h"
-#include <algorithm>
-
#include "base/bind.h"
#include "base/debug/trace_event.h"
-#include "base/stl_util.h"
#include "base/stringprintf.h"
+#include "base/synchronization/condition_variable.h"
+#include "base/threading/simple_thread.h"
+#include "cc/rendering_stats.h"
#if defined(OS_ANDROID)
// TODO(epenner): Move thread priorities to base. (crbug.com/170549)
@@ -27,379 +27,581 @@ class WorkerPoolTaskImpl : public internal::WorkerPoolTask {
: internal::WorkerPoolTask(reply),
task_(task) {}
+ virtual bool IsCheap() OVERRIDE { return false; }
+
virtual void WillRunOnThread(unsigned thread_index) OVERRIDE {}
virtual void Run(RenderingStats* rendering_stats) OVERRIDE {
task_.Run(rendering_stats);
- base::subtle::Release_Store(&completed_, 1);
}
private:
WorkerPool::Callback task_;
};
-const char* kWorkerThreadNamePrefix = "Compositor";
-
-#if defined(OS_ANDROID)
-const int kNumPendingTasksPerWorker = 8;
-#else
-const int kNumPendingTasksPerWorker = 40;
-#endif
-
-const int kCheckForCompletedTasksDelayMs = 6;
-
-// Limits for the total number of cheap tasks we are allowed to perform
-// during a single frame and the time spent running those tasks.
-// TODO(skyostil): Determine these limits more dynamically.
-const int kMaxCheapTaskCount = 6;
-const int kMaxCheapTaskMs = kCheckForCompletedTasksDelayMs;
-
} // namespace
namespace internal {
WorkerPoolTask::WorkerPoolTask(const base::Closure& reply) : reply_(reply) {
- base::subtle::Acquire_Store(&completed_, 0);
}
WorkerPoolTask::~WorkerPoolTask() {
}
-bool WorkerPoolTask::HasCompleted() {
- return base::subtle::Acquire_Load(&completed_) == 1;
-}
-
void WorkerPoolTask::DidComplete() {
- DCHECK_EQ(base::subtle::Acquire_Load(&completed_), 1);
reply_.Run();
}
} // namespace internal
-WorkerPool::Worker::Worker(
- WorkerPool* worker_pool, const std::string name, unsigned index)
- : base::Thread(name.c_str()),
- worker_pool_(worker_pool),
- rendering_stats_(make_scoped_ptr(new RenderingStats)),
- record_rendering_stats_(false),
- index_(index) {
- Start();
- DCHECK(IsRunning());
+// Internal to the worker pool. Any data or logic that needs to be
+// shared between threads lives in this class. All members are guarded
+// by |lock_|.
+class WorkerPool::Inner : public base::DelegateSimpleThread::Delegate {
+ public:
+ Inner(WorkerPool* worker_pool,
+ size_t num_threads,
+ const std::string& thread_name_prefix,
+ bool need_on_task_completed_callback);
+ ~Inner();
+
+ void Shutdown();
+
+ void SetRecordRenderingStats(bool record_rendering_stats);
+
+ void GetRenderingStats(RenderingStats* stats);
+
+ void PostTask(scoped_ptr<internal::WorkerPoolTask> task);
+
+ // Appends all completed tasks to worker pool's completed tasks queue
+ // and returns true if idle.
+ bool CollectCompletedTasks();
+
+ // Runs cheap tasks on caller thread until |time_limit| is reached
+ // and returns true if idle.
+ bool RunCheapTasksUntilTimeLimit(base::TimeTicks time_limit);
+
+ private:
+ // Appends all completed tasks to |completed_tasks|. Lock must
+ // already be acquired before calling this function.
+ bool AppendCompletedTasksWithLockAcquired(
+ ScopedPtrDeque<internal::WorkerPoolTask>* completed_tasks);
+
+ // Schedule a OnTaskCompletedOnOriginThread callback if not already
+ // pending. Lock must already be acquired before calling this function.
+ void ScheduleOnTaskCompletedWithLockAcquired();
+ void OnTaskCompletedOnOriginThread();
+
+ // Schedule an OnIdleOnOriginThread callback if not already pending.
+ // Lock must already be acquired before calling this function.
+ void ScheduleOnIdleWithLockAcquired();
+ void OnIdleOnOriginThread();
+
+ // Overridden from base::DelegateSimpleThread:
+ virtual void Run() OVERRIDE;
+
+ // Pointer to worker pool. Can only be used on origin thread.
+ // Not guarded by |lock_|.
+ WorkerPool* worker_pool_on_origin_thread_;
+
+ // This lock protects all members of this class except
+ // |worker_pool_on_origin_thread_|. Do not read or modify anything
+ // without holding this lock. Do not block while holding this lock.
+ mutable base::Lock lock_;
+
+ // Condition variable that is waited on by worker threads until new
+ // tasks are posted or shutdown starts.
+ base::ConditionVariable has_pending_tasks_cv_;
+
+ // Target message loop used for posting callbacks.
+ scoped_refptr<base::MessageLoopProxy> origin_loop_;
+
+ base::WeakPtrFactory<Inner> weak_ptr_factory_;
+
+ // Set to true when worker pool requires a callback for each
+ // completed task.
+ bool need_on_task_completed_callback_;
+
+ const base::Closure on_task_completed_callback_;
+ // Set when a OnTaskCompletedOnOriginThread() callback is pending.
+ bool on_task_completed_pending_;
+
+ const base::Closure on_idle_callback_;
+ // Set when a OnIdleOnOriginThread() callback is pending.
+ bool on_idle_pending_;
+
+ // Provides each running thread loop with a unique index. First thread
+ // loop index is 0.
+ unsigned next_thread_index_;
+
+ // Number of tasks currently running.
+ unsigned running_task_count_;
+
+ // Set during shutdown. Tells workers to exit when no more tasks
+ // are pending.
+ bool shutdown_;
+
+ typedef ScopedPtrDeque<internal::WorkerPoolTask> TaskDeque;
+ TaskDeque pending_tasks_;
+ TaskDeque completed_tasks_;
+
+ scoped_ptr<RenderingStats> rendering_stats_;
+
+ ScopedPtrDeque<base::DelegateSimpleThread> workers_;
+
+ DISALLOW_COPY_AND_ASSIGN(Inner);
+};
+
+WorkerPool::Inner::Inner(WorkerPool* worker_pool,
+ size_t num_threads,
+ const std::string& thread_name_prefix,
+ bool need_on_task_completed_callback)
+ : worker_pool_on_origin_thread_(worker_pool),
+ lock_(),
+ has_pending_tasks_cv_(&lock_),
+ origin_loop_(base::MessageLoopProxy::current()),
+ weak_ptr_factory_(ALLOW_THIS_IN_INITIALIZER_LIST(this)),
+ need_on_task_completed_callback_(need_on_task_completed_callback),
+ on_task_completed_callback_(
+ base::Bind(&WorkerPool::Inner::OnTaskCompletedOnOriginThread,
+ weak_ptr_factory_.GetWeakPtr())),
+ on_task_completed_pending_(false),
+ on_idle_callback_(base::Bind(&WorkerPool::Inner::OnIdleOnOriginThread,
+ weak_ptr_factory_.GetWeakPtr())),
+ on_idle_pending_(false),
+ next_thread_index_(0),
+ running_task_count_(0),
+ shutdown_(false) {
+ base::AutoLock lock(lock_);
+
+ while (workers_.size() < num_threads) {
+ scoped_ptr<base::DelegateSimpleThread> worker = make_scoped_ptr(
+ new base::DelegateSimpleThread(
+ this,
+ thread_name_prefix +
+ StringPrintf("Worker%lu", workers_.size() + 1).c_str()));
+ worker->Start();
+ workers_.push_back(worker.Pass());
+ }
}
-WorkerPool::Worker::~Worker() {
- DCHECK(!IsRunning());
+WorkerPool::Inner::~Inner() {
+ base::AutoLock lock(lock_);
+
+ DCHECK(shutdown_);
+
+ // Cancel all pending callbacks.
+ weak_ptr_factory_.InvalidateWeakPtrs();
+
DCHECK_EQ(pending_tasks_.size(), 0);
+ DCHECK_EQ(completed_tasks_.size(), 0);
+ DCHECK_EQ(running_task_count_, 0);
}
-void WorkerPool::Worker::StopAfterCompletingAllPendingTasks() {
- // Signals the thread to exit and returns once all pending tasks have run.
- Stop();
+void WorkerPool::Inner::Shutdown() {
+ {
+ base::AutoLock lock(lock_);
+
+ DCHECK(!shutdown_);
+ shutdown_ = true;
+
+ // Wake up a worker so it knows it should exit. This will cause all workers
+ // to exit as each will wake up another worker before exiting.
+ has_pending_tasks_cv_.Signal();
+ }
- // Complete all pending tasks. The Stop() call above guarantees that
- // all tasks have finished running.
- while (!pending_tasks_.empty())
- OnTaskCompleted();
+ while (workers_.size()) {
+ scoped_ptr<base::DelegateSimpleThread> worker = workers_.take_front();
+ worker->Join();
+ }
}
-void WorkerPool::Worker::PostTask(scoped_ptr<internal::WorkerPoolTask> task) {
- RenderingStats* stats =
- record_rendering_stats_ ? rendering_stats_.get() : NULL;
+void WorkerPool::Inner::SetRecordRenderingStats(bool record_rendering_stats) {
+ base::AutoLock lock(lock_);
- task->WillRunOnThread(index_);
+ if (record_rendering_stats)
+ rendering_stats_.reset(new RenderingStats);
+ else
+ rendering_stats_.reset();
+}
- message_loop_proxy()->PostTask(
- FROM_HERE,
- base::Bind(&Worker::RunTask,
- base::Unretained(task.get()),
- base::Unretained(worker_pool_),
- base::Unretained(stats)));
+void WorkerPool::Inner::GetRenderingStats(RenderingStats* stats) {
+ base::AutoLock lock(lock_);
+
+ if (rendering_stats_)
+ stats->Add(*rendering_stats_);
+}
+
+void WorkerPool::Inner::PostTask(scoped_ptr<internal::WorkerPoolTask> task) {
+ base::AutoLock lock(lock_);
pending_tasks_.push_back(task.Pass());
+
+ // There is more work available, so wake up worker thread.
+ has_pending_tasks_cv_.Signal();
+}
+
+bool WorkerPool::Inner::CollectCompletedTasks() {
+ base::AutoLock lock(lock_);
+
+ return AppendCompletedTasksWithLockAcquired(
+ &worker_pool_on_origin_thread_->completed_tasks_);
+}
+
+bool WorkerPool::Inner::RunCheapTasksUntilTimeLimit(
+ base::TimeTicks time_limit) {
+ base::AutoLock lock(lock_);
+
+ while (base::TimeTicks::Now() < time_limit) {
+ scoped_ptr<internal::WorkerPoolTask> task;
+
+ // Find next cheap task.
+ for (TaskDeque::iterator iter = pending_tasks_.begin();
+ iter != pending_tasks_.end(); ++iter) {
+ if ((*iter)->IsCheap()) {
+ task = pending_tasks_.take(iter);
+ break;
+ }
+ }
+
+ if (!task) {
+ // Schedule an idle callback if requested and not pending.
+ if (!running_task_count_ && pending_tasks_.empty())
+ ScheduleOnIdleWithLockAcquired();
+
+ // Exit when no more cheap tasks are pending.
+ break;
+ }
+
+ scoped_ptr<RenderingStats> rendering_stats;
+ // Collect rendering stats if |rendering_stats_| is set.
+ if (rendering_stats_)
+ rendering_stats = make_scoped_ptr(new RenderingStats);
+
+ // Increment |running_task_count_| before starting to run task.
+ running_task_count_++;
+
+ {
+ base::AutoUnlock unlock(lock_);
+
+ task->Run(rendering_stats.get());
+
+ // Append tasks directly to worker pool's completed tasks queue.
+ worker_pool_on_origin_thread_->completed_tasks_.push_back(task.Pass());
+ if (need_on_task_completed_callback_)
+ worker_pool_on_origin_thread_->OnTaskCompleted();
+ }
+
+ // Add rendering stat results to |rendering_stats_|.
+ if (rendering_stats && rendering_stats_)
+ rendering_stats_->Add(*rendering_stats);
+
+ // Decrement |running_task_count_| now that we are done running task.
+ running_task_count_--;
+ }
+
+ // Append any other completed tasks before releasing lock.
+ return AppendCompletedTasksWithLockAcquired(
+ &worker_pool_on_origin_thread_->completed_tasks_);
+}
+
+bool WorkerPool::Inner::AppendCompletedTasksWithLockAcquired(
+ ScopedPtrDeque<internal::WorkerPoolTask>* completed_tasks) {
+ lock_.AssertAcquired();
+
+ while (completed_tasks_.size())
+ completed_tasks->push_back(completed_tasks_.take_front().Pass());
+
+ return !running_task_count_ && pending_tasks_.empty();
+}
+
+void WorkerPool::Inner::ScheduleOnTaskCompletedWithLockAcquired() {
+ lock_.AssertAcquired();
+
+ if (on_task_completed_pending_ || !need_on_task_completed_callback_)
+ return;
+ origin_loop_->PostTask(FROM_HERE, on_task_completed_callback_);
+ on_task_completed_pending_ = true;
+}
+
+void WorkerPool::Inner::OnTaskCompletedOnOriginThread() {
+ {
+ base::AutoLock lock(lock_);
+
+ DCHECK(on_task_completed_pending_);
+ on_task_completed_pending_ = false;
+
+ AppendCompletedTasksWithLockAcquired(
+ &worker_pool_on_origin_thread_->completed_tasks_);
+ }
+
+ worker_pool_on_origin_thread_->OnTaskCompleted();
+}
+
+void WorkerPool::Inner::ScheduleOnIdleWithLockAcquired() {
+ lock_.AssertAcquired();
+
+ if (on_idle_pending_)
+ return;
+ origin_loop_->PostTask(FROM_HERE, on_idle_callback_);
+ on_idle_pending_ = true;
}
-void WorkerPool::Worker::Init() {
+void WorkerPool::Inner::OnIdleOnOriginThread() {
+ {
+ base::AutoLock lock(lock_);
+
+ DCHECK(on_idle_pending_);
+ on_idle_pending_ = false;
+
+ // Early out if no longer idle.
+ if (running_task_count_ || !pending_tasks_.empty())
+ return;
+
+ AppendCompletedTasksWithLockAcquired(
+ &worker_pool_on_origin_thread_->completed_tasks_);
+ }
+
+ worker_pool_on_origin_thread_->OnIdle();
+}
+
+void WorkerPool::Inner::Run() {
#if defined(OS_ANDROID)
// TODO(epenner): Move thread priorities to base. (crbug.com/170549)
int nice_value = 10; // Idle priority.
setpriority(PRIO_PROCESS, base::PlatformThread::CurrentId(), nice_value);
#endif
-}
-// static
-void WorkerPool::Worker::RunTask(
- internal::WorkerPoolTask* task,
- WorkerPool* worker_pool,
- RenderingStats* rendering_stats) {
- task->Run(rendering_stats);
- worker_pool->OnWorkCompletedOnWorkerThread();
-}
+ {
+ base::AutoLock lock(lock_);
-void WorkerPool::Worker::OnTaskCompleted() {
- CHECK(!pending_tasks_.empty());
+ // Get a unique thread index.
+ int thread_index = next_thread_index_++;
- scoped_ptr<internal::WorkerPoolTask> task = pending_tasks_.take_front();
+ while (true) {
+ if (pending_tasks_.empty()) {
+ // Exit when shutdown is set and no more tasks are pending.
+ if (shutdown_)
+ break;
- // Notify worker pool of task completion.
- worker_pool_->OnTaskCompleted();
+ // Schedule an idle callback if requested and not pending.
+ if (!running_task_count_)
+ ScheduleOnIdleWithLockAcquired();
- task->DidComplete();
-}
+ // Wait for new pending tasks.
+ has_pending_tasks_cv_.Wait();
+ continue;
+ }
-void WorkerPool::Worker::CheckForCompletedTasks() {
- while (!pending_tasks_.empty()) {
- if (!pending_tasks_.front()->HasCompleted())
- return;
+ // Get next task.
+ scoped_ptr<internal::WorkerPoolTask> task = pending_tasks_.take_front();
- OnTaskCompleted();
+ scoped_ptr<RenderingStats> rendering_stats;
+ // Collect rendering stats if |rendering_stats_| is set.
+ if (rendering_stats_)
+ rendering_stats = make_scoped_ptr(new RenderingStats);
+
+ // Increment |running_task_count_| before starting to run task.
+ running_task_count_++;
+
+ // There may be more work available, so wake up another
+ // worker thread.
+ has_pending_tasks_cv_.Signal();
+
+ {
+ base::AutoUnlock unlock(lock_);
+
+ task->WillRunOnThread(thread_index);
+ task->Run(rendering_stats.get());
+ }
+
+ completed_tasks_.push_back(task.Pass());
+
+ // Add rendering stat results to |rendering_stats_|.
+ if (rendering_stats && rendering_stats_)
+ rendering_stats_->Add(*rendering_stats);
+
+ // Decrement |running_task_count_| now that we are done running task.
+ running_task_count_--;
+
+ // Schedule a task completed callback if requested and not pending.
+ ScheduleOnTaskCompletedWithLockAcquired();
+ }
+
+ // We noticed we should exit. Wake up the next worker so it knows it should
+ // exit as well (because the Shutdown() code only signals once).
+ has_pending_tasks_cv_.Signal();
}
}
-WorkerPool::WorkerPool(WorkerPoolClient* client, size_t num_threads)
+WorkerPool::WorkerPool(WorkerPoolClient* client,
+ size_t num_threads,
+ base::TimeDelta check_for_completed_tasks_delay,
+ const std::string& thread_name_prefix)
: client_(client),
origin_loop_(base::MessageLoopProxy::current()),
weak_ptr_factory_(ALLOW_THIS_IN_INITIALIZER_LIST(this)),
- workers_need_sorting_(false),
- pending_task_count_(0),
- shutdown_(false),
- idle_callback_(
- base::Bind(&WorkerPool::OnIdle, weak_ptr_factory_.GetWeakPtr())),
- cheap_task_callback_(
+ check_for_completed_tasks_delay_(check_for_completed_tasks_delay),
+ check_for_completed_tasks_pending_(false),
+ run_cheap_tasks_callback_(
base::Bind(&WorkerPool::RunCheapTasks,
weak_ptr_factory_.GetWeakPtr())),
- run_cheap_tasks_pending_(false) {
- const std::string thread_name_prefix = kWorkerThreadNamePrefix;
- while (workers_.size() < num_threads) {
- unsigned thread_index = workers_.size();
- workers_.push_back(
- new Worker(this,
- thread_name_prefix +
- StringPrintf("Worker%d", thread_index + 1).c_str(),
- thread_index));
- }
- base::subtle::Acquire_Store(&pending_task_count_, 0);
+ run_cheap_tasks_pending_(false),
+ inner_(make_scoped_ptr(
+ new Inner(
+ this,
+ num_threads,
+ thread_name_prefix,
+ // Request OnTaskCompleted() callback when check
+ // for completed tasks delay is 0.
+ check_for_completed_tasks_delay == base::TimeDelta()))) {
}
WorkerPool::~WorkerPool() {
Shutdown();
- STLDeleteElements(&workers_);
+
// Cancel all pending callbacks.
weak_ptr_factory_.InvalidateWeakPtrs();
- DCHECK_EQ(base::subtle::Acquire_Load(&pending_task_count_), 0);
+
+ DCHECK_EQ(completed_tasks_.size(), 0);
}
void WorkerPool::Shutdown() {
- DCHECK(!shutdown_);
- shutdown_ = true;
-
- if (run_cheap_tasks_pending_)
- RunCheapTasks();
-
- for (WorkerVector::iterator it = workers_.begin();
- it != workers_.end(); it++) {
- Worker* worker = *it;
- worker->StopAfterCompletingAllPendingTasks();
- }
+ inner_->Shutdown();
+ DispatchCompletionCallbacks();
}
void WorkerPool::PostTaskAndReply(
const Callback& task, const base::Closure& reply) {
- PostTask(
- make_scoped_ptr(new WorkerPoolTaskImpl(
- task,
- reply)).PassAs<internal::WorkerPoolTask>(),
- false);
+ PostTask(make_scoped_ptr(new WorkerPoolTaskImpl(
+ task,
+ reply)).PassAs<internal::WorkerPoolTask>());
}
-bool WorkerPool::IsBusy() {
- Worker* worker = GetWorkerForNextTask();
-
- return worker->num_pending_tasks() >= kNumPendingTasksPerWorker;
+void WorkerPool::SetRunCheapTasksTimeLimit(
+ base::TimeTicks run_cheap_tasks_time_limit) {
+ run_cheap_tasks_time_limit_ = run_cheap_tasks_time_limit;
+ ScheduleRunCheapTasks();
}
void WorkerPool::SetRecordRenderingStats(bool record_rendering_stats) {
- if (record_rendering_stats)
- cheap_rendering_stats_.reset(new RenderingStats);
- else
- cheap_rendering_stats_.reset();
-
- for (WorkerVector::iterator it = workers_.begin();
- it != workers_.end(); ++it) {
- Worker* worker = *it;
- worker->set_record_rendering_stats(record_rendering_stats);
- }
+ inner_->SetRecordRenderingStats(record_rendering_stats);
}
void WorkerPool::GetRenderingStats(RenderingStats* stats) {
- stats->totalRasterizeTime = base::TimeDelta();
- stats->totalRasterizeTimeForNowBinsOnPendingTree = base::TimeDelta();
- stats->totalPixelsRasterized = 0;
- stats->totalDeferredImageDecodeCount = 0;
- stats->totalDeferredImageDecodeTime = base::TimeDelta();
- if (cheap_rendering_stats_) {
- stats->totalRasterizeTime +=
- cheap_rendering_stats_->totalRasterizeTime;
- stats->totalPixelsRasterized +=
- cheap_rendering_stats_->totalPixelsRasterized;
- stats->totalDeferredImageDecodeCount +=
- cheap_rendering_stats_->totalDeferredImageDecodeCount;
- stats->totalDeferredImageDecodeTime +=
- cheap_rendering_stats_->totalDeferredImageDecodeTime;
- }
- for (WorkerVector::iterator it = workers_.begin();
- it != workers_.end(); ++it) {
- Worker* worker = *it;
- CHECK(worker->rendering_stats());
- stats->totalRasterizeTime +=
- worker->rendering_stats()->totalRasterizeTime;
- stats->totalRasterizeTimeForNowBinsOnPendingTree +=
- worker->rendering_stats()->totalRasterizeTimeForNowBinsOnPendingTree;
- stats->totalPixelsRasterized +=
- worker->rendering_stats()->totalPixelsRasterized;
- stats->totalDeferredImageDecodeCount +=
- worker->rendering_stats()->totalDeferredImageDecodeCount;
- stats->totalDeferredImageDecodeTime +=
- worker->rendering_stats()->totalDeferredImageDecodeTime;
- }
+ inner_->GetRenderingStats(stats);
+}
+
+void WorkerPool::OnIdle() {
+ TRACE_EVENT0("cc", "WorkerPool::OnIdle");
+
+ DispatchCompletionCallbacks();
}
-WorkerPool::Worker* WorkerPool::GetWorkerForNextTask() {
- CHECK(!shutdown_);
- SortWorkersIfNeeded();
- return workers_.front();
+void WorkerPool::OnTaskCompleted() {
+ TRACE_EVENT0("cc", "WorkerPool::OnTaskCompleted");
+
+ DispatchCompletionCallbacks();
}
void WorkerPool::ScheduleCheckForCompletedTasks() {
- if (!check_for_completed_tasks_deadline_.is_null())
+ if (check_for_completed_tasks_pending_ ||
+ check_for_completed_tasks_delay_ == base::TimeDelta())
return;
-
check_for_completed_tasks_callback_.Reset(
- base::Bind(&WorkerPool::CheckForCompletedTasks,
- weak_ptr_factory_.GetWeakPtr()));
- base::TimeDelta delay =
- base::TimeDelta::FromMilliseconds(kCheckForCompletedTasksDelayMs);
- check_for_completed_tasks_deadline_ = base::TimeTicks::Now() + delay;
+ base::Bind(&WorkerPool::CheckForCompletedTasks,
+ weak_ptr_factory_.GetWeakPtr()));
+ check_for_completed_tasks_time_ = base::TimeTicks::Now() +
+ check_for_completed_tasks_delay_;
origin_loop_->PostDelayedTask(
FROM_HERE,
check_for_completed_tasks_callback_.callback(),
- delay);
-}
-
-void WorkerPool::OnWorkCompletedOnWorkerThread() {
- // Post idle handler task when pool work count reaches 0.
- if (base::subtle::Barrier_AtomicIncrement(&pending_task_count_, -1) == 0) {
- origin_loop_->PostTask(FROM_HERE, idle_callback_);
- }
-}
-
-void WorkerPool::OnIdle() {
- if (base::subtle::Acquire_Load(&pending_task_count_) == 0 &&
- pending_cheap_tasks_.empty()) {
- check_for_completed_tasks_callback_.Cancel();
- CheckForCompletedTasks();
- }
+ check_for_completed_tasks_delay_);
+ check_for_completed_tasks_pending_ = true;
}
void WorkerPool::CheckForCompletedTasks() {
TRACE_EVENT0("cc", "WorkerPool::CheckForCompletedTasks");
- check_for_completed_tasks_deadline_ = base::TimeTicks();
+ DCHECK(check_for_completed_tasks_pending_);
+ check_for_completed_tasks_pending_ = false;
- while (completed_cheap_tasks_.size()) {
- scoped_ptr<internal::WorkerPoolTask> task =
- completed_cheap_tasks_.take_front();
- task->DidComplete();
- }
+ // Schedule another check for completed tasks if not idle.
+ if (!inner_->CollectCompletedTasks())
+ ScheduleCheckForCompletedTasks();
- for (WorkerVector::iterator it = workers_.begin();
- it != workers_.end(); it++) {
- Worker* worker = *it;
- worker->CheckForCompletedTasks();
- }
+ DispatchCompletionCallbacks();
+}
- client_->DidFinishDispatchingWorkerPoolCompletionCallbacks();
+void WorkerPool::CancelCheckForCompletedTasks() {
+ if (!check_for_completed_tasks_pending_)
+ return;
- for (WorkerVector::iterator it = workers_.begin();
- it != workers_.end(); it++) {
- Worker* worker = *it;
- if (worker->num_pending_tasks()) {
- ScheduleCheckForCompletedTasks();
- break;
- }
- }
+ check_for_completed_tasks_callback_.Cancel();
+ check_for_completed_tasks_pending_ = false;
}
-void WorkerPool::OnTaskCompleted() {
- workers_need_sorting_ = true;
-}
+void WorkerPool::DispatchCompletionCallbacks() {
+ TRACE_EVENT0("cc", "WorkerPool::DispatchCompletionCallbacks");
-void WorkerPool::SortWorkersIfNeeded() {
- if (!workers_need_sorting_)
+ if (completed_tasks_.empty())
return;
- std::sort(workers_.begin(), workers_.end(), NumPendingTasksComparator());
- workers_need_sorting_ = false;
+ while (completed_tasks_.size()) {
+ scoped_ptr<internal::WorkerPoolTask> task = completed_tasks_.take_front();
+ task->DidComplete();
+ }
+
+ client_->DidFinishDispatchingWorkerPoolCompletionCallbacks();
}
-void WorkerPool::PostTask(
- scoped_ptr<internal::WorkerPoolTask> task, bool is_cheap) {
- if (is_cheap && CanPostCheapTask()) {
- pending_cheap_tasks_.push_back(task.Pass());
+void WorkerPool::PostTask(scoped_ptr<internal::WorkerPoolTask> task) {
+ if (task->IsCheap())
ScheduleRunCheapTasks();
- } else {
- base::subtle::Barrier_AtomicIncrement(&pending_task_count_, 1);
- workers_need_sorting_ = true;
- Worker* worker = GetWorkerForNextTask();
- worker->PostTask(task.Pass());
- }
+ // Schedule check for completed tasks if not pending.
ScheduleCheckForCompletedTasks();
-}
-bool WorkerPool::CanPostCheapTask() const {
- return pending_cheap_tasks_.size() < kMaxCheapTaskCount;
+ inner_->PostTask(task.Pass());
}
void WorkerPool::ScheduleRunCheapTasks() {
if (run_cheap_tasks_pending_)
return;
- origin_loop_->PostTask(FROM_HERE, cheap_task_callback_);
+ origin_loop_->PostTask(FROM_HERE, run_cheap_tasks_callback_);
run_cheap_tasks_pending_ = true;
}
void WorkerPool::RunCheapTasks() {
TRACE_EVENT0("cc", "WorkerPool::RunCheapTasks");
DCHECK(run_cheap_tasks_pending_);
+ run_cheap_tasks_pending_ = false;
- // Run as many cheap tasks as we can within the time limit.
- base::TimeTicks deadline = base::TimeTicks::Now() +
- base::TimeDelta::FromMilliseconds(kMaxCheapTaskMs);
- while (pending_cheap_tasks_.size()) {
- scoped_ptr<internal::WorkerPoolTask> task =
- pending_cheap_tasks_.take_front();
- task->Run(cheap_rendering_stats_.get());
- completed_cheap_tasks_.push_back(task.Pass());
-
- if (!check_for_completed_tasks_deadline_.is_null() &&
- base::TimeTicks::Now() >= check_for_completed_tasks_deadline_) {
- TRACE_EVENT_INSTANT0("cc", "WorkerPool::RunCheapTasks check deadline");
- CheckForCompletedTasks();
- }
- if (base::TimeTicks::Now() >= deadline) {
+ while (true) {
+ base::TimeTicks time_limit = run_cheap_tasks_time_limit_;
+
+ if (!check_for_completed_tasks_time_.is_null())
+ time_limit = std::min(time_limit, check_for_completed_tasks_time_);
+
+ bool is_idle = inner_->RunCheapTasksUntilTimeLimit(time_limit);
+
+ if (base::TimeTicks::Now() >= run_cheap_tasks_time_limit_) {
TRACE_EVENT_INSTANT0("cc", "WorkerPool::RunCheapTasks out of time");
break;
}
- }
- // Defer remaining tasks to worker threads.
- while (pending_cheap_tasks_.size()) {
- scoped_ptr<internal::WorkerPoolTask> task =
- pending_cheap_tasks_.take_front();
- PostTask(task.Pass(), false);
- }
+    // If we stopped before the time limit, we must have run out of cheap tasks.
+ if (check_for_completed_tasks_time_.is_null() ||
+ base::TimeTicks::Now() < run_cheap_tasks_time_limit_)
+ break;
- run_cheap_tasks_pending_ = false;
- if (base::subtle::Acquire_Load(&pending_task_count_) == 0)
- OnIdle();
+ TRACE_EVENT_INSTANT0("cc", "WorkerPool::RunCheapTasks check time");
+ CancelCheckForCompletedTasks();
+ DispatchCompletionCallbacks();
+ // Schedule another check for completed tasks if not idle.
+ if (!is_idle)
+ ScheduleCheckForCompletedTasks();
+ }
}
} // namespace cc
diff --git a/cc/worker_pool.h b/cc/worker_pool.h
index 0d5792b..d6474e8 100644
--- a/cc/worker_pool.h
+++ b/cc/worker_pool.h
@@ -7,38 +7,35 @@
#include <string>
-#include "base/basictypes.h"
-#include "base/callback.h"
#include "base/cancelable_callback.h"
#include "base/memory/scoped_ptr.h"
#include "base/memory/weak_ptr.h"
-#include "base/threading/thread.h"
-#include "cc/rendering_stats.h"
+#include "base/message_loop.h"
+#include "cc/cc_export.h"
#include "cc/scoped_ptr_deque.h"
namespace cc {
+struct RenderingStats;
+
namespace internal {
class WorkerPoolTask {
public:
virtual ~WorkerPoolTask();
- // Called when the task is scheduled to run on a thread that isn't the
- // origin thread. Called on the origin thread.
+ virtual bool IsCheap() = 0;
+
+ // Called before running the task on a thread that isn't the origin thread.
virtual void WillRunOnThread(unsigned thread_index) = 0;
virtual void Run(RenderingStats* rendering_stats) = 0;
- bool HasCompleted();
void DidComplete();
protected:
WorkerPoolTask(const base::Closure& reply);
const base::Closure reply_;
-
- // Accessed from multiple threads. Set to 1 when task has completed.
- base::subtle::Atomic32 completed_;
};
} // namespace internal
@@ -60,8 +57,14 @@ class WorkerPool {
virtual ~WorkerPool();
static scoped_ptr<WorkerPool> Create(
- WorkerPoolClient* client, size_t num_threads) {
- return make_scoped_ptr(new WorkerPool(client, num_threads));
+ WorkerPoolClient* client,
+ size_t num_threads,
+ base::TimeDelta check_for_completed_tasks_delay,
+ const std::string& thread_name_prefix) {
+ return make_scoped_ptr(new WorkerPool(client,
+ num_threads,
+ check_for_completed_tasks_delay,
+ thread_name_prefix));
}
// Tells the worker pool to shutdown and returns once all pending tasks have
@@ -72,116 +75,53 @@ class WorkerPool {
// is posted to the thread that called PostTaskAndReply().
void PostTaskAndReply(const Callback& task, const base::Closure& reply);
- // Returns true when worker pool has reached its internal limit for number
- // of pending tasks.
- bool IsBusy();
+ // Set time limit for running cheap tasks.
+ void SetRunCheapTasksTimeLimit(base::TimeTicks run_cheap_tasks_time_limit);
// Toggle rendering stats collection.
void SetRecordRenderingStats(bool record_rendering_stats);
- // Collect rendering stats all completed tasks.
+ // Collect rendering stats of all completed tasks.
void GetRenderingStats(RenderingStats* stats);
protected:
- class Worker : public base::Thread {
- public:
- Worker(WorkerPool* worker_pool, const std::string name, unsigned index);
- virtual ~Worker();
-
- // This must be called before the destructor.
- void StopAfterCompletingAllPendingTasks();
-
- // Posts a task to the worker thread.
- void PostTask(scoped_ptr<internal::WorkerPoolTask> task);
-
- // Check for completed tasks and run reply callbacks.
- void CheckForCompletedTasks();
-
- int num_pending_tasks() const { return pending_tasks_.size(); }
- void set_record_rendering_stats(bool record_rendering_stats) {
- record_rendering_stats_ = record_rendering_stats;
- }
- const RenderingStats* rendering_stats() const {
- return rendering_stats_.get();
- }
+ WorkerPool(WorkerPoolClient* client,
+ size_t num_threads,
+ base::TimeDelta check_for_completed_tasks_delay,
+ const std::string& thread_name_prefix);
- // Overridden from base::Thread:
- virtual void Init() OVERRIDE;
-
- private:
- static void RunTask(
- internal::WorkerPoolTask* task,
- WorkerPool* worker_pool,
- RenderingStats* rendering_stats);
-
- void OnTaskCompleted();
-
- WorkerPool* worker_pool_;
- ScopedPtrDeque<internal::WorkerPoolTask> pending_tasks_;
- scoped_ptr<RenderingStats> rendering_stats_;
- bool record_rendering_stats_;
- unsigned index_;
- };
-
- WorkerPool(WorkerPoolClient* client, size_t num_threads);
-
- void PostTask(scoped_ptr<internal::WorkerPoolTask> task, bool is_cheap);
+ void PostTask(scoped_ptr<internal::WorkerPoolTask> task);
private:
- class NumPendingTasksComparator {
- public:
- bool operator() (const Worker* a, const Worker* b) const {
- return a->num_pending_tasks() < b->num_pending_tasks();
- }
- };
-
- // Schedule a completed tasks check if not already pending.
- void ScheduleCheckForCompletedTasks();
+ class Inner;
+ friend class Inner;
- // Called on worker thread after completing work.
- void OnWorkCompletedOnWorkerThread();
-
- // Called on origin thread after becoming idle.
+ void OnTaskCompleted();
void OnIdle();
-
- // Check for completed tasks and run reply callbacks.
+ void ScheduleCheckForCompletedTasks();
void CheckForCompletedTasks();
-
- // Called when processing task completion.
- void OnTaskCompleted();
-
- // Ensure workers are sorted by number of pending tasks.
- void SortWorkersIfNeeded();
-
- // Schedule running cheap tasks on the origin thread unless already pending.
+ void CancelCheckForCompletedTasks();
+ void DispatchCompletionCallbacks();
void ScheduleRunCheapTasks();
-
- // Run pending cheap tasks on the origin thread. If the allotted time slot
- // for cheap tasks runs out, the remaining tasks are deferred to the thread
- // pool.
void RunCheapTasks();
- WorkerPool::Worker* GetWorkerForNextTask();
- bool CanPostCheapTask() const;
-
- typedef std::vector<Worker*> WorkerVector;
- WorkerVector workers_;
WorkerPoolClient* client_;
scoped_refptr<base::MessageLoopProxy> origin_loop_;
base::WeakPtrFactory<WorkerPool> weak_ptr_factory_;
- bool workers_need_sorting_;
- bool shutdown_;
+ base::TimeTicks check_for_completed_tasks_time_;
+ base::TimeDelta check_for_completed_tasks_delay_;
base::CancelableClosure check_for_completed_tasks_callback_;
- base::TimeTicks check_for_completed_tasks_deadline_;
- base::Closure idle_callback_;
- base::Closure cheap_task_callback_;
- // Accessed from multiple threads. 0 when worker pool is idle.
- base::subtle::Atomic32 pending_task_count_;
-
+ bool check_for_completed_tasks_pending_;
+ const base::Closure run_cheap_tasks_callback_;
+ base::TimeTicks run_cheap_tasks_time_limit_;
bool run_cheap_tasks_pending_;
- ScopedPtrDeque<internal::WorkerPoolTask> pending_cheap_tasks_;
- ScopedPtrDeque<internal::WorkerPoolTask> completed_cheap_tasks_;
- scoped_ptr<RenderingStats> cheap_rendering_stats_;
+
+ // Holds all completed tasks for which we have not yet dispatched
+ // reply callbacks.
+ ScopedPtrDeque<internal::WorkerPoolTask> completed_tasks_;
+
+ // Hide the gory details of the worker pool in |inner_|.
+ const scoped_ptr<Inner> inner_;
DISALLOW_COPY_AND_ASSIGN(WorkerPool);
};