summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--cc/base/scoped_ptr_hash_map.h2
-rw-r--r--cc/base/worker_pool.cc497
-rw-r--r--cc/base/worker_pool.h74
-rw-r--r--cc/base/worker_pool_perftest.cc234
-rw-r--r--cc/base/worker_pool_unittest.cc182
-rw-r--r--cc/cc_tests.gyp5
-rw-r--r--cc/resources/managed_tile_state.h7
-rw-r--r--cc/resources/raster_worker_pool.cc132
-rw-r--r--cc/resources/raster_worker_pool.h65
-rw-r--r--cc/resources/resource_provider.cc2
-rw-r--r--cc/resources/tile_manager.cc404
-rw-r--r--cc/resources/tile_manager.h39
-rw-r--r--cc/test/fake_tile_manager.h2
13 files changed, 1197 insertions, 448 deletions
diff --git a/cc/base/scoped_ptr_hash_map.h b/cc/base/scoped_ptr_hash_map.h
index 970984a..af792ec 100644
--- a/cc/base/scoped_ptr_hash_map.h
+++ b/cc/base/scoped_ptr_hash_map.h
@@ -31,7 +31,7 @@ class ScopedPtrHashMap {
~ScopedPtrHashMap() { clear(); }
- void swap(ScopedPtrHashMap<Key, Value*>& other) {
+ void swap(ScopedPtrHashMap<Key, Value>& other) {
data_.swap(other.data_);
}
diff --git a/cc/base/worker_pool.cc b/cc/base/worker_pool.cc
index 59dc828..bbf51c2 100644
--- a/cc/base/worker_pool.cc
+++ b/cc/base/worker_pool.cc
@@ -4,46 +4,93 @@
#include "cc/base/worker_pool.h"
-#include <algorithm>
+#if defined(OS_ANDROID)
+// TODO(epenner): Move thread priorities to base. (crbug.com/170549)
+#include <sys/resource.h>
+#endif
+
+#include <map>
#include "base/bind.h"
#include "base/debug/trace_event.h"
+#include "base/hash_tables.h"
#include "base/stringprintf.h"
-#include "base/synchronization/condition_variable.h"
#include "base/threading/simple_thread.h"
#include "base/threading/thread_restrictions.h"
+#include "cc/base/scoped_ptr_deque.h"
+#include "cc/base/scoped_ptr_hash_map.h"
+
+#if defined(COMPILER_GCC)
+namespace BASE_HASH_NAMESPACE {
+template <> struct hash<cc::internal::WorkerPoolTask*> {
+ size_t operator()(cc::internal::WorkerPoolTask* ptr) const {
+ return hash<size_t>()(reinterpret_cast<size_t>(ptr));
+ }
+};
+} // namespace BASE_HASH_NAMESPACE
+#endif  // COMPILER_GCC
namespace cc {
-namespace {
-
-class WorkerPoolTaskImpl : public internal::WorkerPoolTask {
- public:
- WorkerPoolTaskImpl(const WorkerPool::Callback& task,
- const base::Closure& reply)
- : internal::WorkerPoolTask(reply),
- task_(task) {}
+namespace internal {
- virtual void RunOnThread(unsigned thread_index) OVERRIDE {
- task_.Run();
- }
+WorkerPoolTask::WorkerPoolTask()
+ : did_schedule_(false),
+ did_run_(false),
+ did_complete_(false) {
+}
- private:
- WorkerPool::Callback task_;
-};
+WorkerPoolTask::WorkerPoolTask(TaskVector* dependencies)
+ : did_schedule_(false),
+ did_run_(false),
+ did_complete_(false) {
+ dependencies_.swap(*dependencies);
+}
-} // namespace
+WorkerPoolTask::~WorkerPoolTask() {
+ DCHECK_EQ(did_schedule_, did_complete_);
+ DCHECK(!did_run_ || did_schedule_);
+ DCHECK(!did_run_ || did_complete_);
+}
-namespace internal {
+void WorkerPoolTask::DidSchedule() {
+ DCHECK(!did_complete_);
+ did_schedule_ = true;
+}
-WorkerPoolTask::WorkerPoolTask(const base::Closure& reply) : reply_(reply) {
+void WorkerPoolTask::WillRun() {
+ DCHECK(did_schedule_);
+ DCHECK(!did_complete_);
+ DCHECK(!did_run_);
}
-WorkerPoolTask::~WorkerPoolTask() {
+void WorkerPoolTask::DidRun() {
+ did_run_ = true;
}
void WorkerPoolTask::DidComplete() {
- reply_.Run();
+ DCHECK(did_schedule_);
+ DCHECK(!did_complete_);
+ did_complete_ = true;
+}
+
+bool WorkerPoolTask::IsReadyToRun() const {
+ // TODO(reveman): Use counter to improve performance.
+ for (TaskVector::const_reverse_iterator it = dependencies_.rbegin();
+ it != dependencies_.rend(); ++it) {
+ WorkerPoolTask* dependency = *it;
+ if (!dependency->HasFinishedRunning())
+ return false;
+ }
+ return true;
+}
+
+bool WorkerPoolTask::HasFinishedRunning() const {
+ return did_run_;
+}
+
+bool WorkerPoolTask::HasCompleted() const {
+ return did_complete_;
}
} // namespace internal
@@ -60,17 +107,51 @@ class WorkerPool::Inner : public base::DelegateSimpleThread::Delegate {
void Shutdown();
- void PostTask(scoped_ptr<internal::WorkerPoolTask> task);
+ // Schedule running of |root| task and all its dependencies. Tasks
+ // previously scheduled but no longer needed to run |root| will be
+ // canceled unless already running. Canceled tasks are moved to
+ // |completed_tasks_| without being run. The result is that once
+ // scheduled, a task is guaranteed to end up in the |completed_tasks_|
+ // queue even if they later get canceled by another call to
+ // ScheduleTasks().
+ void ScheduleTasks(internal::WorkerPoolTask* root);
- // Appends all completed tasks to worker pool's completed tasks queue
- // and returns true if idle.
- bool CollectCompletedTasks();
+ // Collect all completed tasks in |completed_tasks|. Returns true if idle.
+ bool CollectCompletedTasks(TaskDeque* completed_tasks);
private:
- // Appends all completed tasks to |completed_tasks|. Lock must
- // already be acquired before calling this function.
- bool AppendCompletedTasksWithLockAcquired(
- ScopedPtrDeque<internal::WorkerPoolTask>* completed_tasks);
+ class ScheduledTask {
+ public:
+ ScheduledTask(internal::WorkerPoolTask* dependent, unsigned priority)
+ : priority_(priority) {
+ if (dependent)
+ dependents_.push_back(dependent);
+ }
+
+ internal::WorkerPoolTask::TaskVector& dependents() { return dependents_; }
+ unsigned priority() const { return priority_; }
+
+ private:
+ internal::WorkerPoolTask::TaskVector dependents_;
+ unsigned priority_;
+ };
+ typedef internal::WorkerPoolTask* ScheduledTaskMapKey;
+ typedef ScopedPtrHashMap<ScheduledTaskMapKey, ScheduledTask>
+ ScheduledTaskMap;
+
+ // This builds a ScheduledTaskMap from a root task.
+ static unsigned BuildScheduledTaskMapRecursive(
+ internal::WorkerPoolTask* task,
+ internal::WorkerPoolTask* dependent,
+ unsigned priority,
+ ScheduledTaskMap* scheduled_tasks);
+ static void BuildScheduledTaskMap(
+ internal::WorkerPoolTask* root, ScheduledTaskMap* scheduled_tasks);
+
+ // Collect all completed tasks by swapping the contents of
+ // |completed_tasks| and |completed_tasks_|. Lock must be acquired
+ // before calling this function. Returns true if idle.
+ bool CollectCompletedTasksWithLockAcquired(TaskDeque* completed_tasks);
// Schedule an OnIdleOnOriginThread callback if not already pending.
// Lock must already be acquired before calling this function.
@@ -90,8 +171,8 @@ class WorkerPool::Inner : public base::DelegateSimpleThread::Delegate {
mutable base::Lock lock_;
// Condition variable that is waited on by worker threads until new
- // tasks are posted or shutdown starts.
- base::ConditionVariable has_pending_tasks_cv_;
+ // tasks are ready to run or shutdown starts.
+ base::ConditionVariable has_ready_to_run_tasks_cv_;
// Target message loop used for posting callbacks.
scoped_refptr<base::MessageLoopProxy> origin_loop_;
@@ -106,15 +187,25 @@ class WorkerPool::Inner : public base::DelegateSimpleThread::Delegate {
// loop index is 0.
unsigned next_thread_index_;
- // Number of tasks currently running.
- unsigned running_task_count_;
-
// Set during shutdown. Tells workers to exit when no more tasks
// are pending.
bool shutdown_;
- typedef ScopedPtrDeque<internal::WorkerPoolTask> TaskDeque;
- TaskDeque pending_tasks_;
+ // The root task that is a dependent of all other tasks.
+ scoped_refptr<internal::WorkerPoolTask> root_;
+
+ // This set contains all pending tasks.
+ ScheduledTaskMap pending_tasks_;
+
+ // Ordered set of tasks that are ready to run.
+ // TODO(reveman): priority_queue might be more efficient.
+ typedef std::map<unsigned, internal::WorkerPoolTask*> TaskMap;
+ TaskMap ready_to_run_tasks_;
+
+ // This set contains all currently running tasks.
+ ScheduledTaskMap running_tasks_;
+
+ // Completed tasks not yet collected by origin thread.
TaskDeque completed_tasks_;
ScopedPtrDeque<base::DelegateSimpleThread> workers_;
@@ -127,25 +218,24 @@ WorkerPool::Inner::Inner(WorkerPool* worker_pool,
const std::string& thread_name_prefix)
: worker_pool_on_origin_thread_(worker_pool),
lock_(),
- has_pending_tasks_cv_(&lock_),
+ has_ready_to_run_tasks_cv_(&lock_),
origin_loop_(base::MessageLoopProxy::current()),
weak_ptr_factory_(this),
on_idle_callback_(base::Bind(&WorkerPool::Inner::OnIdleOnOriginThread,
weak_ptr_factory_.GetWeakPtr())),
on_idle_pending_(false),
next_thread_index_(0),
- running_task_count_(0),
shutdown_(false) {
base::AutoLock lock(lock_);
while (workers_.size() < num_threads) {
scoped_ptr<base::DelegateSimpleThread> worker = make_scoped_ptr(
new base::DelegateSimpleThread(
- this,
- thread_name_prefix +
- base::StringPrintf(
- "Worker%u",
- static_cast<unsigned>(workers_.size() + 1)).c_str()));
+ this,
+ thread_name_prefix +
+ base::StringPrintf(
+ "Worker%u",
+ static_cast<unsigned>(workers_.size() + 1)).c_str()));
worker->Start();
workers_.push_back(worker.Pass());
}
@@ -156,12 +246,10 @@ WorkerPool::Inner::~Inner() {
DCHECK(shutdown_);
- // Cancel all pending callbacks.
- weak_ptr_factory_.InvalidateWeakPtrs();
-
DCHECK_EQ(0u, pending_tasks_.size());
+ DCHECK_EQ(0u, ready_to_run_tasks_.size());
+ DCHECK_EQ(0u, running_tasks_.size());
DCHECK_EQ(0u, completed_tasks_.size());
- DCHECK_EQ(0u, running_task_count_);
}
void WorkerPool::Inner::Shutdown() {
@@ -173,7 +261,7 @@ void WorkerPool::Inner::Shutdown() {
// Wake up a worker so it knows it should exit. This will cause all workers
// to exit as each will wake up another worker before exiting.
- has_pending_tasks_cv_.Signal();
+ has_ready_to_run_tasks_cv_.Signal();
}
while (workers_.size()) {
@@ -183,32 +271,100 @@ void WorkerPool::Inner::Shutdown() {
base::ThreadRestrictions::ScopedAllowIO allow_io;
worker->Join();
}
+
+ // Cancel any pending OnIdle callback.
+ weak_ptr_factory_.InvalidateWeakPtrs();
}
-void WorkerPool::Inner::PostTask(scoped_ptr<internal::WorkerPoolTask> task) {
- base::AutoLock lock(lock_);
+void WorkerPool::Inner::ScheduleTasks(internal::WorkerPoolTask* root) {
+ // It is OK to call ScheduleTasks() after shutdown if |root| is NULL.
+ DCHECK(!root || !shutdown_);
+
+ scoped_refptr<internal::WorkerPoolTask> new_root(root);
+
+ ScheduledTaskMap new_pending_tasks;
+ ScheduledTaskMap new_running_tasks;
+ TaskMap new_ready_to_run_tasks;
+
+ // Build scheduled task map before acquiring |lock_|.
+ if (root)
+ BuildScheduledTaskMap(root, &new_pending_tasks);
+
+ {
+ base::AutoLock lock(lock_);
+
+ // First remove all completed tasks from |new_pending_tasks|.
+ for (TaskDeque::iterator it = completed_tasks_.begin();
+ it != completed_tasks_.end(); ++it) {
+ internal::WorkerPoolTask* task = *it;
+ new_pending_tasks.take_and_erase(task);
+ }
- pending_tasks_.push_back(task.Pass());
+ // Move tasks not present in |new_pending_tasks| to |completed_tasks_|.
+ for (ScheduledTaskMap::iterator it = pending_tasks_.begin();
+ it != pending_tasks_.end(); ++it) {
+ internal::WorkerPoolTask* task = it->first;
- // There is more work available, so wake up worker thread.
- has_pending_tasks_cv_.Signal();
+ // Task has completed if not present in |new_pending_tasks|.
+ if (!new_pending_tasks.contains(task))
+ completed_tasks_.push_back(task);
+ }
+
+ // Build new running task set.
+ for (ScheduledTaskMap::iterator it = running_tasks_.begin();
+ it != running_tasks_.end(); ++it) {
+ internal::WorkerPoolTask* task = it->first;
+ // Transfer scheduled task value from |new_pending_tasks| to
+ // |new_running_tasks| if currently running. Value must be set to
+ // NULL if |new_pending_tasks| doesn't contain task. This does
+ // the right thing in both cases.
+ new_running_tasks.set(task, new_pending_tasks.take_and_erase(task));
+ }
+
+ // Build new "ready to run" tasks queue.
+ for (ScheduledTaskMap::iterator it = new_pending_tasks.begin();
+ it != new_pending_tasks.end(); ++it) {
+ internal::WorkerPoolTask* task = it->first;
+
+ // Completed tasks should not exist in |new_pending_tasks|.
+ DCHECK(!task->HasFinishedRunning());
+
+ // Call DidSchedule() to indicate that this task has been scheduled.
+ // Note: This is only for debugging purposes.
+ task->DidSchedule();
+
+ DCHECK_EQ(0u, new_ready_to_run_tasks.count(it->second->priority()));
+ if (task->IsReadyToRun())
+ new_ready_to_run_tasks[it->second->priority()] = task;
+ }
+
+ // Swap root task and task sets.
+ // Note: old tasks are intentionally destroyed after releasing |lock_|.
+ root_.swap(new_root);
+ pending_tasks_.swap(new_pending_tasks);
+ running_tasks_.swap(new_running_tasks);
+ ready_to_run_tasks_.swap(new_ready_to_run_tasks);
+
+ // If there is more work available, wake up worker thread.
+ if (!ready_to_run_tasks_.empty())
+ has_ready_to_run_tasks_cv_.Signal();
+ }
}
-bool WorkerPool::Inner::CollectCompletedTasks() {
+bool WorkerPool::Inner::CollectCompletedTasks(TaskDeque* completed_tasks) {
base::AutoLock lock(lock_);
- return AppendCompletedTasksWithLockAcquired(
- &worker_pool_on_origin_thread_->completed_tasks_);
+ return CollectCompletedTasksWithLockAcquired(completed_tasks);
}
-bool WorkerPool::Inner::AppendCompletedTasksWithLockAcquired(
- ScopedPtrDeque<internal::WorkerPoolTask>* completed_tasks) {
+bool WorkerPool::Inner::CollectCompletedTasksWithLockAcquired(
+ TaskDeque* completed_tasks) {
lock_.AssertAcquired();
- while (completed_tasks_.size())
- completed_tasks->push_back(completed_tasks_.take_front().Pass());
+ DCHECK_EQ(0u, completed_tasks->size());
+ completed_tasks->swap(completed_tasks_);
- return !running_task_count_ && pending_tasks_.empty();
+ return running_tasks_.empty() && pending_tasks_.empty();
}
void WorkerPool::Inner::ScheduleOnIdleWithLockAcquired() {
@@ -221,6 +377,8 @@ void WorkerPool::Inner::ScheduleOnIdleWithLockAcquired() {
}
void WorkerPool::Inner::OnIdleOnOriginThread() {
+ TaskDeque completed_tasks;
+
{
base::AutoLock lock(lock_);
@@ -228,14 +386,13 @@ void WorkerPool::Inner::OnIdleOnOriginThread() {
on_idle_pending_ = false;
// Early out if no longer idle.
- if (running_task_count_ || !pending_tasks_.empty())
+ if (!running_tasks_.empty() || !pending_tasks_.empty())
return;
- AppendCompletedTasksWithLockAcquired(
- &worker_pool_on_origin_thread_->completed_tasks_);
+ CollectCompletedTasksWithLockAcquired(&completed_tasks);
}
- worker_pool_on_origin_thread_->OnIdle();
+ worker_pool_on_origin_thread_->OnIdle(&completed_tasks);
}
void WorkerPool::Inner::Run() {
@@ -251,29 +408,37 @@ void WorkerPool::Inner::Run() {
int thread_index = next_thread_index_++;
while (true) {
- if (pending_tasks_.empty()) {
- // Exit when shutdown is set and no more tasks are pending.
- if (shutdown_)
- break;
-
- // Schedule an idle callback if requested and not pending.
- if (!running_task_count_)
- ScheduleOnIdleWithLockAcquired();
-
- // Wait for new pending tasks.
- has_pending_tasks_cv_.Wait();
+ if (ready_to_run_tasks_.empty()) {
+ if (pending_tasks_.empty()) {
+ // Exit when shutdown is set and no more tasks are pending.
+ if (shutdown_)
+ break;
+
+ // Schedule an idle callback if no tasks are running.
+ if (running_tasks_.empty())
+ ScheduleOnIdleWithLockAcquired();
+ }
+
+ // Wait for more tasks.
+ has_ready_to_run_tasks_cv_.Wait();
continue;
}
- // Get next task.
- scoped_ptr<internal::WorkerPoolTask> task = pending_tasks_.take_front();
+ // Take top priority task from |ready_to_run_tasks_|.
+ scoped_refptr<internal::WorkerPoolTask> task(
+ ready_to_run_tasks_.begin()->second);
+ ready_to_run_tasks_.erase(ready_to_run_tasks_.begin());
+
+ // Move task from |pending_tasks_| to |running_tasks_|.
+ DCHECK(pending_tasks_.contains(task));
+ DCHECK(!running_tasks_.contains(task));
+ running_tasks_.set(task, pending_tasks_.take_and_erase(task));
- // Increment |running_task_count_| before starting to run task.
- running_task_count_++;
+ // There may be more work available, so wake up another worker thread.
+ has_ready_to_run_tasks_cv_.Signal();
- // There may be more work available, so wake up another
- // worker thread.
- has_pending_tasks_cv_.Signal();
+ // Call WillRun() before releasing |lock_| and running task.
+ task->WillRun();
{
base::AutoUnlock unlock(lock_);
@@ -281,15 +446,95 @@ void WorkerPool::Inner::Run() {
task->RunOnThread(thread_index);
}
- completed_tasks_.push_back(task.Pass());
+ // This will mark task as finished running.
+ task->DidRun();
+
+ // Now iterate over all dependents to check if they are ready to run.
+ scoped_ptr<ScheduledTask> scheduled_task = running_tasks_.take_and_erase(
+ task);
+ if (scheduled_task) {
+ typedef internal::WorkerPoolTask::TaskVector TaskVector;
+ for (TaskVector::iterator it = scheduled_task->dependents().begin();
+ it != scheduled_task->dependents().end(); ++it) {
+ internal::WorkerPoolTask* dependent = *it;
+ if (!dependent->IsReadyToRun())
+ continue;
+
+ // Task is ready. Add it to |ready_to_run_tasks_|.
+ DCHECK(pending_tasks_.contains(dependent));
+ unsigned priority = pending_tasks_.get(dependent)->priority();
+ DCHECK(!ready_to_run_tasks_.count(priority) ||
+ ready_to_run_tasks_[priority] == dependent);
+ ready_to_run_tasks_[priority] = dependent;
+ }
+ }
- // Decrement |running_task_count_| now that we are done running task.
- running_task_count_--;
+ // Finally add task to |completed_tasks_|.
+ completed_tasks_.push_back(task);
}
// We noticed we should exit. Wake up the next worker so it knows it should
// exit as well (because the Shutdown() code only signals once).
- has_pending_tasks_cv_.Signal();
+ has_ready_to_run_tasks_cv_.Signal();
+}
+
+// BuildScheduledTaskMap() takes a task tree as input and constructs
+// a unique set of tasks with edges between dependencies pointing in
+// the direction of the dependents. Each task is given a unique priority
+// which is currently the same as the DFS traversal order.
+//
+// Input: Output:
+//
+// root task4 Task | Priority (lower is better)
+// / \ / \ -------+---------------------------
+// task1 task2 task3 task2 root | 4
+// | | | | task1 | 2
+// task3 | task1 | task2 | 3
+// | | \ / task3 | 1
+// task4 task4 root task4 | 0
+//
+// The output can be used to efficiently maintain a queue of
+// "ready to run" tasks.
+
+// static
+unsigned WorkerPool::Inner::BuildScheduledTaskMapRecursive(
+ internal::WorkerPoolTask* task,
+ internal::WorkerPoolTask* dependent,
+ unsigned priority,
+ ScheduledTaskMap* scheduled_tasks) {
+ // Skip sub-tree if task has already completed.
+ if (task->HasCompleted())
+ return priority;
+
+ ScheduledTaskMap::iterator scheduled_it = scheduled_tasks->find(task);
+ if (scheduled_it != scheduled_tasks->end()) {
+ DCHECK(dependent);
+ scheduled_it->second->dependents().push_back(dependent);
+ return priority;
+ }
+
+ typedef internal::WorkerPoolTask::TaskVector TaskVector;
+ for (TaskVector::iterator it = task->dependencies().begin();
+ it != task->dependencies().end(); ++it) {
+ internal::WorkerPoolTask* dependency = *it;
+ priority = BuildScheduledTaskMapRecursive(
+ dependency, task, priority, scheduled_tasks);
+ }
+
+ scheduled_tasks->set(task,
+ make_scoped_ptr(new ScheduledTask(dependent,
+ priority)));
+
+ return priority + 1;
+}
+
+// static
+void WorkerPool::Inner::BuildScheduledTaskMap(
+ internal::WorkerPoolTask* root,
+ ScheduledTaskMap* scheduled_tasks) {
+ const unsigned kBasePriority = 0u;
+ DCHECK(root);
+ BuildScheduledTaskMapRecursive(root, NULL, kBasePriority, scheduled_tasks);
}
WorkerPool::WorkerPool(size_t num_threads,
@@ -297,83 +542,105 @@ WorkerPool::WorkerPool(size_t num_threads,
const std::string& thread_name_prefix)
: client_(NULL),
origin_loop_(base::MessageLoopProxy::current()),
- weak_ptr_factory_(this),
check_for_completed_tasks_delay_(check_for_completed_tasks_delay),
check_for_completed_tasks_pending_(false),
+ in_dispatch_completion_callbacks_(false),
inner_(make_scoped_ptr(new Inner(this,
num_threads,
thread_name_prefix))) {
}
WorkerPool::~WorkerPool() {
- // Cancel all pending callbacks.
- weak_ptr_factory_.InvalidateWeakPtrs();
-
- DCHECK_EQ(0u, completed_tasks_.size());
}
void WorkerPool::Shutdown() {
+ TRACE_EVENT0("cc", "WorkerPool::Shutdown");
+
+ DCHECK(!in_dispatch_completion_callbacks_);
+
inner_->Shutdown();
- inner_->CollectCompletedTasks();
- DispatchCompletionCallbacks();
-}
-void WorkerPool::PostTaskAndReply(
- const Callback& task, const base::Closure& reply) {
- PostTask(make_scoped_ptr(new WorkerPoolTaskImpl(
- task,
- reply)).PassAs<internal::WorkerPoolTask>());
+ TaskDeque completed_tasks;
+ inner_->CollectCompletedTasks(&completed_tasks);
+ DispatchCompletionCallbacks(&completed_tasks);
}
-void WorkerPool::OnIdle() {
+void WorkerPool::OnIdle(TaskDeque* completed_tasks) {
TRACE_EVENT0("cc", "WorkerPool::OnIdle");
- DispatchCompletionCallbacks();
+ DCHECK(!in_dispatch_completion_callbacks_);
+
+ DispatchCompletionCallbacks(completed_tasks);
+
+ // Cancel any pending check for completed tasks.
+ check_for_completed_tasks_callback_.Cancel();
+ check_for_completed_tasks_pending_ = false;
}
void WorkerPool::ScheduleCheckForCompletedTasks() {
if (check_for_completed_tasks_pending_)
return;
+ check_for_completed_tasks_callback_.Reset(
+ base::Bind(&WorkerPool::CheckForCompletedTasks,
+ base::Unretained(this)));
origin_loop_->PostDelayedTask(
FROM_HERE,
- base::Bind(&WorkerPool::CheckForCompletedTasks,
- weak_ptr_factory_.GetWeakPtr()),
+ check_for_completed_tasks_callback_.callback(),
check_for_completed_tasks_delay_);
check_for_completed_tasks_pending_ = true;
}
void WorkerPool::CheckForCompletedTasks() {
TRACE_EVENT0("cc", "WorkerPool::CheckForCompletedTasks");
- DCHECK(check_for_completed_tasks_pending_);
+
+ DCHECK(!in_dispatch_completion_callbacks_);
+
+ check_for_completed_tasks_callback_.Cancel();
check_for_completed_tasks_pending_ = false;
+ TaskDeque completed_tasks;
+
// Schedule another check for completed tasks if not idle.
- if (!inner_->CollectCompletedTasks())
+ if (!inner_->CollectCompletedTasks(&completed_tasks))
ScheduleCheckForCompletedTasks();
- DispatchCompletionCallbacks();
+ DispatchCompletionCallbacks(&completed_tasks);
}
-void WorkerPool::DispatchCompletionCallbacks() {
+void WorkerPool::DispatchCompletionCallbacks(TaskDeque* completed_tasks) {
TRACE_EVENT0("cc", "WorkerPool::DispatchCompletionCallbacks");
- if (completed_tasks_.empty())
+ // Early out when |completed_tasks| is empty to prevent unnecessary
+ // call to DidFinishDispatchingWorkerPoolCompletionCallbacks().
+ if (completed_tasks->empty())
return;
- while (completed_tasks_.size()) {
- scoped_ptr<internal::WorkerPoolTask> task = completed_tasks_.take_front();
+ // Worker pool instance is not reentrant while processing completed tasks.
+ in_dispatch_completion_callbacks_ = true;
+
+ while (!completed_tasks->empty()) {
+ scoped_refptr<internal::WorkerPoolTask> task = completed_tasks->front();
+ completed_tasks->pop_front();
task->DidComplete();
+ task->DispatchCompletionCallback();
}
+ in_dispatch_completion_callbacks_ = false;
+
DCHECK(client_);
client_->DidFinishDispatchingWorkerPoolCompletionCallbacks();
}
-void WorkerPool::PostTask(scoped_ptr<internal::WorkerPoolTask> task) {
- // Schedule check for completed tasks if not pending.
- ScheduleCheckForCompletedTasks();
+void WorkerPool::ScheduleTasks(internal::WorkerPoolTask* root) {
+ TRACE_EVENT0("cc", "WorkerPool::ScheduleTasks");
+
+ DCHECK(!in_dispatch_completion_callbacks_);
+
+ // Schedule check for completed tasks.
+ if (root)
+ ScheduleCheckForCompletedTasks();
- inner_->PostTask(task.Pass());
+ inner_->ScheduleTasks(root);
}
} // namespace cc
diff --git a/cc/base/worker_pool.h b/cc/base/worker_pool.h
index aa49ec9..a27757a 100644
--- a/cc/base/worker_pool.h
+++ b/cc/base/worker_pool.h
@@ -5,31 +5,52 @@
#ifndef CC_BASE_WORKER_POOL_H_
#define CC_BASE_WORKER_POOL_H_
+#include <deque>
#include <string>
+#include <vector>
#include "base/cancelable_callback.h"
+#include "base/memory/ref_counted.h"
#include "base/memory/scoped_ptr.h"
#include "base/memory/weak_ptr.h"
#include "base/message_loop.h"
#include "cc/base/cc_export.h"
-#include "cc/base/scoped_ptr_deque.h"
namespace cc {
namespace internal {
-class WorkerPoolTask {
+class CC_EXPORT WorkerPoolTask
+ : public base::RefCountedThreadSafe<WorkerPoolTask> {
public:
- virtual ~WorkerPoolTask();
+ typedef std::vector<scoped_refptr<WorkerPoolTask> > TaskVector;
virtual void RunOnThread(unsigned thread_index) = 0;
+ virtual void DispatchCompletionCallback() = 0;
+ void DidSchedule();
+ void WillRun();
+ void DidRun();
void DidComplete();
+ bool IsReadyToRun() const;
+ bool HasFinishedRunning() const;
+ bool HasCompleted() const;
+
+ TaskVector& dependencies() { return dependencies_; }
+
protected:
- explicit WorkerPoolTask(const base::Closure& reply);
+ friend class base::RefCountedThreadSafe<WorkerPoolTask>;
+
+ WorkerPoolTask();
+ explicit WorkerPoolTask(TaskVector* dependencies);
+ virtual ~WorkerPoolTask();
- const base::Closure reply_;
+ private:
+ bool did_schedule_;
+ bool did_run_;
+ bool did_complete_;
+ TaskVector dependencies_;
};
} // namespace internal
@@ -42,67 +63,50 @@ class CC_EXPORT WorkerPoolClient {
virtual ~WorkerPoolClient() {}
};
-// A worker thread pool that runs rendering tasks and guarantees completion
-// of all pending tasks at shutdown.
+// A worker thread pool that runs tasks provided by task graph and
+// guarantees completion of all pending tasks at shutdown.
class CC_EXPORT WorkerPool {
public:
- typedef base::Callback<void()> Callback;
-
virtual ~WorkerPool();
- static scoped_ptr<WorkerPool> Create(
- size_t num_threads,
- base::TimeDelta check_for_completed_tasks_delay,
- const std::string& thread_name_prefix) {
- return make_scoped_ptr(new WorkerPool(num_threads,
- check_for_completed_tasks_delay,
- thread_name_prefix));
- }
-
// Tells the worker pool to shutdown and returns once all pending tasks have
// completed.
- void Shutdown();
-
- // Posts |task| to worker pool. On completion, |reply|
- // is posted to the thread that called PostTaskAndReply().
- void PostTaskAndReply(const Callback& task, const base::Closure& reply);
+ virtual void Shutdown();
// Set a new client.
void SetClient(WorkerPoolClient* client) {
client_ = client;
}
+ // Force a check for completed tasks.
+ void CheckForCompletedTasks();
+
protected:
WorkerPool(size_t num_threads,
base::TimeDelta check_for_completed_tasks_delay,
const std::string& thread_name_prefix);
- void PostTask(scoped_ptr<internal::WorkerPoolTask> task);
+ void ScheduleTasks(internal::WorkerPoolTask* root);
private:
class Inner;
friend class Inner;
- void OnTaskCompleted();
- void OnIdle();
+ typedef std::deque<scoped_refptr<internal::WorkerPoolTask> > TaskDeque;
+
+ void OnIdle(TaskDeque* completed_tasks);
void ScheduleCheckForCompletedTasks();
- void CheckForCompletedTasks();
- void DispatchCompletionCallbacks();
+ void DispatchCompletionCallbacks(TaskDeque* completed_tasks);
WorkerPoolClient* client_;
scoped_refptr<base::MessageLoopProxy> origin_loop_;
- base::WeakPtrFactory<WorkerPool> weak_ptr_factory_;
+ base::CancelableClosure check_for_completed_tasks_callback_;
base::TimeDelta check_for_completed_tasks_delay_;
bool check_for_completed_tasks_pending_;
-
- // Holds all completed tasks for which we have not yet dispatched
- // reply callbacks.
- ScopedPtrDeque<internal::WorkerPoolTask> completed_tasks_;
+ bool in_dispatch_completion_callbacks_;
// Hide the gory details of the worker pool in |inner_|.
const scoped_ptr<Inner> inner_;
-
- DISALLOW_COPY_AND_ASSIGN(WorkerPool);
};
} // namespace cc
diff --git a/cc/base/worker_pool_perftest.cc b/cc/base/worker_pool_perftest.cc
new file mode 100644
index 0000000..c03bcc8
--- /dev/null
+++ b/cc/base/worker_pool_perftest.cc
@@ -0,0 +1,234 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "cc/base/worker_pool.h"
+
+#include "base/time.h"
+#include "cc/base/completion_event.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace cc {
+
+namespace {
+
+static const int kTimeLimitMillis = 2000;
+static const int kWarmupRuns = 5;
+static const int kTimeCheckInterval = 10;
+
+class PerfTaskImpl : public internal::WorkerPoolTask {
+ public:
+ explicit PerfTaskImpl(internal::WorkerPoolTask::TaskVector* dependencies)
+ : internal::WorkerPoolTask(dependencies) {}
+
+ // Overridden from internal::WorkerPoolTask:
+ virtual void RunOnThread(unsigned thread_index) OVERRIDE {}
+ virtual void DispatchCompletionCallback() OVERRIDE {}
+
+ private:
+ virtual ~PerfTaskImpl() {}
+};
+
+class PerfControlTaskImpl : public internal::WorkerPoolTask {
+ public:
+ explicit PerfControlTaskImpl(
+ internal::WorkerPoolTask::TaskVector* dependencies)
+ : internal::WorkerPoolTask(dependencies),
+ did_start_(new CompletionEvent),
+ can_finish_(new CompletionEvent) {}
+
+ // Overridden from internal::WorkerPoolTask:
+ virtual void RunOnThread(unsigned thread_index) OVERRIDE {
+ did_start_->Signal();
+ can_finish_->Wait();
+ }
+ virtual void DispatchCompletionCallback() OVERRIDE {}
+
+ void WaitForTaskToStartRunning() {
+ did_start_->Wait();
+ }
+
+ void AllowTaskToFinish() {
+ can_finish_->Signal();
+ }
+
+ private:
+ virtual ~PerfControlTaskImpl() {}
+
+ scoped_ptr<CompletionEvent> did_start_;
+ scoped_ptr<CompletionEvent> can_finish_;
+};
+
+class PerfWorkerPool : public WorkerPool {
+ public:
+ PerfWorkerPool() : WorkerPool(1, base::TimeDelta::FromDays(1024), "test") {}
+ virtual ~PerfWorkerPool() {}
+
+ static scoped_ptr<PerfWorkerPool> Create() {
+ return make_scoped_ptr(new PerfWorkerPool);
+ }
+
+ void ScheduleTasks(internal::WorkerPoolTask* root) {
+ WorkerPool::ScheduleTasks(root);
+ }
+};
+
+class WorkerPoolPerfTest : public testing::Test,
+ public WorkerPoolClient {
+ public:
+ WorkerPoolPerfTest() : num_runs_(0) {}
+
+ // Overridden from testing::Test:
+ virtual void SetUp() OVERRIDE {
+ worker_pool_ = PerfWorkerPool::Create();
+ worker_pool_->SetClient(this);
+ }
+ virtual void TearDown() OVERRIDE {
+ worker_pool_->Shutdown();
+ }
+
+ // Overridden from WorkerPoolClient:
+ virtual void DidFinishDispatchingWorkerPoolCompletionCallbacks() OVERRIDE {}
+
+ void EndTest() {
+ elapsed_ = base::TimeTicks::HighResNow() - start_time_;
+ }
+
+ void AfterTest(const std::string test_name) {
+ // Format matches chrome/test/perf/perf_test.h:PrintResult
+ printf("*RESULT %s: %.2f runs/s\n",
+ test_name.c_str(),
+ num_runs_ / elapsed_.InSecondsF());
+ }
+
+ void BuildTaskGraph(internal::WorkerPoolTask::TaskVector* dependencies,
+ unsigned current_depth,
+ unsigned max_depth,
+ unsigned num_children_per_node) {
+ internal::WorkerPoolTask::TaskVector children;
+ if (current_depth < max_depth) {
+ for (unsigned i = 0; i < num_children_per_node; ++i) {
+ BuildTaskGraph(&children,
+ current_depth + 1,
+ max_depth,
+ num_children_per_node);
+ }
+ } else if (leaf_task_) {
+ children.push_back(leaf_task_);
+ }
+ dependencies->push_back(make_scoped_refptr(new PerfTaskImpl(&children)));
+ }
+
+ bool DidRun() {
+ ++num_runs_;
+ if (num_runs_ == kWarmupRuns)
+ start_time_ = base::TimeTicks::HighResNow();
+
+ if (!start_time_.is_null() && (num_runs_ % kTimeCheckInterval) == 0) {
+ base::TimeDelta elapsed = base::TimeTicks::HighResNow() - start_time_;
+ if (elapsed >= base::TimeDelta::FromMilliseconds(kTimeLimitMillis)) {
+ elapsed_ = elapsed;
+ return false;
+ }
+ }
+
+ return true;
+ }
+
+ void RunBuildTaskGraphTest(const std::string test_name,
+ unsigned max_depth,
+ unsigned num_children_per_node) {
+ start_time_ = base::TimeTicks();
+ num_runs_ = 0;
+ do {
+ internal::WorkerPoolTask::TaskVector children;
+ BuildTaskGraph(&children, 0, max_depth, num_children_per_node);
+ } while (DidRun());
+
+ AfterTest(test_name);
+ }
+
+ void RunScheduleTasksTest(const std::string test_name,
+ unsigned max_depth,
+ unsigned num_children_per_node) {
+ start_time_ = base::TimeTicks();
+ num_runs_ = 0;
+ do {
+ internal::WorkerPoolTask::TaskVector empty;
+ leaf_task_ = make_scoped_refptr(new PerfControlTaskImpl(&empty));
+ internal::WorkerPoolTask::TaskVector children;
+ BuildTaskGraph(&children, 0, max_depth, num_children_per_node);
+ scoped_refptr<PerfTaskImpl> root_task(
+ make_scoped_refptr(new PerfTaskImpl(&children)));
+
+ worker_pool_->ScheduleTasks(root_task);
+ leaf_task_->WaitForTaskToStartRunning();
+ worker_pool_->ScheduleTasks(NULL);
+ worker_pool_->CheckForCompletedTasks();
+ leaf_task_->AllowTaskToFinish();
+ } while (DidRun());
+
+ AfterTest(test_name);
+ }
+
+ void RunExecuteTasksTest(const std::string test_name,
+ unsigned max_depth,
+ unsigned num_children_per_node) {
+ start_time_ = base::TimeTicks();
+ num_runs_ = 0;
+ do {
+ internal::WorkerPoolTask::TaskVector children;
+ BuildTaskGraph(&children, 0, max_depth, num_children_per_node);
+ scoped_refptr<PerfControlTaskImpl> root_task(
+ make_scoped_refptr(new PerfControlTaskImpl(&children)));
+
+ worker_pool_->ScheduleTasks(root_task);
+ root_task->WaitForTaskToStartRunning();
+ root_task->AllowTaskToFinish();
+ worker_pool_->CheckForCompletedTasks();
+ } while (DidRun());
+
+ AfterTest(test_name);
+ }
+
+ protected:
+ scoped_ptr<PerfWorkerPool> worker_pool_;
+ scoped_refptr<PerfControlTaskImpl> leaf_task_;
+ base::TimeTicks start_time_;
+ base::TimeDelta elapsed_;
+ int num_runs_;
+};
+
+TEST_F(WorkerPoolPerfTest, BuildTaskGraph) {
+ RunBuildTaskGraphTest("build_task_graph_1_10", 1, 10);
+ RunBuildTaskGraphTest("build_task_graph_1_1000", 1, 1000);
+ RunBuildTaskGraphTest("build_task_graph_2_10", 2, 10);
+ RunBuildTaskGraphTest("build_task_graph_5_5", 5, 5);
+ RunBuildTaskGraphTest("build_task_graph_10_2", 10, 2);
+ RunBuildTaskGraphTest("build_task_graph_1000_1", 1000, 1);
+ RunBuildTaskGraphTest("build_task_graph_10_1", 10, 1);
+}
+
+TEST_F(WorkerPoolPerfTest, ScheduleTasks) {
+ RunScheduleTasksTest("schedule_tasks_1_10", 1, 10);
+ RunScheduleTasksTest("schedule_tasks_1_1000", 1, 1000);
+ RunScheduleTasksTest("schedule_tasks_2_10", 2, 10);
+ RunScheduleTasksTest("schedule_tasks_5_5", 5, 5);
+ RunScheduleTasksTest("schedule_tasks_10_2", 10, 2);
+ RunScheduleTasksTest("schedule_tasks_1000_1", 1000, 1);
+ RunScheduleTasksTest("schedule_tasks_10_1", 10, 1);
+}
+
+TEST_F(WorkerPoolPerfTest, ExecuteTasks) {
+ RunExecuteTasksTest("execute_tasks_1_10", 1, 10);
+ RunExecuteTasksTest("execute_tasks_1_1000", 1, 1000);
+ RunExecuteTasksTest("execute_tasks_2_10", 2, 10);
+ RunExecuteTasksTest("execute_tasks_5_5", 5, 5);
+ RunExecuteTasksTest("execute_tasks_10_2", 10, 2);
+ RunExecuteTasksTest("execute_tasks_1000_1", 1000, 1);
+ RunExecuteTasksTest("execute_tasks_10_1", 10, 1);
+}
+
+} // namespace
+
+} // namespace cc
diff --git a/cc/base/worker_pool_unittest.cc b/cc/base/worker_pool_unittest.cc
index c032e2c..de904b1 100644
--- a/cc/base/worker_pool_unittest.cc
+++ b/cc/base/worker_pool_unittest.cc
@@ -4,27 +4,101 @@
#include "cc/base/worker_pool.h"
+#include <vector>
+
+#include "cc/base/completion_event.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace cc {
namespace {
-class WorkerPoolTest : public testing::Test,
- public WorkerPoolClient {
+class FakeTaskImpl : public internal::WorkerPoolTask {
public:
- WorkerPoolTest()
- : run_task_count_(0),
- on_task_completed_count_(0),
- finish_dispatching_completion_callbacks_count_(0) {
+ FakeTaskImpl(const base::Closure& callback,
+ const base::Closure& reply,
+ internal::WorkerPoolTask::TaskVector* dependencies)
+ : internal::WorkerPoolTask(dependencies),
+ callback_(callback),
+ reply_(reply) {
+ }
+ FakeTaskImpl(const base::Closure& callback, const base::Closure& reply)
+ : callback_(callback),
+ reply_(reply) {
+ }
+
+ // Overridden from internal::WorkerPoolTask:
+ virtual void RunOnThread(unsigned thread_index) OVERRIDE {
+ if (!callback_.is_null())
+ callback_.Run();
+ }
+ virtual void DispatchCompletionCallback() OVERRIDE {
+ if (!reply_.is_null())
+ reply_.Run();
+ }
+
+ private:
+ virtual ~FakeTaskImpl() {}
+
+ const base::Closure callback_;
+ const base::Closure reply_;
+};
+
+class FakeWorkerPool : public WorkerPool {
+ public:
+ FakeWorkerPool() : WorkerPool(1, base::TimeDelta::FromDays(1024), "test") {}
+ virtual ~FakeWorkerPool() {}
+
+ static scoped_ptr<FakeWorkerPool> Create() {
+ return make_scoped_ptr(new FakeWorkerPool);
+ }
+
+ void ScheduleTasks(const base::Closure& callback,
+ const base::Closure& reply,
+ const base::Closure& dependency,
+ int count) {
+ scoped_refptr<FakeTaskImpl> dependency_task(
+ new FakeTaskImpl(dependency, base::Closure()));
+
+ internal::WorkerPoolTask::TaskVector tasks;
+ for (int i = 0; i < count; ++i) {
+ internal::WorkerPoolTask::TaskVector dependencies(1, dependency_task);
+ tasks.push_back(new FakeTaskImpl(callback, reply, &dependencies));
+ }
+ scoped_refptr<FakeTaskImpl> completion_task(
+ new FakeTaskImpl(base::Bind(&FakeWorkerPool::OnTasksCompleted,
+ base::Unretained(this)),
+ base::Closure(),
+ &tasks));
+
+ scheduled_tasks_completion_.reset(new CompletionEvent);
+ WorkerPool::ScheduleTasks(completion_task);
}
- virtual ~WorkerPoolTest() {
+
+ void WaitForTasksToComplete() {
+ DCHECK(scheduled_tasks_completion_);
+ scheduled_tasks_completion_->Wait();
}
+ private:
+ void OnTasksCompleted() {
+ DCHECK(scheduled_tasks_completion_);
+ scheduled_tasks_completion_->Signal();
+ }
+
+ scoped_ptr<CompletionEvent> scheduled_tasks_completion_;
+};
+
+class WorkerPoolTest : public testing::Test,
+ public WorkerPoolClient {
+ public:
+ WorkerPoolTest() : finish_dispatching_completion_callbacks_count_(0) {}
+ virtual ~WorkerPoolTest() {}
+
+ // Overridden from testing::Test:
virtual void SetUp() OVERRIDE {
Reset();
}
-
virtual void TearDown() OVERRIDE {
worker_pool_->Shutdown();
}
@@ -35,35 +109,34 @@ class WorkerPoolTest : public testing::Test,
}
void Reset() {
- worker_pool_ = WorkerPool::Create(1,
- base::TimeDelta::FromDays(1024),
- "test");
+ worker_pool_ = FakeWorkerPool::Create();
worker_pool_->SetClient(this);
}
void RunAllTasksAndReset() {
+ worker_pool_->WaitForTasksToComplete();
worker_pool_->Shutdown();
Reset();
}
- WorkerPool* worker_pool() {
+ FakeWorkerPool* worker_pool() {
return worker_pool_.get();
}
- void RunTask() {
- ++run_task_count_;
+ void RunTask(unsigned id) {
+ run_task_ids_.push_back(id);
}
- void OnTaskCompleted() {
- ++on_task_completed_count_;
+ void OnTaskCompleted(unsigned id) {
+ on_task_completed_ids_.push_back(id);
}
- unsigned run_task_count() {
- return run_task_count_;
+ const std::vector<unsigned>& run_task_ids() {
+ return run_task_ids_;
}
- unsigned on_task_completed_count() {
- return on_task_completed_count_;
+ const std::vector<unsigned>& on_task_completed_ids() {
+ return on_task_completed_ids_;
}
unsigned finish_dispatching_completion_callbacks_count() {
@@ -71,39 +144,72 @@ class WorkerPoolTest : public testing::Test,
}
private:
- scoped_ptr<WorkerPool> worker_pool_;
- unsigned run_task_count_;
- unsigned on_task_completed_count_;
+ scoped_ptr<FakeWorkerPool> worker_pool_;
+ std::vector<unsigned> run_task_ids_;
+ std::vector<unsigned> on_task_completed_ids_;
unsigned finish_dispatching_completion_callbacks_count_;
};
TEST_F(WorkerPoolTest, Basic) {
- EXPECT_EQ(0u, run_task_count());
- EXPECT_EQ(0u, on_task_completed_count());
+ EXPECT_EQ(0u, run_task_ids().size());
+ EXPECT_EQ(0u, on_task_completed_ids().size());
EXPECT_EQ(0u, finish_dispatching_completion_callbacks_count());
- worker_pool()->PostTaskAndReply(
- base::Bind(&WorkerPoolTest::RunTask, base::Unretained(this)),
- base::Bind(&WorkerPoolTest::OnTaskCompleted, base::Unretained(this)));
+ worker_pool()->ScheduleTasks(
+ base::Bind(&WorkerPoolTest::RunTask, base::Unretained(this), 0u),
+ base::Bind(&WorkerPoolTest::OnTaskCompleted, base::Unretained(this), 0u),
+ base::Closure(),
+ 1);
RunAllTasksAndReset();
- EXPECT_EQ(1u, run_task_count());
- EXPECT_EQ(1u, on_task_completed_count());
+ EXPECT_EQ(1u, run_task_ids().size());
+ EXPECT_EQ(1u, on_task_completed_ids().size());
EXPECT_EQ(1u, finish_dispatching_completion_callbacks_count());
- worker_pool()->PostTaskAndReply(
- base::Bind(&WorkerPoolTest::RunTask, base::Unretained(this)),
- base::Bind(&WorkerPoolTest::OnTaskCompleted, base::Unretained(this)));
- worker_pool()->PostTaskAndReply(
- base::Bind(&WorkerPoolTest::RunTask, base::Unretained(this)),
- base::Bind(&WorkerPoolTest::OnTaskCompleted, base::Unretained(this)));
+ worker_pool()->ScheduleTasks(
+ base::Bind(&WorkerPoolTest::RunTask, base::Unretained(this), 0u),
+ base::Bind(&WorkerPoolTest::OnTaskCompleted, base::Unretained(this), 0u),
+ base::Closure(),
+ 2);
RunAllTasksAndReset();
- EXPECT_EQ(3u, run_task_count());
- EXPECT_EQ(3u, on_task_completed_count());
+ EXPECT_EQ(3u, run_task_ids().size());
+ EXPECT_EQ(3u, on_task_completed_ids().size());
EXPECT_EQ(2u, finish_dispatching_completion_callbacks_count());
}
+TEST_F(WorkerPoolTest, Dependencies) {
+ worker_pool()->ScheduleTasks(
+ base::Bind(&WorkerPoolTest::RunTask, base::Unretained(this), 1u),
+ base::Bind(&WorkerPoolTest::OnTaskCompleted, base::Unretained(this), 1u),
+ base::Bind(&WorkerPoolTest::RunTask, base::Unretained(this), 0u),
+ 1);
+ RunAllTasksAndReset();
+
+ // Check if dependency ran before task.
+ ASSERT_EQ(2u, run_task_ids().size());
+ EXPECT_EQ(0u, run_task_ids()[0]);
+ EXPECT_EQ(1u, run_task_ids()[1]);
+ ASSERT_EQ(1u, on_task_completed_ids().size());
+ EXPECT_EQ(1u, on_task_completed_ids()[0]);
+
+ worker_pool()->ScheduleTasks(
+ base::Bind(&WorkerPoolTest::RunTask, base::Unretained(this), 1u),
+ base::Bind(&WorkerPoolTest::OnTaskCompleted, base::Unretained(this), 1u),
+ base::Bind(&WorkerPoolTest::RunTask, base::Unretained(this), 0u),
+ 2);
+ RunAllTasksAndReset();
+
+ // Dependency should only run once.
+ ASSERT_EQ(5u, run_task_ids().size());
+ EXPECT_EQ(0u, run_task_ids()[2]);
+ EXPECT_EQ(1u, run_task_ids()[3]);
+ EXPECT_EQ(1u, run_task_ids()[4]);
+ ASSERT_EQ(3u, on_task_completed_ids().size());
+ EXPECT_EQ(1u, on_task_completed_ids()[1]);
+ EXPECT_EQ(1u, on_task_completed_ids()[2]);
+}
+
} // namespace
} // namespace cc
diff --git a/cc/cc_tests.gyp b/cc/cc_tests.gyp
index 1f54b0f..a8c23b3 100644
--- a/cc/cc_tests.gyp
+++ b/cc/cc_tests.gyp
@@ -225,9 +225,10 @@
'cc_test_support',
],
'sources': [
- 'trees/layer_tree_host_perftest.cc',
- 'test/run_all_unittests.cc',
+ 'base/worker_pool_perftest.cc',
'test/cc_test_suite.cc',
+ 'test/run_all_unittests.cc',
+ 'trees/layer_tree_host_perftest.cc',
],
'include_dirs': [
'test',
diff --git a/cc/resources/managed_tile_state.h b/cc/resources/managed_tile_state.h
index 37086a2..7d5eec7 100644
--- a/cc/resources/managed_tile_state.h
+++ b/cc/resources/managed_tile_state.h
@@ -5,9 +5,9 @@
#ifndef CC_RESOURCES_MANAGED_TILE_STATE_H_
#define CC_RESOURCES_MANAGED_TILE_STATE_H_
-#include "base/hash_tables.h"
#include "base/memory/scoped_ptr.h"
#include "cc/resources/platform_color.h"
+#include "cc/resources/raster_worker_pool.h"
#include "cc/resources/resource_pool.h"
#include "cc/resources/resource_provider.h"
#include "cc/resources/tile_manager.h"
@@ -113,11 +113,10 @@ class CC_EXPORT ManagedTileState {
scoped_ptr<base::Value> AsValue() const;
// Persisted state: valid all the time.
- typedef base::hash_set<uint32_t> PixelRefSet;
- PixelRefSet decoded_pixel_refs;
TileVersion tile_version;
- PicturePileImpl::Analysis picture_pile_analysis;
bool picture_pile_analyzed;
+ PicturePileImpl::Analysis picture_pile_analysis;
+ RasterWorkerPool::Task raster_task;
// Ephemeral state, valid only during TileManager::ManageTiles.
bool is_in_never_bin_on_both_trees() const {
diff --git a/cc/resources/raster_worker_pool.cc b/cc/resources/raster_worker_pool.cc
index dba3fc2..a078fb0 100644
--- a/cc/resources/raster_worker_pool.cc
+++ b/cc/resources/raster_worker_pool.cc
@@ -10,24 +10,72 @@ namespace cc {
namespace {
+class RasterWorkerPoolContainerTaskImpl : public internal::WorkerPoolTask {
+ public:
+ RasterWorkerPoolContainerTaskImpl(
+ internal::WorkerPoolTask::TaskVector* dependencies)
+ : internal::WorkerPoolTask(dependencies) {
+ }
+
+ // Overridden from internal::WorkerPoolTask:
+ virtual void RunOnThread(unsigned thread_index) OVERRIDE {}
+ virtual void DispatchCompletionCallback() OVERRIDE {}
+
+ private:
+ virtual ~RasterWorkerPoolContainerTaskImpl() {}
+};
+
class RasterWorkerPoolTaskImpl : public internal::WorkerPoolTask {
public:
- RasterWorkerPoolTaskImpl(PicturePileImpl* picture_pile,
- const RasterWorkerPool::RasterCallback& task,
- const base::Closure& reply)
- : internal::WorkerPoolTask(reply),
+ RasterWorkerPoolTaskImpl(const base::Closure& callback,
+ const RasterWorkerPool::Task::Reply& reply)
+ : callback_(callback),
+ reply_(reply) {
+ }
+
+ // Overridden from internal::WorkerPoolTask:
+ virtual void RunOnThread(unsigned thread_index) OVERRIDE {
+ callback_.Run();
+ }
+ virtual void DispatchCompletionCallback() OVERRIDE {
+ reply_.Run(!HasFinishedRunning());
+ }
+
+ private:
+ virtual ~RasterWorkerPoolTaskImpl() {}
+
+ const base::Closure callback_;
+ const RasterWorkerPool::Task::Reply reply_;
+};
+
+class RasterWorkerPoolPictureTaskImpl : public internal::WorkerPoolTask {
+ public:
+ RasterWorkerPoolPictureTaskImpl(
+ PicturePileImpl* picture_pile,
+ const RasterWorkerPool::PictureTask::Callback& callback,
+ const RasterWorkerPool::Task::Reply& reply,
+ internal::WorkerPoolTask::TaskVector* dependencies)
+ : internal::WorkerPoolTask(dependencies),
picture_pile_(picture_pile),
- task_(task) {
+ callback_(callback),
+ reply_(reply) {
DCHECK(picture_pile_);
}
+ // Overridden from internal::WorkerPoolTask:
virtual void RunOnThread(unsigned thread_index) OVERRIDE {
- task_.Run(picture_pile_->GetCloneForDrawingOnThread(thread_index));
+ callback_.Run(picture_pile_->GetCloneForDrawingOnThread(thread_index));
+ }
+ virtual void DispatchCompletionCallback() OVERRIDE {
+ reply_.Run(!HasFinishedRunning());
}
private:
+ virtual ~RasterWorkerPoolPictureTaskImpl() {}
+
scoped_refptr<PicturePileImpl> picture_pile_;
- RasterWorkerPool::RasterCallback task_;
+ const RasterWorkerPool::PictureTask::Callback callback_;
+ const RasterWorkerPool::Task::Reply reply_;
};
const char* kWorkerThreadNamePrefix = "CompositorRaster";
@@ -36,23 +84,69 @@ const int kCheckForCompletedTasksDelayMs = 6;
} // namespace
-RasterWorkerPool::RasterWorkerPool(size_t num_threads)
- : WorkerPool(
- num_threads,
- base::TimeDelta::FromMilliseconds(kCheckForCompletedTasksDelayMs),
- kWorkerThreadNamePrefix) {
+RasterWorkerPool::Task::Queue::Queue() {
+}
+
+RasterWorkerPool::Task::Queue::~Queue() {
+}
+
+void RasterWorkerPool::Task::Queue::Append(const Task& task) {
+ DCHECK(!task.is_null());
+ tasks_.push_back(task.internal_);
+}
+
+RasterWorkerPool::Task::Task() {
+}
+
+RasterWorkerPool::Task::Task(const base::Closure& callback,
+ const Reply& reply)
+ : internal_(new RasterWorkerPoolTaskImpl(callback, reply)) {
+}
+
+RasterWorkerPool::Task::Task(Queue* dependencies)
+ : internal_(new RasterWorkerPoolContainerTaskImpl(&dependencies->tasks_)) {
+}
+
+RasterWorkerPool::Task::Task(scoped_refptr<internal::WorkerPoolTask> internal)
+ : internal_(internal) {
+}
+
+RasterWorkerPool::Task::~Task() {
+}
+
+void RasterWorkerPool::Task::Reset() {
+ internal_ = NULL;
+}
+
+RasterWorkerPool::PictureTask::PictureTask(PicturePileImpl* picture_pile,
+ const Callback& callback,
+ const Reply& reply,
+ Task::Queue* dependencies)
+ : RasterWorkerPool::Task(
+ new RasterWorkerPoolPictureTaskImpl(picture_pile,
+ callback,
+ reply,
+ &dependencies->tasks_)) {
+}
+
+RasterWorkerPool::RasterWorkerPool(size_t num_threads) : WorkerPool(
+ num_threads,
+ base::TimeDelta::FromMilliseconds(kCheckForCompletedTasksDelayMs),
+ kWorkerThreadNamePrefix) {
}
RasterWorkerPool::~RasterWorkerPool() {
}
-void RasterWorkerPool::PostRasterTaskAndReply(PicturePileImpl* picture_pile,
- const RasterCallback& task,
- const base::Closure& reply) {
- PostTask(make_scoped_ptr(new RasterWorkerPoolTaskImpl(
- picture_pile,
- task,
- reply)).PassAs<internal::WorkerPoolTask>());
+void RasterWorkerPool::Shutdown() {
+ // Cancel all previously scheduled tasks.
+ WorkerPool::ScheduleTasks(NULL);
+
+ WorkerPool::Shutdown();
+}
+
+void RasterWorkerPool::ScheduleTasks(Task* task) {
+ WorkerPool::ScheduleTasks(task ? task->internal_ : NULL);
}
} // namespace cc
diff --git a/cc/resources/raster_worker_pool.h b/cc/resources/raster_worker_pool.h
index 76d0497..98032dd 100644
--- a/cc/resources/raster_worker_pool.h
+++ b/cc/resources/raster_worker_pool.h
@@ -5,8 +5,6 @@
#ifndef CC_RESOURCES_RASTER_WORKER_POOL_H_
#define CC_RESOURCES_RASTER_WORKER_POOL_H_
-#include <string>
-
#include "cc/base/worker_pool.h"
namespace cc {
@@ -15,7 +13,55 @@ class PicturePileImpl;
// A worker thread pool that runs raster tasks.
class CC_EXPORT RasterWorkerPool : public WorkerPool {
public:
- typedef base::Callback<void(PicturePileImpl* picture_pile)> RasterCallback;
+ class Task {
+ public:
+ typedef base::Callback<void(bool)> Reply;
+
+ // Highest priority task first. Order of execution is not guaranteed.
+ class Queue {
+ public:
+ Queue();
+ ~Queue();
+
+ bool empty() const { return tasks_.empty(); }
+
+ // Add task to the back of the queue.
+ void Append(const Task& task);
+
+ private:
+ friend class RasterWorkerPool;
+
+ internal::WorkerPoolTask::TaskVector tasks_;
+ };
+
+ Task();
+ Task(const base::Closure& callback, const Reply& reply);
+ explicit Task(Queue* dependencies);
+ ~Task();
+
+ // Returns true if Task is null (doesn't refer to anything).
+ bool is_null() const { return !internal_; }
+
+ // Resets the Task into an uninitialized state.
+ void Reset();
+
+ protected:
+ friend class RasterWorkerPool;
+
+ explicit Task(scoped_refptr<internal::WorkerPoolTask> internal);
+
+ scoped_refptr<internal::WorkerPoolTask> internal_;
+ };
+
+ class PictureTask : public Task {
+ public:
+ typedef base::Callback<void(PicturePileImpl*)> Callback;
+
+ PictureTask(PicturePileImpl* picture_pile,
+ const Callback& callback,
+ const Reply& reply,
+ Queue* dependencies);
+ };
virtual ~RasterWorkerPool();
@@ -23,9 +69,16 @@ class CC_EXPORT RasterWorkerPool : public WorkerPool {
return make_scoped_ptr(new RasterWorkerPool(num_threads));
}
- void PostRasterTaskAndReply(PicturePileImpl* picture_pile,
- const RasterCallback& task,
- const base::Closure& reply);
+ // Tells the worker pool to shut down after canceling all previously
+ // scheduled tasks. Reply callbacks are still guaranteed to run.
+ virtual void Shutdown() OVERRIDE;
+
+ // Schedule running of |root| task and all its dependencies. Tasks
+ // previously scheduled but no longer needed to run |root| will be
+ // canceled unless already running. Once scheduled, reply callbacks
+ // are guaranteed to run for all tasks even if they later get
+ // canceled by another call to ScheduleTasks().
+ void ScheduleTasks(Task* root);
private:
explicit RasterWorkerPool(size_t num_threads);
diff --git a/cc/resources/resource_provider.cc b/cc/resources/resource_provider.cc
index a1e330b..b77912f 100644
--- a/cc/resources/resource_provider.cc
+++ b/cc/resources/resource_provider.cc
@@ -981,6 +981,8 @@ uint8_t* ResourceProvider::MapPixelBuffer(ResourceId id) {
context3d->mapBufferCHROMIUM(
GL_PIXEL_UNPACK_TRANSFER_BUFFER_CHROMIUM, GL_WRITE_ONLY));
context3d->bindBuffer(GL_PIXEL_UNPACK_TRANSFER_BUFFER_CHROMIUM, 0);
+ // Buffer is required to be 4-byte aligned.
+ CHECK(!(reinterpret_cast<intptr_t>(image) & 3));
return image;
}
diff --git a/cc/resources/tile_manager.cc b/cc/resources/tile_manager.cc
index a318c3c..209367b 100644
--- a/cc/resources/tile_manager.cc
+++ b/cc/resources/tile_manager.cc
@@ -135,14 +135,12 @@ TileManager::TileManager(
resource_pool_(ResourcePool::Create(resource_provider)),
raster_worker_pool_(raster_worker_pool.Pass()),
manage_tiles_pending_(false),
- manage_tiles_call_count_(0),
bytes_pending_upload_(0),
has_performed_uploads_since_last_flush_(false),
ever_exceeded_memory_budget_(false),
rendering_stats_instrumentation_(rendering_stats_instrumentation),
use_color_estimator_(use_color_estimator),
did_initialize_visible_tile_(false),
- pending_tasks_(0),
max_pending_tasks_(kMaxNumPendingTasksPerThread * num_raster_threads) {
raster_worker_pool_->SetClient(this);
}
@@ -309,7 +307,6 @@ void TileManager::ManageTiles() {
TRACE_EVENT0("cc", "TileManager::ManageTiles");
manage_tiles_pending_ = false;
- ++manage_tiles_call_count_;
AssignBinsToTiles();
SortTiles();
@@ -319,8 +316,8 @@ void TileManager::ManageTiles() {
"cc", "DidManage", TRACE_EVENT_SCOPE_THREAD,
"state", TracedValue::FromValue(BasicStateAsValue().release()));
- // Finally, kick the rasterizer.
- DispatchMoreTasks();
+ // Finally, schedule rasterizer tasks.
+ ScheduleTasks();
}
void TileManager::CheckForCompletedTileUploads() {
@@ -349,7 +346,7 @@ void TileManager::CheckForCompletedTileUploads() {
tiles_with_pending_upload_.pop();
}
- DispatchMoreTasks();
+ ScheduleTasks();
}
void TileManager::AbortPendingTileUploads() {
@@ -387,11 +384,6 @@ void TileManager::ForceTileUploadToComplete(Tile* tile) {
DidFinishTileInitialization(tile);
DCHECK(tile->tile_version().IsReadyToDraw());
}
-
- if (did_initialize_visible_tile_) {
- did_initialize_visible_tile_ = false;
- client_->DidInitializeVisibleTile();
- }
}
void TileManager::GetMemoryStats(
@@ -469,7 +461,7 @@ void TileManager::DidFinishDispatchingWorkerPoolCompletionCallbacks() {
has_performed_uploads_since_last_flush_ = false;
}
- DispatchMoreTasks();
+ ScheduleTasks();
}
void TileManager::AssignGpuMemoryToTiles() {
@@ -484,7 +476,7 @@ void TileManager::AssignGpuMemoryToTiles() {
// By clearing the tiles_that_need_to_be_rasterized_ vector list
// above we move all tiles currently waiting for raster to idle state.
// Some memory cannot be released. We figure out how much in this
- // loop as well.
+ // loop.
for (TileVector::const_iterator it = tiles_.begin();
it != tiles_.end();
++it) {
@@ -503,6 +495,7 @@ void TileManager::AssignGpuMemoryToTiles() {
size_t bytes_left = bytes_allocatable;
size_t bytes_oom_in_now_bin_on_pending_tree = 0;
TileVector tiles_requiring_memory_but_oomed;
+ bool higher_priority_tile_oomed = false;
for (TileVector::iterator it = tiles_.begin();
it != tiles_.end();
++it) {
@@ -514,45 +507,61 @@ void TileManager::AssignGpuMemoryToTiles() {
if (!tile_version.requires_resource())
continue;
- // If the memory is unreleasable, then we do not need to do anything.
- if (tile_version.memory_state_ == USING_UNRELEASABLE_MEMORY) {
- if (tile->required_for_activation()) {
- AddRequiredTileForActivation(tile);
- // If after rasterizing, this tile has become required or the client has
- // changed its mind about forcing tiles, do that now.
- if (!tile->tile_version().forced_upload_ &&
- client_->ShouldForceTileUploadsRequiredForActivationToComplete()) {
- ForceTileUploadToComplete(tile);
- }
- }
- continue;
- }
-
size_t tile_bytes = tile->bytes_consumed_if_allocated();
+ // Memory is already reserved for tile with unreleasable memory
+ // so adding it to |tiles_that_need_to_be_rasterized_| doesn't
+ // affect bytes_allocatable.
+ if (tile_version.memory_state_ == USING_UNRELEASABLE_MEMORY)
+ tile_bytes = 0;
+
// If the tile is not needed, free it up.
if (mts.is_in_never_bin_on_both_trees()) {
- FreeResourcesForTile(tile);
- tile_version.memory_state_ = NOT_ALLOWED_TO_USE_MEMORY;
+ if (tile_version.memory_state_ != USING_UNRELEASABLE_MEMORY) {
+ FreeResourcesForTile(tile);
+ tile_version.memory_state_ = NOT_ALLOWED_TO_USE_MEMORY;
+ }
continue;
}
+
// Tile is OOM.
if (tile_bytes > bytes_left) {
- FreeResourcesForTile(tile);
tile->tile_version().set_rasterize_on_demand();
if (mts.tree_bin[PENDING_TREE] == NOW_BIN) {
tiles_requiring_memory_but_oomed.push_back(tile);
bytes_oom_in_now_bin_on_pending_tree += tile_bytes;
}
+ FreeResourcesForTile(tile);
+ higher_priority_tile_oomed = true;
continue;
}
+
tile_version.set_use_resource();
bytes_left -= tile_bytes;
- if (!tile_version.resource_ &&
- tile_version.memory_state_ == CAN_USE_MEMORY) {
+
+ // Tile shouldn't be rasterized if we've failed to assign
+ // gpu memory to a higher priority tile. This is important for
+ // two reasons:
+ // 1. Tile size should not impact raster priority.
+ // 2. Tile with unreleasable memory could otherwise incorrectly
+ // be added as it's not affected by |bytes_allocatable|.
+ if (higher_priority_tile_oomed)
+ continue;
+
+ if (!tile_version.resource_)
tiles_that_need_to_be_rasterized_.push_back(tile);
- }
+
if (!tile_version.resource_ && tile->required_for_activation())
AddRequiredTileForActivation(tile);
+
+ if (tile_version.memory_state_ == USING_UNRELEASABLE_MEMORY &&
+ tile->required_for_activation()) {
+ // If after rasterizing, this tile has become required or the client has
+ // changed its mind about forcing tiles, do that now.
+ if (!tile->tile_version().forced_upload_ &&
+ client_->ShouldForceTileUploadsRequiredForActivationToComplete()) {
+ ForceTileUploadToComplete(tile);
+ }
+ }
}
// In OOM situation, we iterate tiles_, remove the memory for active tree
@@ -611,16 +620,10 @@ void TileManager::AssignGpuMemoryToTiles() {
memory_stats_from_last_assign_.bytes_unreleasable = unreleasable_bytes;
memory_stats_from_last_assign_.bytes_over =
bytes_that_exceeded_memory_budget_in_now_bin;
-
- // Reverse two tiles_that_need_* vectors such that pop_back gets
- // the highest priority tile.
- std::reverse(
- tiles_that_need_to_be_rasterized_.begin(),
- tiles_that_need_to_be_rasterized_.end());
}
void TileManager::FreeResourcesForTile(Tile* tile) {
- DCHECK(tile->tile_version().memory_state_ != USING_UNRELEASABLE_MEMORY);
+ DCHECK_NE(USING_UNRELEASABLE_MEMORY, tile->tile_version().memory_state_);
if (tile->tile_version().resource_) {
resource_pool_->ReleaseResource(
tile->tile_version().resource_.Pass());
@@ -628,146 +631,158 @@ void TileManager::FreeResourcesForTile(Tile* tile) {
tile->tile_version().memory_state_ = NOT_ALLOWED_TO_USE_MEMORY;
}
-bool TileManager::CanDispatchRasterTask(Tile* tile) const {
- if (pending_tasks_ >= max_pending_tasks_)
- return false;
- size_t new_bytes_pending = bytes_pending_upload_;
- new_bytes_pending += tile->bytes_consumed_if_allocated();
- return new_bytes_pending <= kMaxPendingUploadBytes &&
- tiles_with_pending_upload_.size() < kMaxPendingUploads;
-}
-
-void TileManager::DispatchMoreTasks() {
- TileVector tiles_with_image_decoding_tasks;
-
- // Process all tiles in the need_to_be_rasterized queue:
- // 1. Dispatch image decode tasks.
- // 2. If the image decode isn't done, save the tile for later processing.
- // 3. Attempt to dispatch a raster task, or break out of the loop.
- while (!tiles_that_need_to_be_rasterized_.empty()) {
- Tile* tile = tiles_that_need_to_be_rasterized_.back();
+void TileManager::ScheduleTasks() {
+ TRACE_EVENT0("cc", "TileManager::ScheduleTasks");
+ RasterWorkerPool::Task::Queue tasks;
- DCHECK(tile->tile_version().requires_resource());
+ size_t bytes_pending_upload = bytes_pending_upload_;
+ unsigned pending_tasks = 0;
- if (DispatchImageDecodeTasksForTile(tile)) {
- tiles_with_image_decoding_tasks.push_back(tile);
- } else if (!CanDispatchRasterTask(tile)) {
- break;
- } else {
- DispatchOneRasterTask(tile);
- }
- tiles_that_need_to_be_rasterized_.pop_back();
- }
+ // Build a new task queue containing all tasks currently needed. Tasks
+ // are added in order of priority, highest priority task first.
+ for (TileVector::iterator it = tiles_that_need_to_be_rasterized_.begin();
+ it != tiles_that_need_to_be_rasterized_.end();
+ ++it) {
+ Tile* tile = *it;
+ ManagedTileState& mts = tile->managed_state();
- // Put the saved tiles back into the queue. The order is reversed
- // to preserve original ordering.
- tiles_that_need_to_be_rasterized_.insert(
- tiles_that_need_to_be_rasterized_.end(),
- tiles_with_image_decoding_tasks.rbegin(),
- tiles_with_image_decoding_tasks.rend());
+ // Skip tile if determined to not require resource.
+ if (!tile->tile_version().requires_resource())
+ continue;
- if (did_initialize_visible_tile_) {
- did_initialize_visible_tile_ = false;
- client_->DidInitializeVisibleTile();
- }
-}
+ // Skip tile if already rasterized.
+ if (tile->tile_version().resource_)
+ continue;
-bool TileManager::DispatchImageDecodeTasksForTile(Tile* tile) {
- TRACE_EVENT0("cc", "TileManager::DispatchImageDecodeTasksForTile");
- ManagedTileState& mts = tile->managed_state();
- bool pending_decode_tasks = false;
+ // TODO(reveman): Remove throttling based on max pending tasks.
+ if (pending_tasks >= max_pending_tasks_)
+ break;
- for (PicturePileImpl::PixelRefIterator iter(tile->content_rect(),
- tile->contents_scale(),
- tile->picture_pile());
- iter; ++iter) {
- skia::LazyPixelRef* pixel_ref = *iter;
- uint32_t id = pixel_ref->getGenerationID();
+ // TODO(reveman): Remove throttling based on max pending uploads.
+ if (tiles_with_pending_upload_.size() >= kMaxPendingUploads)
+ break;
- // Check if image has already been decoded.
- if (mts.decoded_pixel_refs.find(id) != mts.decoded_pixel_refs.end())
- continue;
+ // TODO(reveman): Throttle based on shared memory usage rather
+ // than bytes pending upload.
+ size_t new_bytes_pending = bytes_pending_upload;
+ new_bytes_pending += tile->bytes_consumed_if_allocated();
+ if (new_bytes_pending > kMaxPendingUploadBytes)
+ break;
+ bytes_pending_upload = new_bytes_pending;
- // Check if decode task is already pending.
- if (pending_decode_tasks_.find(id) != pending_decode_tasks_.end()) {
- pending_decode_tasks = true;
- continue;
- }
+ // Create raster task for this tile if necessary.
+ if (mts.raster_task.is_null())
+ mts.raster_task = CreateRasterTask(tile);
- // TODO(qinmin): passing correct image size to PrepareToDecode().
- if (pixel_ref->PrepareToDecode(skia::LazyPixelRef::PrepareParams())) {
- rendering_stats_instrumentation_->IncrementDeferredImageCacheHitCount();
- mts.decoded_pixel_refs.insert(id);
- continue;
- }
+ // Finally append raster task.
+ tasks.Append(mts.raster_task);
+ pending_tasks++;
+ }
- if (pending_tasks_ >= max_pending_tasks_)
- break;
+ if (!tasks.empty()) {
+ RasterWorkerPool::Task root(&tasks);
- DispatchOneImageDecodeTask(tile, pixel_ref);
- pending_decode_tasks = true;
+ // Schedule running of |tasks|. This replaces any previously
+ // scheduled tasks and effectively cancels all tasks not present
+ // in |tasks|.
+ raster_worker_pool_->ScheduleTasks(&root);
+ } else {
+ raster_worker_pool_->ScheduleTasks(NULL);
}
- return pending_decode_tasks;
+ if (did_initialize_visible_tile_) {
+ did_initialize_visible_tile_ = false;
+ client_->DidInitializeVisibleTile();
+ }
}
-void TileManager::DispatchOneImageDecodeTask(
- scoped_refptr<Tile> tile, skia::LazyPixelRef* pixel_ref) {
- TRACE_EVENT0("cc", "TileManager::DispatchOneImageDecodeTask");
- uint32_t pixel_ref_id = pixel_ref->getGenerationID();
- DCHECK(pending_decode_tasks_.end() ==
- pending_decode_tasks_.find(pixel_ref_id));
- pending_decode_tasks_.insert(pixel_ref_id);
+RasterWorkerPool::Task TileManager::CreateImageDecodeTask(
+ Tile* tile, skia::LazyPixelRef* pixel_ref) {
+ TRACE_EVENT0("cc", "TileManager::CreateImageDecodeTask");
- raster_worker_pool_->PostTaskAndReply(
+ return RasterWorkerPool::Task(
base::Bind(&TileManager::RunImageDecodeTask,
pixel_ref,
tile->layer_id(),
rendering_stats_instrumentation_),
base::Bind(&TileManager::OnImageDecodeTaskCompleted,
base::Unretained(this),
- tile,
- pixel_ref_id));
- pending_tasks_++;
+ make_scoped_refptr(tile),
+ pixel_ref->getGenerationID()));
}
-void TileManager::OnImageDecodeTaskCompleted(
- scoped_refptr<Tile> tile, uint32_t pixel_ref_id) {
+void TileManager::OnImageDecodeTaskCompleted(scoped_refptr<Tile> tile,
+ uint32_t pixel_ref_id,
+ bool was_canceled) {
TRACE_EVENT0("cc", "TileManager::OnImageDecodeTaskCompleted");
- ManagedTileState& mts = tile->managed_state();
- mts.decoded_pixel_refs.insert(pixel_ref_id);
+ DCHECK(pending_decode_tasks_.find(pixel_ref_id) !=
+ pending_decode_tasks_.end());
pending_decode_tasks_.erase(pixel_ref_id);
- pending_tasks_--;
}
-scoped_ptr<ResourcePool::Resource> TileManager::PrepareTileForRaster(
- Tile* tile) {
- scoped_ptr<ResourcePool::Resource> resource = resource_pool_->AcquireResource(
- tile->tile_size_.size(),
- tile->tile_version().resource_format_);
+TileManager::RasterTaskMetadata TileManager::GetRasterTaskMetadata(
+ const Tile& tile) const {
+ RasterTaskMetadata metadata;
+ const ManagedTileState& mts = tile.managed_state();
+ metadata.is_tile_in_pending_tree_now_bin =
+ mts.tree_bin[PENDING_TREE] == NOW_BIN;
+ metadata.tile_resolution = mts.resolution;
+ metadata.layer_id = tile.layer_id();
+ metadata.tile_id = &tile;
+ metadata.source_frame_number = tile.source_frame_number();
+ return metadata;
+}
+
+RasterWorkerPool::Task TileManager::CreateRasterTask(Tile* tile) {
+ TRACE_EVENT0("cc", "TileManager::CreateRasterTask");
+
+ scoped_ptr<ResourcePool::Resource> resource =
+ resource_pool_->AcquireResource(
+ tile->tile_size_.size(),
+ tile->tile_version().resource_format_);
resource_pool_->resource_provider()->AcquirePixelBuffer(resource->id());
+ DCHECK_EQ(CAN_USE_MEMORY, tile->tile_version().memory_state_);
tile->tile_version().memory_state_ = USING_UNRELEASABLE_MEMORY;
- return resource.Pass();
-}
-
-void TileManager::DispatchOneRasterTask(scoped_refptr<Tile> tile) {
- TRACE_EVENT0("cc", "TileManager::DispatchOneRasterTask");
- scoped_ptr<ResourcePool::Resource> resource = PrepareTileForRaster(tile);
- ResourceProvider::ResourceId resource_id = resource->id();
PicturePileImpl::Analysis* analysis = new PicturePileImpl::Analysis;
// MapPixelBuffer() returns NULL if context was lost at the time
- // AcquirePixelBuffer() was called. For simplicity we still post
+ // AcquirePixelBuffer() was called. For simplicity we still create
// a raster task that is essentially a noop in these situations.
uint8* buffer = resource_pool_->resource_provider()->MapPixelBuffer(
- resource_id);
+ resource->id());
+
+ // Create and queue all image decode tasks that this tile depends on.
+ RasterWorkerPool::Task::Queue decode_tasks;
+ for (PicturePileImpl::PixelRefIterator iter(tile->content_rect(),
+ tile->contents_scale(),
+ tile->picture_pile());
+ iter; ++iter) {
+ skia::LazyPixelRef* pixel_ref = *iter;
+ uint32_t id = pixel_ref->getGenerationID();
- // skia requires that our buffer be 4-byte aligned
- CHECK(!(reinterpret_cast<intptr_t>(buffer) & 3));
+ // Append existing image decode task if available.
+ PixelRefMap::iterator decode_task_it = pending_decode_tasks_.find(id);
+ if (decode_task_it != pending_decode_tasks_.end()) {
+ decode_tasks.Append(decode_task_it->second);
+ continue;
+ }
- raster_worker_pool_->PostRasterTaskAndReply(
+ // TODO(qinmin): passing correct image size to PrepareToDecode().
+ if (pixel_ref->PrepareToDecode(skia::LazyPixelRef::PrepareParams())) {
+ rendering_stats_instrumentation_->IncrementDeferredImageCacheHitCount();
+ continue;
+ }
+
+ // Create and append new image decode task for this pixel ref.
+ RasterWorkerPool::Task decode_task = CreateImageDecodeTask(
+ tile, pixel_ref);
+ decode_tasks.Append(decode_task);
+ pending_decode_tasks_[id] = decode_task;
+ }
+
+ return RasterWorkerPool::PictureTask(
tile->picture_pile(),
base::Bind(&TileManager::RunAnalyzeAndRasterTask,
base::Bind(&TileManager::RunAnalyzeTask,
@@ -786,43 +801,38 @@ void TileManager::DispatchOneRasterTask(scoped_refptr<Tile> tile) {
rendering_stats_instrumentation_)),
base::Bind(&TileManager::OnRasterTaskCompleted,
base::Unretained(this),
- tile,
+ make_scoped_refptr(tile),
base::Passed(&resource),
- base::Owned(analysis),
- manage_tiles_call_count_));
- pending_tasks_++;
-}
-
-TileManager::RasterTaskMetadata TileManager::GetRasterTaskMetadata(
- const Tile& tile) const {
- RasterTaskMetadata metadata;
- const ManagedTileState& mts = tile.managed_state();
- metadata.is_tile_in_pending_tree_now_bin =
- mts.tree_bin[PENDING_TREE] == NOW_BIN;
- metadata.tile_resolution = mts.resolution;
- metadata.layer_id = tile.layer_id();
- metadata.tile_id = &tile;
- metadata.source_frame_number = tile.source_frame_number();
- return metadata;
+ base::Owned(analysis)),
+ &decode_tasks);
}
void TileManager::OnRasterTaskCompleted(
scoped_refptr<Tile> tile,
scoped_ptr<ResourcePool::Resource> resource,
PicturePileImpl::Analysis* analysis,
- int manage_tiles_call_count_when_dispatched) {
+ bool was_canceled) {
TRACE_EVENT0("cc", "TileManager::OnRasterTaskCompleted");
- pending_tasks_--;
+ ManagedTileState& mts = tile->managed_state();
+ DCHECK(!mts.raster_task.is_null());
+ mts.raster_task.Reset();
+
+ // Tile resources can't be freed until upload has completed.
+ DCHECK_EQ(USING_UNRELEASABLE_MEMORY, tile->tile_version().memory_state_);
// Release raster resources.
resource_pool_->resource_provider()->UnmapPixelBuffer(resource->id());
- tile->tile_version().memory_state_ = USING_RELEASABLE_MEMORY;
+ if (was_canceled) {
+ tile->tile_version().memory_state_ = CAN_USE_MEMORY;
+ resource_pool_->resource_provider()->ReleasePixelBuffer(resource->id());
+ resource_pool_->ReleaseResource(resource.Pass());
+ return;
+ }
- ManagedTileState& managed_tile_state = tile->managed_state();
- managed_tile_state.picture_pile_analysis = *analysis;
- managed_tile_state.picture_pile_analyzed = true;
+ mts.picture_pile_analysis = *analysis;
+ mts.picture_pile_analyzed = true;
if (analysis->is_solid_color) {
tile->tile_version().set_solid_color(analysis->solid_color);
@@ -832,35 +842,17 @@ void TileManager::OnRasterTaskCompleted(
return;
}
- // Tile can be freed after the completion of the raster task. Call
- // AssignGpuMemoryToTiles() to re-assign gpu memory to highest priority
- // tiles if ManageTiles() was called since task was dispatched. The result
- // of this could be that this tile is no longer allowed to use gpu
- // memory and in that case we need to abort initialization and free all
- // associated resources before calling DispatchMoreTasks().
- if (manage_tiles_call_count_when_dispatched != manage_tiles_call_count_)
- AssignGpuMemoryToTiles();
-
- // Finish resource initialization we're still using memory.
- if (tile->tile_version().memory_state_ == USING_RELEASABLE_MEMORY) {
- // Tile resources can't be freed until upload has completed.
- tile->tile_version().memory_state_ = USING_UNRELEASABLE_MEMORY;
-
- resource_pool_->resource_provider()->BeginSetPixels(resource->id());
- has_performed_uploads_since_last_flush_ = true;
+ resource_pool_->resource_provider()->BeginSetPixels(resource->id());
+ has_performed_uploads_since_last_flush_ = true;
- tile->tile_version().resource_ = resource.Pass();
+ tile->tile_version().resource_ = resource.Pass();
- bytes_pending_upload_ += tile->bytes_consumed_if_allocated();
- tiles_with_pending_upload_.push(tile);
+ bytes_pending_upload_ += tile->bytes_consumed_if_allocated();
+ tiles_with_pending_upload_.push(tile);
- if (tile->required_for_activation() &&
- client_->ShouldForceTileUploadsRequiredForActivationToComplete())
- ForceTileUploadToComplete(tile);
- } else {
- resource_pool_->resource_provider()->ReleasePixelBuffer(resource->id());
- resource_pool_->ReleaseResource(resource.Pass());
- }
+ if (tile->required_for_activation() &&
+ client_->ShouldForceTileUploadsRequiredForActivationToComplete())
+ ForceTileUploadToComplete(tile);
}
void TileManager::DidFinishTileInitialization(Tile* tile) {
@@ -882,9 +874,23 @@ void TileManager::DidTileTreeBinChange(Tile* tile,
}
// static
+void TileManager::RunImageDecodeTask(
+ skia::LazyPixelRef* pixel_ref,
+ int layer_id,
+ RenderingStatsInstrumentation* stats_instrumentation) {
+ TRACE_EVENT0("cc", "TileManager::RunImageDecodeTask");
+ devtools_instrumentation::ScopedLayerTask image_decode_task(
+ devtools_instrumentation::kImageDecodeTask, layer_id);
+ base::TimeTicks start_time = stats_instrumentation->StartRecording();
+ pixel_ref->Decode();
+ base::TimeDelta duration = stats_instrumentation->EndRecording(start_time);
+ stats_instrumentation->AddDeferredImageDecode(duration);
+}
+
+// static
void TileManager::RunAnalyzeAndRasterTask(
- const RasterWorkerPool::RasterCallback& analyze_task,
- const RasterWorkerPool::RasterCallback& raster_task,
+ const RasterWorkerPool::PictureTask::Callback& analyze_task,
+ const RasterWorkerPool::PictureTask::Callback& raster_task,
PicturePileImpl* picture_pile) {
analyze_task.Run(picture_pile);
raster_task.Run(picture_pile);
@@ -980,18 +986,4 @@ void TileManager::RunRasterTask(
}
}
-// static
-void TileManager::RunImageDecodeTask(
- skia::LazyPixelRef* pixel_ref,
- int layer_id,
- RenderingStatsInstrumentation* stats_instrumentation) {
- TRACE_EVENT0("cc", "TileManager::RunImageDecodeTask");
- devtools_instrumentation::ScopedLayerTask image_decode_task(
- devtools_instrumentation::kImageDecodeTask, layer_id);
- base::TimeTicks start_time = stats_instrumentation->StartRecording();
- pixel_ref->Decode();
- base::TimeDelta duration = stats_instrumentation->EndRecording(start_time);
- stats_instrumentation->AddDeferredImageDecode(duration);
-}
-
} // namespace cc
diff --git a/cc/resources/tile_manager.h b/cc/resources/tile_manager.h
index 543539d..c43b857 100644
--- a/cc/resources/tile_manager.h
+++ b/cc/resources/tile_manager.h
@@ -62,6 +62,8 @@ scoped_ptr<base::Value> TileManagerBinPriorityAsValue(
// created, and unregister from the manager when they are deleted.
class CC_EXPORT TileManager : public WorkerPoolClient {
public:
+ typedef base::hash_set<uint32_t> PixelRefSet;
+
static scoped_ptr<TileManager> Create(
TileManagerClient* client,
ResourceProvider* resource_provider,
@@ -116,7 +118,7 @@ class CC_EXPORT TileManager : public WorkerPoolClient {
void UnregisterTile(Tile* tile);
// Virtual for test
- virtual void DispatchMoreTasks();
+ virtual void ScheduleTasks();
private:
// Data that is passed to raster tasks.
@@ -129,8 +131,6 @@ class CC_EXPORT TileManager : public WorkerPoolClient {
int source_frame_number;
};
- RasterTaskMetadata GetRasterTaskMetadata(const Tile& tile) const;
-
void AssignBinsToTiles();
void SortTiles();
void AssignGpuMemoryToTiles();
@@ -142,20 +142,19 @@ class CC_EXPORT TileManager : public WorkerPoolClient {
client_->ScheduleManageTiles();
manage_tiles_pending_ = true;
}
- bool DispatchImageDecodeTasksForTile(Tile* tile);
- void DispatchOneImageDecodeTask(
- scoped_refptr<Tile> tile, skia::LazyPixelRef* pixel_ref);
+ RasterWorkerPool::Task CreateImageDecodeTask(
+ Tile* tile, skia::LazyPixelRef* pixel_ref);
void OnImageDecodeTaskCompleted(
scoped_refptr<Tile> tile,
- uint32_t pixel_ref_id);
- bool CanDispatchRasterTask(Tile* tile) const;
- scoped_ptr<ResourcePool::Resource> PrepareTileForRaster(Tile* tile);
- void DispatchOneRasterTask(scoped_refptr<Tile> tile);
+ uint32_t pixel_ref_id,
+ bool was_canceled);
+ RasterTaskMetadata GetRasterTaskMetadata(const Tile& tile) const;
+ RasterWorkerPool::Task CreateRasterTask(Tile* tile);
void OnRasterTaskCompleted(
scoped_refptr<Tile> tile,
scoped_ptr<ResourcePool::Resource> resource,
PicturePileImpl::Analysis* analysis,
- int manage_tiles_call_count_when_dispatched);
+ bool was_canceled);
void DidFinishTileInitialization(Tile* tile);
void DidTileTreeBinChange(Tile* tile,
TileManagerBin new_tree_bin,
@@ -163,9 +162,13 @@ class CC_EXPORT TileManager : public WorkerPoolClient {
scoped_ptr<Value> GetMemoryRequirementsAsValue() const;
void AddRequiredTileForActivation(Tile* tile);
+ static void RunImageDecodeTask(
+ skia::LazyPixelRef* pixel_ref,
+ int layer_id,
+ RenderingStatsInstrumentation* stats_instrumentation);
static void RunAnalyzeAndRasterTask(
- const RasterWorkerPool::RasterCallback& analyze_task,
- const RasterWorkerPool::RasterCallback& raster_task,
+ const RasterWorkerPool::PictureTask::Callback& analyze_task,
+ const RasterWorkerPool::PictureTask::Callback& raster_task,
PicturePileImpl* picture_pile);
static void RunAnalyzeTask(
PicturePileImpl::Analysis* analysis,
@@ -183,16 +186,11 @@ class CC_EXPORT TileManager : public WorkerPoolClient {
const RasterTaskMetadata& metadata,
RenderingStatsInstrumentation* stats_instrumentation,
PicturePileImpl* picture_pile);
- static void RunImageDecodeTask(
- skia::LazyPixelRef* pixel_ref,
- int layer_id,
- RenderingStatsInstrumentation* stats_instrumentation);
TileManagerClient* client_;
scoped_ptr<ResourcePool> resource_pool_;
scoped_ptr<RasterWorkerPool> raster_worker_pool_;
bool manage_tiles_pending_;
- int manage_tiles_call_count_;
GlobalStateThatImpactsTilePriority global_state_;
@@ -202,8 +200,8 @@ class CC_EXPORT TileManager : public WorkerPoolClient {
typedef std::set<Tile*> TileSet;
TileSet tiles_that_need_to_be_initialized_for_activation_;
- typedef base::hash_set<uint32_t> PixelRefSet;
- PixelRefSet pending_decode_tasks_;
+ typedef base::hash_map<uint32_t, RasterWorkerPool::Task> PixelRefMap;
+ PixelRefMap pending_decode_tasks_;
typedef std::queue<scoped_refptr<Tile> > TileQueue;
TileQueue tiles_with_pending_upload_;
@@ -217,7 +215,6 @@ class CC_EXPORT TileManager : public WorkerPoolClient {
bool use_color_estimator_;
bool did_initialize_visible_tile_;
- size_t pending_tasks_;
size_t max_pending_tasks_;
DISALLOW_COPY_AND_ASSIGN(TileManager);
diff --git a/cc/test/fake_tile_manager.h b/cc/test/fake_tile_manager.h
index 7bd4d03..fb62ce3 100644
--- a/cc/test/fake_tile_manager.h
+++ b/cc/test/fake_tile_manager.h
@@ -17,7 +17,7 @@ class FakeTileManager : public TileManager {
protected:
// Do nothing
- virtual void DispatchMoreTasks() OVERRIDE { }
+ virtual void ScheduleTasks() OVERRIDE { }
};
} // namespace cc