author     reveman@chromium.org <reveman@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>  2014-02-07 02:42:32 +0000
committer  reveman@chromium.org <reveman@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>  2014-02-07 02:42:32 +0000
commit     feed4c458ae290892959673cdc0c0feb6d9e7bc3 (patch)
tree       a326a03915e63ebd92ed927488eee58cf3343c8c /cc/resources
parent     257286418b4731ad2395bce6a22e7b394599b5b0 (diff)
cc: Switch to vector based TaskGraph implementation.
Provides improved performance for graph sizes used with impl-side painting. There are two reasons why this is better:

1. TaskGraph creation can be done without any heap allocations. Prior to this we spent most of the graph construction time creating and deleting objects. This eliminates that completely.

2. Linear scans are typically faster than hash map lookups for the number of nodes and edges used in our task graphs. A large graph with raster tasks typically has ~50 nodes and ~100 edges.

The improvement in graph construction performance is somewhere between 5x-10x on desktop and likely even greater on Android, where heap allocations might be more expensive. Graph scheduling performance is up by 3x-10x, while the improvement to execution time is a more moderate 2x-5x. Execution cost is less important as it is paid by worker threads and was already considered good enough before this change.

There is still room for improvement after this change, e.g. DependentIterator performance can be improved by keeping graph edges sorted. Trivial changes to our RasterWorkerPool implementations are needed to further avoid heap allocations and fully take advantage of the performance improvements in this patch. This has been left as follow-up work.

BUG=246546

Review URL: https://codereview.chromium.org/154003006

git-svn-id: svn://svn.chromium.org/chrome/trunk/src@249589 0039d316-1c4b-4281-b951-d872f2087c98
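For context, the following is a minimal self-contained sketch of the vector-based idea this patch adopts; SketchTask, SketchGraph and OnTaskFinished are illustrative stand-ins, not the Chromium classes changed below. Nodes and edges live in contiguous std::vectors, so rebuilding a graph of ~50 nodes and ~100 edges needs no per-node heap allocation once the vectors have grown, and the dependents of a finished task are found by a linear scan over the edge list, much like TaskGraph and DependentIterator in the patch.

#include <algorithm>
#include <cstddef>
#include <iostream>
#include <vector>

// Minimal C++11 sketch of a vector-based task graph. SketchTask,
// SketchGraph and OnTaskFinished are illustrative names only.
struct SketchTask {
  bool did_run = false;
};

struct SketchGraph {
  struct Node {
    SketchTask* task;
    unsigned priority;    // Numerically lower priority runs first.
    size_t dependencies;  // Count of unfinished dependencies.
  };
  struct Edge {
    SketchTask* task;       // The dependency.
    SketchTask* dependent;  // The task waiting on |task|.
  };
  std::vector<Node> nodes;
  std::vector<Edge> edges;

  // Clearing keeps vector capacity, so a reused graph rebuilds with no
  // further heap allocations.
  void Reset() {
    nodes.clear();
    edges.clear();
  }
};

// After |task| finishes, scan the edge list (linear, like DependentIterator),
// decrement each dependent's count and return the tasks that became ready.
std::vector<SketchTask*> OnTaskFinished(SketchGraph* graph, SketchTask* task) {
  std::vector<SketchTask*> ready;
  for (const SketchGraph::Edge& edge : graph->edges) {
    if (edge.task != task)
      continue;
    auto it = std::find_if(
        graph->nodes.begin(), graph->nodes.end(),
        [&edge](const SketchGraph::Node& n) { return n.task == edge.dependent; });
    if (it != graph->nodes.end() && --it->dependencies == 0)
      ready.push_back(it->task);
  }
  return ready;
}

int main() {
  SketchTask decode, raster;
  SketchGraph graph;
  graph.nodes.push_back({&decode, 1u, 0u});  // No dependencies.
  graph.nodes.push_back({&raster, 2u, 1u});  // Waits on the decode task.
  graph.edges.push_back({&decode, &raster});

  decode.did_run = true;
  std::cout << "tasks made ready by decode: "
            << OnTaskFinished(&graph, &decode).size() << "\n";  // Prints 1.
  return 0;
}

As the perftest changes below do via Reset(), reusing one graph object across schedules keeps the vectors' capacity, so repeated graph construction avoids reallocation entirely.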
Diffstat (limited to 'cc/resources')
-rw-r--r--  cc/resources/image_raster_worker_pool.cc        |  64
-rw-r--r--  cc/resources/image_raster_worker_pool.h         |   9
-rw-r--r--  cc/resources/pixel_buffer_raster_worker_pool.cc |  90
-rw-r--r--  cc/resources/raster_worker_pool.cc              |  73
-rw-r--r--  cc/resources/raster_worker_pool.h               |  23
-rw-r--r--  cc/resources/raster_worker_pool_perftest.cc     |  37
-rw-r--r--  cc/resources/task_graph_runner.cc               | 357
-rw-r--r--  cc/resources/task_graph_runner.h                | 126
-rw-r--r--  cc/resources/task_graph_runner_perftest.cc      | 113
-rw-r--r--  cc/resources/task_graph_runner_unittest.cc      |  56
10 files changed, 478 insertions, 470 deletions
diff --git a/cc/resources/image_raster_worker_pool.cc b/cc/resources/image_raster_worker_pool.cc
index d4419ae..45cd08f 100644
--- a/cc/resources/image_raster_worker_pool.cc
+++ b/cc/resources/image_raster_worker_pool.cc
@@ -34,23 +34,18 @@ void ImageRasterWorkerPool::ScheduleTasks(RasterTask::Queue* queue) {
raster_tasks_pending_ = true;
raster_tasks_required_for_activation_pending_ = true;
- unsigned priority = 0u;
- TaskGraph graph;
+ unsigned priority = kRasterTaskPriorityBase;
+ internal::TaskGraph graph;
scoped_refptr<internal::WorkerPoolTask>
new_raster_required_for_activation_finished_task(
CreateRasterRequiredForActivationFinishedTask(
raster_tasks_required_for_activation().size()));
- internal::GraphNode* raster_required_for_activation_finished_node =
- CreateGraphNodeForTask(
- new_raster_required_for_activation_finished_task.get(),
- priority++,
- &graph);
-
scoped_refptr<internal::WorkerPoolTask> new_raster_finished_task(
CreateRasterFinishedTask());
- internal::GraphNode* raster_finished_node = CreateGraphNodeForTask(
- new_raster_finished_task.get(), priority++, &graph);
+
+ size_t raster_required_for_activation_finished_dependency_count = 0u;
+ size_t raster_finished_dependency_count = 0u;
RasterTaskVector gpu_raster_tasks;
@@ -65,16 +60,28 @@ void ImageRasterWorkerPool::ScheduleTasks(RasterTask::Queue* queue) {
continue;
}
- CreateGraphNodeForImageRasterTask(
- task,
- task->dependencies(),
- priority++,
- IsRasterTaskRequiredForActivation(task),
- raster_required_for_activation_finished_node,
- raster_finished_node,
- &graph);
+ if (IsRasterTaskRequiredForActivation(task)) {
+ ++raster_required_for_activation_finished_dependency_count;
+ graph.edges.push_back(internal::TaskGraph::Edge(
+ task, new_raster_required_for_activation_finished_task.get()));
+ }
+
+ InsertNodeForRasterTask(&graph, task, task->dependencies(), priority++);
+
+ ++raster_finished_dependency_count;
+ graph.edges.push_back(
+ internal::TaskGraph::Edge(task, new_raster_finished_task.get()));
}
+ InsertNodeForTask(&graph,
+ new_raster_required_for_activation_finished_task.get(),
+ kRasterRequiredForActivationFinishedTaskPriority,
+ raster_required_for_activation_finished_dependency_count);
+ InsertNodeForTask(&graph,
+ new_raster_finished_task.get(),
+ kRasterFinishedTaskPriority,
+ raster_finished_dependency_count);
+
SetTaskGraph(&graph);
set_raster_finished_task(new_raster_finished_task);
@@ -171,25 +178,4 @@ scoped_ptr<base::Value> ImageRasterWorkerPool::StateAsValue() const {
return state.PassAs<base::Value>();
}
-// static
-void ImageRasterWorkerPool::CreateGraphNodeForImageRasterTask(
- internal::WorkerPoolTask* image_task,
- const internal::Task::Vector& decode_tasks,
- unsigned priority,
- bool is_required_for_activation,
- internal::GraphNode* raster_required_for_activation_finished_node,
- internal::GraphNode* raster_finished_node,
- TaskGraph* graph) {
- internal::GraphNode* image_node =
- CreateGraphNodeForRasterTask(image_task, decode_tasks, priority, graph);
-
- if (is_required_for_activation) {
- raster_required_for_activation_finished_node->add_dependency();
- image_node->add_dependent(raster_required_for_activation_finished_node);
- }
-
- raster_finished_node->add_dependency();
- image_node->add_dependent(raster_finished_node);
-}
-
} // namespace cc
diff --git a/cc/resources/image_raster_worker_pool.h b/cc/resources/image_raster_worker_pool.h
index 6de7479..b0eb14b 100644
--- a/cc/resources/image_raster_worker_pool.h
+++ b/cc/resources/image_raster_worker_pool.h
@@ -46,15 +46,6 @@ class CC_EXPORT ImageRasterWorkerPool : public RasterWorkerPool {
scoped_ptr<base::Value> StateAsValue() const;
- static void CreateGraphNodeForImageRasterTask(
- internal::WorkerPoolTask* image_task,
- const internal::Task::Vector& decode_tasks,
- unsigned priority,
- bool is_required_for_activation,
- internal::GraphNode* raster_required_for_activation_finished_node,
- internal::GraphNode* raster_finished_node,
- TaskGraph* graph);
-
const unsigned texture_target_;
bool raster_tasks_pending_;
diff --git a/cc/resources/pixel_buffer_raster_worker_pool.cc b/cc/resources/pixel_buffer_raster_worker_pool.cc
index f9fe49b..32a79aa 100644
--- a/cc/resources/pixel_buffer_raster_worker_pool.cc
+++ b/cc/resources/pixel_buffer_raster_worker_pool.cc
@@ -9,11 +9,6 @@
#include "base/values.h"
#include "cc/debug/traced_value.h"
#include "cc/resources/resource.h"
-#include "third_party/skia/include/core/SkBitmapDevice.h"
-
-#if defined(OS_ANDROID)
-#include "base/android/sys_utils.h"
-#endif
namespace cc {
namespace {
@@ -22,20 +17,8 @@ const int kCheckForCompletedRasterTasksDelayMs = 6;
const size_t kMaxScheduledRasterTasks = 48;
-typedef base::StackVector<internal::GraphNode*, kMaxScheduledRasterTasks>
- NodeVector;
-
-void AddDependenciesToGraphNode(internal::GraphNode* node,
- const NodeVector::ContainerType& dependencies) {
- for (NodeVector::ContainerType::const_iterator it = dependencies.begin();
- it != dependencies.end();
- ++it) {
- internal::GraphNode* dependency = *it;
-
- node->add_dependency();
- dependency->add_dependent(node);
- }
-}
+typedef base::StackVector<internal::WorkerPoolTask*, kMaxScheduledRasterTasks>
+ WorkerPoolTaskVector;
// Only used as std::find_if predicate for DCHECKs.
bool WasCanceled(const internal::RasterWorkerPoolTask* task) {
@@ -487,14 +470,11 @@ void PixelBufferRasterWorkerPool::CheckForCompletedRasterTasks() {
void PixelBufferRasterWorkerPool::ScheduleMoreTasks() {
TRACE_EVENT0("cc", "PixelBufferRasterWorkerPool::ScheduleMoreTasks");
- enum RasterTaskType {
- PREPAINT_TYPE = 0,
- REQUIRED_FOR_ACTIVATION_TYPE = 1,
- NUM_TYPES = 2
- };
- NodeVector tasks[NUM_TYPES];
- unsigned priority = 2u; // 0-1 reserved for RasterFinished tasks.
- TaskGraph graph;
+ WorkerPoolTaskVector tasks;
+ WorkerPoolTaskVector tasks_required_for_activation;
+
+ unsigned priority = kRasterTaskPriorityBase;
+ internal::TaskGraph graph;
size_t bytes_pending_upload = bytes_pending_upload_;
bool did_throttle_raster_tasks = false;
@@ -534,10 +514,7 @@ void PixelBufferRasterWorkerPool::ScheduleMoreTasks() {
}
// Throttle raster tasks based on kMaxScheduledRasterTasks.
- size_t scheduled_raster_task_count =
- tasks[PREPAINT_TYPE].container().size() +
- tasks[REQUIRED_FOR_ACTIVATION_TYPE].container().size();
- if (scheduled_raster_task_count >= kMaxScheduledRasterTasks) {
+ if (tasks.container().size() >= kMaxScheduledRasterTasks) {
did_throttle_raster_tasks = true;
break;
}
@@ -546,22 +523,21 @@ void PixelBufferRasterWorkerPool::ScheduleMoreTasks() {
// throttling limits.
bytes_pending_upload = new_bytes_pending_upload;
- RasterTaskType type = IsRasterTaskRequiredForActivation(task)
- ? REQUIRED_FOR_ACTIVATION_TYPE
- : PREPAINT_TYPE;
-
DCHECK(state_it->second == UNSCHEDULED || state_it->second == SCHEDULED);
state_it->second = SCHEDULED;
- tasks[type].container().push_back(CreateGraphNodeForRasterTask(
- task, task->dependencies(), priority++, &graph));
+ InsertNodeForRasterTask(&graph, task, task->dependencies(), priority++);
+
+ tasks.container().push_back(task);
+ if (IsRasterTaskRequiredForActivation(task))
+ tasks_required_for_activation.container().push_back(task);
}
scoped_refptr<internal::WorkerPoolTask>
new_raster_required_for_activation_finished_task;
size_t scheduled_raster_task_required_for_activation_count =
- tasks[REQUIRED_FOR_ACTIVATION_TYPE].container().size();
+ tasks_required_for_activation.container().size();
DCHECK_LE(scheduled_raster_task_required_for_activation_count,
raster_tasks_required_for_activation_.size());
// Schedule OnRasterTasksRequiredForActivationFinished call only when
@@ -574,20 +550,22 @@ void PixelBufferRasterWorkerPool::ScheduleMoreTasks() {
CreateRasterRequiredForActivationFinishedTask(
raster_tasks_required_for_activation_.size());
raster_required_for_activation_finished_task_pending_ = true;
- internal::GraphNode* raster_required_for_activation_finished_node =
- CreateGraphNodeForTask(
- new_raster_required_for_activation_finished_task.get(),
- 0u, // Priority 0
- &graph);
- AddDependenciesToGraphNode(raster_required_for_activation_finished_node,
- tasks[REQUIRED_FOR_ACTIVATION_TYPE].container());
+ InsertNodeForTask(&graph,
+ new_raster_required_for_activation_finished_task.get(),
+ kRasterRequiredForActivationFinishedTaskPriority,
+ scheduled_raster_task_required_for_activation_count);
+ for (WorkerPoolTaskVector::ContainerType::const_iterator it =
+ tasks_required_for_activation.container().begin();
+ it != tasks_required_for_activation.container().end();
+ ++it) {
+ graph.edges.push_back(internal::TaskGraph::Edge(
+ *it, new_raster_required_for_activation_finished_task.get()));
+ }
}
scoped_refptr<internal::WorkerPoolTask> new_raster_finished_task;
- size_t scheduled_raster_task_count =
- tasks[PREPAINT_TYPE].container().size() +
- tasks[REQUIRED_FOR_ACTIVATION_TYPE].container().size();
+ size_t scheduled_raster_task_count = tasks.container().size();
DCHECK_LE(scheduled_raster_task_count, PendingRasterTaskCount());
// Schedule OnRasterTasksFinished call only when notification is pending
// and throttling is not preventing all pending tasks from being scheduled.
@@ -595,12 +573,16 @@ void PixelBufferRasterWorkerPool::ScheduleMoreTasks() {
should_notify_client_if_no_tasks_are_pending_) {
new_raster_finished_task = CreateRasterFinishedTask();
raster_finished_task_pending_ = true;
- internal::GraphNode* raster_finished_node =
- CreateGraphNodeForTask(new_raster_finished_task.get(),
- 1u, // Priority 1
- &graph);
- for (unsigned type = 0; type < NUM_TYPES; ++type) {
- AddDependenciesToGraphNode(raster_finished_node, tasks[type].container());
+ InsertNodeForTask(&graph,
+ new_raster_finished_task.get(),
+ kRasterFinishedTaskPriority,
+ scheduled_raster_task_count);
+ for (WorkerPoolTaskVector::ContainerType::const_iterator it =
+ tasks.container().begin();
+ it != tasks.container().end();
+ ++it) {
+ graph.edges.push_back(
+ internal::TaskGraph::Edge(*it, new_raster_finished_task.get()));
}
}
diff --git a/cc/resources/raster_worker_pool.cc b/cc/resources/raster_worker_pool.cc
index 142be46..510c636 100644
--- a/cc/resources/raster_worker_pool.cc
+++ b/cc/resources/raster_worker_pool.cc
@@ -23,7 +23,6 @@
#include "third_party/skia/include/gpu/SkGpuDevice.h"
namespace cc {
-
namespace {
// Subclass of Allocator that takes a suitably allocated pointer and uses
@@ -235,7 +234,7 @@ class RasterWorkerPoolTaskImpl : public internal::RasterWorkerPoolTask {
}
protected:
- virtual ~RasterWorkerPoolTaskImpl() {}
+ virtual ~RasterWorkerPoolTaskImpl() { DCHECK(!buffer_); }
private:
scoped_ptr<base::Value> DataAsValue() const {
@@ -544,6 +543,13 @@ void RasterWorkerPool::RasterTask::Reset() { internal_ = NULL; }
RasterWorkerPool::RasterTask::~RasterTask() {}
+// Task priorities that make sure raster finished tasks run before any
+// remaining raster tasks.
+unsigned RasterWorkerPool::kRasterFinishedTaskPriority = 1u;
+unsigned RasterWorkerPool::kRasterRequiredForActivationFinishedTaskPriority =
+ 0u;
+unsigned RasterWorkerPool::kRasterTaskPriorityBase = 2u;
+
RasterWorkerPool::RasterWorkerPool(ResourceProvider* resource_provider,
ContextProvider* context_provider)
: namespace_token_(g_task_graph_runner.Pointer()->GetNamespaceToken()),
@@ -618,7 +624,7 @@ void RasterWorkerPool::Shutdown() {
TRACE_EVENT0("cc", "RasterWorkerPool::Shutdown");
raster_tasks_.clear();
- TaskGraph empty;
+ internal::TaskGraph empty;
SetTaskGraph(&empty);
g_task_graph_runner.Pointer()->WaitForTasksToFinishRunning(namespace_token_);
weak_ptr_factory_.InvalidateWeakPtrs();
@@ -636,15 +642,15 @@ bool RasterWorkerPool::IsRasterTaskRequiredForActivation(
raster_tasks_required_for_activation_.end();
}
-void RasterWorkerPool::SetTaskGraph(TaskGraph* graph) {
- TRACE_EVENT1(
- "cc", "RasterWorkerPool::SetTaskGraph", "num_tasks", graph->size());
+void RasterWorkerPool::SetTaskGraph(internal::TaskGraph* graph) {
+ TRACE_EVENT0("cc", "RasterWorkerPool::SetTaskGraph");
- for (internal::GraphNode::Map::iterator it = graph->begin();
- it != graph->end();
+ for (internal::TaskGraph::Node::Vector::iterator it = graph->nodes.begin();
+ it != graph->nodes.end();
++it) {
+ internal::TaskGraph::Node& node = *it;
internal::WorkerPoolTask* task =
- static_cast<internal::WorkerPoolTask*>(it->first);
+ static_cast<internal::WorkerPoolTask*>(node.task);
if (!task->HasBeenScheduled()) {
task->WillSchedule();
@@ -756,24 +762,25 @@ scoped_ptr<base::Value> RasterWorkerPool::ScheduledStateAsValue() const {
}
// static
-internal::GraphNode* RasterWorkerPool::CreateGraphNodeForTask(
- internal::WorkerPoolTask* task,
- unsigned priority,
- TaskGraph* graph) {
- internal::GraphNode* node = new internal::GraphNode(task, priority);
- DCHECK(graph->find(task) == graph->end());
- graph->set(task, make_scoped_ptr(node));
- return node;
+void RasterWorkerPool::InsertNodeForTask(internal::TaskGraph* graph,
+ internal::WorkerPoolTask* task,
+ unsigned priority,
+ size_t dependencies) {
+ DCHECK(std::find_if(graph->nodes.begin(),
+ graph->nodes.end(),
+ internal::TaskGraph::Node::TaskComparator(task)) ==
+ graph->nodes.end());
+ graph->nodes.push_back(
+ internal::TaskGraph::Node(task, priority, dependencies));
}
// static
-internal::GraphNode* RasterWorkerPool::CreateGraphNodeForRasterTask(
+void RasterWorkerPool::InsertNodeForRasterTask(
+ internal::TaskGraph* graph,
internal::WorkerPoolTask* raster_task,
const internal::Task::Vector& decode_tasks,
- unsigned priority,
- TaskGraph* graph) {
- internal::GraphNode* raster_node =
- CreateGraphNodeForTask(raster_task, priority, graph);
+ unsigned priority) {
+ size_t dependencies = 0u;
// Insert image decode tasks.
for (internal::Task::Vector::const_iterator it = decode_tasks.begin();
@@ -786,22 +793,20 @@ internal::GraphNode* RasterWorkerPool::CreateGraphNodeForRasterTask(
if (decode_task->HasCompleted())
continue;
- raster_node->add_dependency();
+ dependencies++;
- // Check if decode task already exists in graph.
- internal::GraphNode::Map::iterator decode_it = graph->find(decode_task);
- if (decode_it != graph->end()) {
- internal::GraphNode* decode_node = decode_it->second;
- decode_node->add_dependent(raster_node);
- continue;
- }
+ // Add decode task if it doesn't already exist in the graph.
+ internal::TaskGraph::Node::Vector::iterator decode_it =
+ std::find_if(graph->nodes.begin(),
+ graph->nodes.end(),
+ internal::TaskGraph::Node::TaskComparator(decode_task));
+ if (decode_it == graph->nodes.end())
+ InsertNodeForTask(graph, decode_task, priority, 0u);
- internal::GraphNode* decode_node =
- CreateGraphNodeForTask(decode_task, priority, graph);
- decode_node->add_dependent(raster_node);
+ graph->edges.push_back(internal::TaskGraph::Edge(decode_task, raster_task));
}
- return raster_node;
+ InsertNodeForTask(graph, raster_task, priority, dependencies);
}
} // namespace cc
diff --git a/cc/resources/raster_worker_pool.h b/cc/resources/raster_worker_pool.h
index c715e0e..81a9f9a 100644
--- a/cc/resources/raster_worker_pool.h
+++ b/cc/resources/raster_worker_pool.h
@@ -241,7 +241,6 @@ class CC_EXPORT RasterWorkerPool : public internal::WorkerPoolTaskClient {
virtual ResourceFormat GetResourceFormat() const = 0;
protected:
- typedef internal::TaskGraphRunner::TaskGraph TaskGraph;
typedef std::vector<scoped_refptr<internal::WorkerPoolTask> > TaskVector;
typedef std::deque<scoped_refptr<internal::WorkerPoolTask> > TaskDeque;
typedef std::vector<scoped_refptr<internal::RasterWorkerPoolTask> >
@@ -257,7 +256,7 @@ class CC_EXPORT RasterWorkerPool : public internal::WorkerPoolTaskClient {
void SetRasterTasks(RasterTask::Queue* queue);
bool IsRasterTaskRequiredForActivation(internal::RasterWorkerPoolTask* task)
const;
- void SetTaskGraph(TaskGraph* graph);
+ void SetTaskGraph(internal::TaskGraph* graph);
void CollectCompletedWorkerPoolTasks(internal::Task::Vector* completed_tasks);
// Run raster tasks that use GPU on current thread.
@@ -288,16 +287,20 @@ class CC_EXPORT RasterWorkerPool : public internal::WorkerPoolTaskClient {
scoped_ptr<base::Value> ScheduledStateAsValue() const;
- static internal::GraphNode* CreateGraphNodeForTask(
- internal::WorkerPoolTask* task,
- unsigned priority,
- TaskGraph* graph);
+ static void InsertNodeForTask(internal::TaskGraph* graph,
+ internal::WorkerPoolTask* task,
+ unsigned priority,
+ size_t dependencies);
- static internal::GraphNode* CreateGraphNodeForRasterTask(
- internal::WorkerPoolTask* raster_task,
+ static void InsertNodeForRasterTask(
+ internal::TaskGraph* graph,
+ internal::WorkerPoolTask* task,
const internal::Task::Vector& decode_tasks,
- unsigned priority,
- TaskGraph* graph);
+ unsigned priority);
+
+ static unsigned kRasterFinishedTaskPriority;
+ static unsigned kRasterRequiredForActivationFinishedTaskPriority;
+ static unsigned kRasterTaskPriorityBase;
private:
void OnRasterFinished(const internal::WorkerPoolTask* source);
diff --git a/cc/resources/raster_worker_pool_perftest.cc b/cc/resources/raster_worker_pool_perftest.cc
index 6a170d5..274bfd0 100644
--- a/cc/resources/raster_worker_pool_perftest.cc
+++ b/cc/resources/raster_worker_pool_perftest.cc
@@ -79,40 +79,43 @@ class PerfRasterWorkerPool : public RasterWorkerPool {
}
void BuildTaskGraph() {
- unsigned priority = 0;
- TaskGraph graph;
+ unsigned priority = 2u;
+ internal::TaskGraph graph;
scoped_refptr<internal::WorkerPoolTask>
raster_required_for_activation_finished_task(
CreateRasterRequiredForActivationFinishedTask(
raster_tasks_required_for_activation().size()));
- internal::GraphNode* raster_required_for_activation_finished_node =
- CreateGraphNodeForTask(
- raster_required_for_activation_finished_task.get(),
- priority++,
- &graph);
-
scoped_refptr<internal::WorkerPoolTask> raster_finished_task(
CreateRasterFinishedTask());
- internal::GraphNode* raster_finished_node =
- CreateGraphNodeForTask(raster_finished_task.get(), priority++, &graph);
+
+ size_t raster_required_for_activation_finished_dependencies = 0u;
+ size_t raster_finished_dependencies = 0u;
for (RasterTaskVector::const_iterator it = raster_tasks().begin();
it != raster_tasks().end();
++it) {
internal::RasterWorkerPoolTask* task = it->get();
- internal::GraphNode* node = CreateGraphNodeForRasterTask(
- task, task->dependencies(), priority++, &graph);
-
if (IsRasterTaskRequiredForActivation(task)) {
- raster_required_for_activation_finished_node->add_dependency();
- node->add_dependent(raster_required_for_activation_finished_node);
+ raster_required_for_activation_finished_dependencies++;
+ graph.edges.push_back(internal::TaskGraph::Edge(
+ task, raster_required_for_activation_finished_task.get()));
}
- raster_finished_node->add_dependency();
- node->add_dependent(raster_finished_node);
+ InsertNodeForRasterTask(&graph, task, task->dependencies(), priority++);
+
+ raster_finished_dependencies++;
+ graph.edges.push_back(
+ internal::TaskGraph::Edge(task, raster_finished_task.get()));
}
+
+ InsertNodeForTask(&graph,
+ raster_required_for_activation_finished_task.get(),
+ 0u,
+ raster_required_for_activation_finished_dependencies);
+ InsertNodeForTask(
+ &graph, raster_finished_task.get(), 1u, raster_finished_dependencies);
}
private:
diff --git a/cc/resources/task_graph_runner.cc b/cc/resources/task_graph_runner.cc
index 975881b..8a6d7f8 100644
--- a/cc/resources/task_graph_runner.cc
+++ b/cc/resources/task_graph_runner.cc
@@ -6,13 +6,67 @@
#include <algorithm>
-#include "base/containers/hash_tables.h"
#include "base/debug/trace_event.h"
#include "base/strings/stringprintf.h"
#include "base/threading/thread_restrictions.h"
namespace cc {
namespace internal {
+namespace {
+
+// Helper class for iterating over all dependents of a task.
+class DependentIterator {
+ public:
+ DependentIterator(TaskGraph* graph, const Task* task)
+ : graph_(graph), task_(task), current_index_(-1), current_node_(NULL) {
+ ++(*this);
+ }
+
+ TaskGraph::Node& operator->() const {
+ DCHECK_LT(current_index_, graph_->edges.size());
+ DCHECK_EQ(graph_->edges[current_index_].task, task_);
+ DCHECK(current_node_);
+ return *current_node_;
+ }
+
+ TaskGraph::Node& operator*() const {
+ DCHECK_LT(current_index_, graph_->edges.size());
+ DCHECK_EQ(graph_->edges[current_index_].task, task_);
+ DCHECK(current_node_);
+ return *current_node_;
+ }
+
+ // Note: Performance can be improved by keeping edges sorted.
+ DependentIterator& operator++() {
+ // Find next dependency edge for |task_|.
+ do {
+ ++current_index_;
+ if (current_index_ == graph_->edges.size())
+ return *this;
+ } while (graph_->edges[current_index_].task != task_);
+
+ // Now find the node for the dependent of this edge.
+ TaskGraph::Node::Vector::iterator it =
+ std::find_if(graph_->nodes.begin(),
+ graph_->nodes.end(),
+ TaskGraph::Node::TaskComparator(
+ graph_->edges[current_index_].dependent));
+ DCHECK(it != graph_->nodes.end());
+ current_node_ = &(*it);
+
+ return *this;
+ }
+
+ operator bool() const { return current_index_ < graph_->edges.size(); }
+
+ private:
+ TaskGraph* graph_;
+ const Task* task_;
+ size_t current_index_;
+ TaskGraph::Node* current_node_;
+};
+
+} // namespace
Task::Task() : did_run_(false) {}
@@ -26,12 +80,21 @@ void Task::DidRun() { did_run_ = true; }
bool Task::HasFinishedRunning() const { return did_run_; }
-GraphNode::GraphNode(Task* task, unsigned priority)
- : task_(task), priority_(priority), num_dependencies_(0) {}
+TaskGraph::TaskGraph() {}
+
+TaskGraph::~TaskGraph() {}
-GraphNode::~GraphNode() {}
+void TaskGraph::Swap(TaskGraph* other) {
+ nodes.swap(other->nodes);
+ edges.swap(other->edges);
+}
+
+void TaskGraph::Reset() {
+ nodes.clear();
+ edges.clear();
+}
-TaskGraphRunner::TaskNamespace::TaskNamespace() {}
+TaskGraphRunner::TaskNamespace::TaskNamespace() : num_running_tasks(0u) {}
TaskGraphRunner::TaskNamespace::~TaskNamespace() {}
@@ -42,6 +105,8 @@ TaskGraphRunner::TaskGraphRunner(size_t num_threads,
has_namespaces_with_finished_running_tasks_cv_(&lock_),
next_namespace_id_(1),
next_thread_index_(0u),
+ // |num_threads| can be 0 for tests.
+ running_tasks_(std::max(num_threads, static_cast<size_t>(1)), NULL),
shutdown_(false) {
base::AutoLock lock(lock_);
@@ -93,29 +158,37 @@ NamespaceToken TaskGraphRunner::GetNamespaceToken() {
}
void TaskGraphRunner::WaitForTasksToFinishRunning(NamespaceToken token) {
- base::AutoLock lock(lock_);
+ TRACE_EVENT0("cc", "TaskGraphRunner::WaitForTasksToFinishRunning");
DCHECK(token.IsValid());
- TaskNamespaceMap::iterator it = namespaces_.find(token.id_);
- if (it == namespaces_.end())
- return;
- TaskNamespace* task_namespace = it->second;
- while (!HasFinishedRunningTasksInNamespace(task_namespace))
- has_namespaces_with_finished_running_tasks_cv_.Wait();
+ {
+ base::AutoLock lock(lock_);
+
+ TaskNamespaceMap::iterator it = namespaces_.find(token.id_);
+ if (it == namespaces_.end())
+ return;
- // There may be other namespaces that have finished running
- // tasks, so wake up another origin thread.
- has_namespaces_with_finished_running_tasks_cv_.Signal();
+ TaskNamespace* task_namespace = it->second;
+
+ while (!HasFinishedRunningTasksInNamespace(task_namespace))
+ has_namespaces_with_finished_running_tasks_cv_.Wait();
+
+ // There may be other namespaces that have finished running
+ // tasks, so wake up another origin thread.
+ has_namespaces_with_finished_running_tasks_cv_.Signal();
+ }
}
void TaskGraphRunner::SetTaskGraph(NamespaceToken token, TaskGraph* graph) {
- DCHECK(token.IsValid());
-
- TaskGraph new_pending_tasks;
- TaskGraph new_running_tasks;
+ TRACE_EVENT2("cc",
+ "TaskGraphRunner::SetTaskGraph",
+ "num_nodes",
+ graph->nodes.size(),
+ "num_edges",
+ graph->edges.size());
- new_pending_tasks.swap(*graph);
+ DCHECK(token.IsValid());
{
base::AutoLock lock(lock_);
@@ -129,53 +202,51 @@ void TaskGraphRunner::SetTaskGraph(NamespaceToken token, TaskGraph* graph) {
if (!task_namespace)
task_namespace.reset(new TaskNamespace);
- // First remove all completed tasks from |new_pending_tasks| and
- // adjust number of dependencies.
+ // First adjust number of dependencies to reflect completed tasks.
for (Task::Vector::iterator it = task_namespace->completed_tasks.begin();
it != task_namespace->completed_tasks.end();
++it) {
- Task* task = it->get();
-
- scoped_ptr<GraphNode> node = new_pending_tasks.take_and_erase(task);
- if (node) {
- for (GraphNode::Vector::const_iterator it = node->dependents().begin();
- it != node->dependents().end();
- ++it) {
- GraphNode* dependent_node = *it;
- dependent_node->remove_dependency();
- }
+ for (DependentIterator node_it(graph, it->get()); node_it; ++node_it) {
+ TaskGraph::Node& node = *node_it;
+ DCHECK_LT(0u, node.dependencies);
+ node.dependencies--;
}
}
- // Build new running task set.
- for (TaskGraph::iterator it = task_namespace->running_tasks.begin();
- it != task_namespace->running_tasks.end();
- ++it) {
- Task* task = it->first;
- // Transfer scheduled task value from |new_pending_tasks| to
- // |new_running_tasks| if currently running. Value must be set to
- // NULL if |new_pending_tasks| doesn't contain task. This does
- // the right in both cases.
- new_running_tasks.set(task, new_pending_tasks.take_and_erase(task));
- }
-
- // Build new "ready to run" tasks queue.
+ // Build new "ready to run" queue and remove nodes from old graph.
task_namespace->ready_to_run_tasks.clear();
- for (TaskGraph::iterator it = new_pending_tasks.begin();
- it != new_pending_tasks.end();
+ for (TaskGraph::Node::Vector::iterator it = graph->nodes.begin();
+ it != graph->nodes.end();
++it) {
- Task* task = it->first;
- DCHECK(task);
- GraphNode* node = it->second;
+ TaskGraph::Node& node = *it;
+
+ // Remove any old nodes that are associated with this task. The result is
+ // that the old graph is left with only the nodes not present in this graph, which
+ // we use below to determine what tasks need to be canceled.
+ TaskGraph::Node::Vector::iterator old_it =
+ std::find_if(task_namespace->graph.nodes.begin(),
+ task_namespace->graph.nodes.end(),
+ TaskGraph::Node::TaskComparator(node.task));
+ if (old_it != task_namespace->graph.nodes.end()) {
+ std::swap(*old_it, task_namespace->graph.nodes.back());
+ task_namespace->graph.nodes.pop_back();
+ }
- // Completed tasks should not exist in |new_pending_tasks|.
- DCHECK(!task->HasFinishedRunning());
+ // Task is not ready to run if dependencies are not yet satisfied.
+ if (node.dependencies)
+ continue;
- if (!node->num_dependencies())
- task_namespace->ready_to_run_tasks.push_back(node);
+ // Skip if already finished running task.
+ if (node.task->HasFinishedRunning())
+ continue;
- // Erase the task from old pending tasks.
- task_namespace->pending_tasks.erase(task);
+ // Skip if already running.
+ if (std::find(running_tasks_.begin(), running_tasks_.end(), node.task) !=
+ running_tasks_.end())
+ continue;
+
+ task_namespace->ready_to_run_tasks.push_back(
+ PrioritizedTask(node.task, node.priority));
}
// Rearrange the elements in |ready_to_run_tasks| in such a way that
@@ -184,35 +255,29 @@ void TaskGraphRunner::SetTaskGraph(NamespaceToken token, TaskGraph* graph) {
task_namespace->ready_to_run_tasks.end(),
CompareTaskPriority);
- task_namespace->completed_tasks.reserve(
- task_namespace->completed_tasks.size() +
- task_namespace->pending_tasks.size());
+ // Swap task graph.
+ task_namespace->graph.Swap(graph);
- // The items left in |pending_tasks| need to be canceled.
- for (TaskGraph::const_iterator it = task_namespace->pending_tasks.begin();
- it != task_namespace->pending_tasks.end();
+ // Determine what tasks in old graph need to be canceled.
+ for (TaskGraph::Node::Vector::iterator it = graph->nodes.begin();
+ it != graph->nodes.end();
++it) {
- task_namespace->completed_tasks.push_back(it->first);
- }
+ TaskGraph::Node& node = *it;
- // Swap task sets.
- // Note: old tasks are intentionally destroyed after releasing |lock_|.
- task_namespace->pending_tasks.swap(new_pending_tasks);
- task_namespace->running_tasks.swap(new_running_tasks);
-
- // If |ready_to_run_tasks| is empty, it means we either have
- // running tasks, or we have no pending tasks.
- DCHECK(!task_namespace->ready_to_run_tasks.empty() ||
- (task_namespace->pending_tasks.empty() ||
- !task_namespace->running_tasks.empty()));
-
- // Add task namespace if not empty.
- if (!task_namespace->pending_tasks.empty() ||
- !task_namespace->running_tasks.empty() ||
- !task_namespace->completed_tasks.empty()) {
- namespaces_.set(token.id_, task_namespace.Pass());
+ // Skip if already finished running task.
+ if (node.task->HasFinishedRunning())
+ continue;
+
+ // Skip if already running.
+ if (std::find(running_tasks_.begin(), running_tasks_.end(), node.task) !=
+ running_tasks_.end())
+ continue;
+
+ task_namespace->completed_tasks.push_back(node.task);
}
+ namespaces_.set(token.id_, task_namespace.Pass());
+
// Build new "ready to run" task namespaces queue.
ready_to_run_namespaces_.clear();
for (TaskNamespaceMap::iterator it = namespaces_.begin();
@@ -236,26 +301,30 @@ void TaskGraphRunner::SetTaskGraph(NamespaceToken token, TaskGraph* graph) {
void TaskGraphRunner::CollectCompletedTasks(NamespaceToken token,
Task::Vector* completed_tasks) {
- base::AutoLock lock(lock_);
+ TRACE_EVENT0("cc", "TaskGraphRunner::CollectCompletedTasks");
DCHECK(token.IsValid());
- TaskNamespaceMap::iterator it = namespaces_.find(token.id_);
- if (it == namespaces_.end())
- return;
-
- TaskNamespace* task_namespace = it->second;
-
- DCHECK_EQ(0u, completed_tasks->size());
- completed_tasks->swap(task_namespace->completed_tasks);
- if (!HasFinishedRunningTasksInNamespace(task_namespace))
- return;
-
- // Remove namespace if finished running tasks.
- DCHECK_EQ(0u, task_namespace->pending_tasks.size());
- DCHECK_EQ(0u, task_namespace->running_tasks.size());
- DCHECK_EQ(0u, task_namespace->completed_tasks.size());
- DCHECK_EQ(0u, task_namespace->ready_to_run_tasks.size());
- namespaces_.erase(it);
+
+ {
+ base::AutoLock lock(lock_);
+
+ TaskNamespaceMap::iterator it = namespaces_.find(token.id_);
+ if (it == namespaces_.end())
+ return;
+
+ TaskNamespace* task_namespace = it->second;
+
+ DCHECK_EQ(0u, completed_tasks->size());
+ completed_tasks->swap(task_namespace->completed_tasks);
+ if (!HasFinishedRunningTasksInNamespace(task_namespace))
+ return;
+
+ // Remove namespace if finished running tasks.
+ DCHECK_EQ(0u, task_namespace->completed_tasks.size());
+ DCHECK_EQ(0u, task_namespace->ready_to_run_tasks.size());
+ DCHECK_EQ(0u, task_namespace->num_running_tasks);
+ namespaces_.erase(it);
+ }
}
bool TaskGraphRunner::RunTaskForTesting() {
@@ -294,6 +363,8 @@ void TaskGraphRunner::Run() {
}
void TaskGraphRunner::RunTaskWithLockAcquired(int thread_index) {
+ TRACE_EVENT1("cc", "TaskGraphRunner::RunTask", "thread_index", thread_index);
+
lock_.AssertAcquired();
DCHECK(!ready_to_run_namespaces_.empty());
@@ -309,7 +380,7 @@ void TaskGraphRunner::RunTaskWithLockAcquired(int thread_index) {
std::pop_heap(task_namespace->ready_to_run_tasks.begin(),
task_namespace->ready_to_run_tasks.end(),
CompareTaskPriority);
- scoped_refptr<Task> task(task_namespace->ready_to_run_tasks.back()->task());
+ scoped_refptr<Task> task(task_namespace->ready_to_run_tasks.back().task);
task_namespace->ready_to_run_tasks.pop_back();
// Add task namespace back to |ready_to_run_namespaces_| if not
@@ -321,11 +392,13 @@ void TaskGraphRunner::RunTaskWithLockAcquired(int thread_index) {
CompareTaskNamespacePriority);
}
- // Move task from |pending_tasks| to |running_tasks|.
- DCHECK(task_namespace->pending_tasks.contains(task.get()));
- DCHECK(!task_namespace->running_tasks.contains(task.get()));
- task_namespace->running_tasks.set(
- task.get(), task_namespace->pending_tasks.take_and_erase(task.get()));
+ // Add task to |running_tasks_|.
+ DCHECK_LT(static_cast<size_t>(thread_index), running_tasks_.size());
+ DCHECK(!running_tasks_[thread_index]);
+ running_tasks_[thread_index] = task.get();
+
+ // Increment running task count for task namespace.
+ task_namespace->num_running_tasks++;
// There may be more work available, so wake up another worker thread.
has_ready_to_run_tasks_cv_.Signal();
@@ -342,47 +415,47 @@ void TaskGraphRunner::RunTaskWithLockAcquired(int thread_index) {
// This will mark task as finished running.
task->DidRun();
- // Now iterate over all dependents to remove dependency and check
- // if they are ready to run.
- scoped_ptr<GraphNode> node =
- task_namespace->running_tasks.take_and_erase(task.get());
- if (node) {
- bool ready_to_run_namespaces_has_heap_properties = true;
-
- for (GraphNode::Vector::const_iterator it = node->dependents().begin();
- it != node->dependents().end();
- ++it) {
- GraphNode* dependent_node = *it;
-
- dependent_node->remove_dependency();
- // Task is ready if it has no dependencies. Add it to
- // |ready_to_run_tasks_|.
- if (!dependent_node->num_dependencies()) {
- bool was_empty = task_namespace->ready_to_run_tasks.empty();
- task_namespace->ready_to_run_tasks.push_back(dependent_node);
- std::push_heap(task_namespace->ready_to_run_tasks.begin(),
- task_namespace->ready_to_run_tasks.end(),
- CompareTaskPriority);
- // Task namespace is ready if it has at least one ready
- // to run task. Add it to |ready_to_run_namespaces_| if
- // it just become ready.
- if (was_empty) {
- DCHECK(std::find(ready_to_run_namespaces_.begin(),
- ready_to_run_namespaces_.end(),
- task_namespace) == ready_to_run_namespaces_.end());
- ready_to_run_namespaces_.push_back(task_namespace);
- }
- ready_to_run_namespaces_has_heap_properties = false;
+ // Decrement running task count for task namespace.
+ DCHECK_LT(0u, task_namespace->num_running_tasks);
+ task_namespace->num_running_tasks--;
+
+ // Remove task from |running_tasks_|.
+ running_tasks_[thread_index] = NULL;
+
+ // Now iterate over all dependents to decrement dependencies and check if they
+ // are ready to run.
+ bool ready_to_run_namespaces_has_heap_properties = true;
+ for (DependentIterator it(&task_namespace->graph, task.get()); it; ++it) {
+ TaskGraph::Node& dependent_node = *it;
+
+ DCHECK_LT(0u, dependent_node.dependencies);
+ dependent_node.dependencies--;
+ // Task is ready if it has no dependencies. Add it to |ready_to_run_tasks_|.
+ if (!dependent_node.dependencies) {
+ bool was_empty = task_namespace->ready_to_run_tasks.empty();
+ task_namespace->ready_to_run_tasks.push_back(
+ PrioritizedTask(dependent_node.task, dependent_node.priority));
+ std::push_heap(task_namespace->ready_to_run_tasks.begin(),
+ task_namespace->ready_to_run_tasks.end(),
+ CompareTaskPriority);
+ // Task namespace is ready if it has at least one ready to run task. Add
+ // it to |ready_to_run_namespaces_| if it just became ready.
+ if (was_empty) {
+ DCHECK(std::find(ready_to_run_namespaces_.begin(),
+ ready_to_run_namespaces_.end(),
+ task_namespace) == ready_to_run_namespaces_.end());
+ ready_to_run_namespaces_.push_back(task_namespace);
}
+ ready_to_run_namespaces_has_heap_properties = false;
}
+ }
- // Rearrange the task namespaces in |ready_to_run_namespaces_|
- // in such a way that they yet again form a heap.
- if (!ready_to_run_namespaces_has_heap_properties) {
- std::make_heap(ready_to_run_namespaces_.begin(),
- ready_to_run_namespaces_.end(),
- CompareTaskNamespacePriority);
- }
+ // Rearrange the task namespaces in |ready_to_run_namespaces_| in such a way
+ // that they yet again form a heap.
+ if (!ready_to_run_namespaces_has_heap_properties) {
+ std::make_heap(ready_to_run_namespaces_.begin(),
+ ready_to_run_namespaces_.end(),
+ CompareTaskNamespacePriority);
}
// Finally add task to |completed_tasks_|.
diff --git a/cc/resources/task_graph_runner.h b/cc/resources/task_graph_runner.h
index 5a55f6d..722335a 100644
--- a/cc/resources/task_graph_runner.h
+++ b/cc/resources/task_graph_runner.h
@@ -5,13 +5,11 @@
#ifndef CC_RESOURCES_TASK_GRAPH_RUNNER_H_
#define CC_RESOURCES_TASK_GRAPH_RUNNER_H_
-#include <queue>
#include <string>
#include <vector>
#include "base/containers/scoped_ptr_hash_map.h"
#include "base/memory/ref_counted.h"
-#include "base/memory/scoped_ptr.h"
#include "base/synchronization/condition_variable.h"
#include "base/threading/simple_thread.h"
#include "cc/base/cc_export.h"
@@ -39,58 +37,49 @@ class CC_EXPORT Task : public base::RefCountedThreadSafe<Task> {
bool did_run_;
};
-} // namespace internal
-} // namespace cc
+// Dependencies are represented as edges in a task graph. Each graph node
+// is assigned a priority and a run count that matches the number of
+// dependencies.
+struct CC_EXPORT TaskGraph {
+ struct Node {
+ class TaskComparator {
+ public:
+ explicit TaskComparator(const Task* task) : task_(task) {}
-#if defined(COMPILER_GCC)
-namespace BASE_HASH_NAMESPACE {
-template <>
-struct hash<cc::internal::Task*> {
- size_t operator()(cc::internal::Task* ptr) const {
- return hash<size_t>()(reinterpret_cast<size_t>(ptr));
- }
-};
-} // namespace BASE_HASH_NAMESPACE
-#endif // COMPILER
+ bool operator()(const Node& node) const { return node.task == task_; }
-namespace cc {
-namespace internal {
+ private:
+ const Task* task_;
+ };
-// Dependencies are represented by edges in a task graph. A graph node
-// store edges as a vector of dependents. Each graph node is assigned
-// a priority and a run count that matches the number of dependencies.
-class CC_EXPORT GraphNode {
- public:
- typedef std::vector<GraphNode*> Vector;
- typedef base::ScopedPtrHashMap<Task*, GraphNode> Map;
+ typedef std::vector<Node> Vector;
- GraphNode(Task* task, unsigned priority);
- ~GraphNode();
+ Node(Task* task, unsigned priority, size_t dependencies)
+ : task(task), priority(priority), dependencies(dependencies) {}
- Task* task() { return task_; }
+ Task* task;
+ unsigned priority;
+ size_t dependencies;
+ };
- void add_dependent(GraphNode* dependent) {
- DCHECK(dependent);
- dependents_.push_back(dependent);
- }
- const Vector& dependents() const { return dependents_; }
+ struct Edge {
+ typedef std::vector<Edge> Vector;
- unsigned priority() const { return priority_; }
+ Edge(const Task* task, Task* dependent)
+ : task(task), dependent(dependent) {}
- unsigned num_dependencies() const { return num_dependencies_; }
- void add_dependency() { ++num_dependencies_; }
- void remove_dependency() {
- DCHECK(num_dependencies_);
- --num_dependencies_;
- }
+ const Task* task;
+ Task* dependent;
+ };
- private:
- Task* task_;
- Vector dependents_;
- unsigned priority_;
- unsigned num_dependencies_;
+ TaskGraph();
+ ~TaskGraph();
+
+ void Swap(TaskGraph* other);
+ void Reset();
- DISALLOW_COPY_AND_ASSIGN(GraphNode);
+ Node::Vector nodes;
+ Edge::Vector edges;
};
class TaskGraphRunner;
@@ -115,8 +104,6 @@ class CC_EXPORT NamespaceToken {
// might block and should not be used on a thread that needs to be responsive.
class CC_EXPORT TaskGraphRunner : public base::DelegateSimpleThread::Delegate {
public:
- typedef GraphNode::Map TaskGraph;
-
TaskGraphRunner(size_t num_threads, const std::string& thread_name_prefix);
virtual ~TaskGraphRunner();
@@ -128,7 +115,7 @@ class CC_EXPORT TaskGraphRunner : public base::DelegateSimpleThread::Delegate {
// no longer needed will be canceled unless already running. Canceled
// tasks are moved to |completed_tasks| without being run. The result
// is that once scheduled, a task is guaranteed to end up in the
- // |completed_tasks| queue even if it later get canceled by another
+ // |completed_tasks| queue even if it later gets canceled by another
// call to SetTaskGraph().
void SetTaskGraph(NamespaceToken token, TaskGraph* graph);
@@ -144,32 +131,41 @@ class CC_EXPORT TaskGraphRunner : public base::DelegateSimpleThread::Delegate {
bool RunTaskForTesting();
private:
+ struct PrioritizedTask {
+ typedef std::vector<PrioritizedTask> Vector;
+
+ PrioritizedTask(Task* task, unsigned priority)
+ : task(task), priority(priority) {}
+
+ Task* task;
+ unsigned priority;
+ };
+
struct TaskNamespace {
typedef std::vector<TaskNamespace*> Vector;
TaskNamespace();
~TaskNamespace();
- // This set contains all pending tasks.
- TaskGraph pending_tasks;
- // This set contains all currently running tasks.
- TaskGraph running_tasks;
+ // Current task graph.
+ TaskGraph graph;
+
+ // Ordered set of tasks that are ready to run.
+ PrioritizedTask::Vector ready_to_run_tasks;
+
// Completed tasks not yet collected by origin thread.
Task::Vector completed_tasks;
- // Ordered set of tasks that are ready to run.
- internal::GraphNode::Vector ready_to_run_tasks;
+
+ // Number of currently running tasks.
+ size_t num_running_tasks;
};
typedef base::ScopedPtrHashMap<int, TaskNamespace> TaskNamespaceMap;
- static bool CompareTaskPriority(const internal::GraphNode* a,
- const internal::GraphNode* b) {
+ static bool CompareTaskPriority(const PrioritizedTask& a,
+ const PrioritizedTask& b) {
// In this system, numerically lower priority is run first.
- if (a->priority() != b->priority())
- return a->priority() > b->priority();
-
- // Run task with most dependents first when priority is the same.
- return a->dependents().size() < b->dependents().size();
+ return a.priority > b.priority;
}
static bool CompareTaskNamespacePriority(const TaskNamespace* a,
@@ -185,9 +181,9 @@ class CC_EXPORT TaskGraphRunner : public base::DelegateSimpleThread::Delegate {
}
static bool HasFinishedRunningTasksInNamespace(
- TaskNamespace* task_namespace) {
- return task_namespace->pending_tasks.empty() &&
- task_namespace->running_tasks.empty();
+ const TaskNamespace* task_namespace) {
+ return !task_namespace->num_running_tasks &&
+ task_namespace->ready_to_run_tasks.empty();
}
// Overridden from base::DelegateSimpleThread:
@@ -224,6 +220,10 @@ class CC_EXPORT TaskGraphRunner : public base::DelegateSimpleThread::Delegate {
// loop index is 0.
unsigned next_thread_index_;
+ // This set contains all currently running tasks.
+ typedef std::vector<const Task*> TaskVector;
+ TaskVector running_tasks_;
+
// Set during shutdown. Tells workers to exit when no more tasks
// are pending.
bool shutdown_;
diff --git a/cc/resources/task_graph_runner_perftest.cc b/cc/resources/task_graph_runner_perftest.cc
index 80c7fdc..7590450 100644
--- a/cc/resources/task_graph_runner_perftest.cc
+++ b/cc/resources/task_graph_runner_perftest.cc
@@ -69,9 +69,12 @@ class TaskGraphRunnerPerfTest : public testing::Test {
CreateTasks(num_tasks, &tasks);
CreateTasks(num_leaf_tasks, &leaf_tasks);
+ // Avoid unnecessary heap allocations by reusing the same graph.
+ internal::TaskGraph graph;
+
timer_.Reset();
do {
- internal::GraphNode::Map graph;
+ graph.Reset();
BuildTaskGraph(top_level_tasks, tasks, leaf_tasks, &graph);
timer_.NextLap();
} while (!timer_.HasTimeLimitExpired());
@@ -95,20 +98,25 @@ class TaskGraphRunnerPerfTest : public testing::Test {
CreateTasks(num_tasks, &tasks);
CreateTasks(num_leaf_tasks, &leaf_tasks);
+ // Avoid unnecessary heap allocations by reusing the same graph and
+ // completed tasks vector.
+ internal::TaskGraph graph;
+ internal::Task::Vector completed_tasks;
+
timer_.Reset();
do {
- internal::GraphNode::Map graph;
+ graph.Reset();
BuildTaskGraph(top_level_tasks, tasks, leaf_tasks, &graph);
task_graph_runner_->SetTaskGraph(namespace_token_, &graph);
// Shouldn't be any tasks to collect as we reschedule the same set
// of tasks.
- DCHECK_EQ(0u, CollectCompletedTasks());
+ DCHECK_EQ(0u, CollectCompletedTasks(&completed_tasks));
timer_.NextLap();
} while (!timer_.HasTimeLimitExpired());
- internal::GraphNode::Map empty;
+ internal::TaskGraph empty;
task_graph_runner_->SetTaskGraph(namespace_token_, &empty);
- CollectCompletedTasks();
+ CollectCompletedTasks(&completed_tasks);
perf_test::PrintResult("schedule_tasks",
"",
@@ -132,23 +140,29 @@ class TaskGraphRunnerPerfTest : public testing::Test {
CreateTasks(num_leaf_tasks, &leaf_tasks[i]);
}
+ // Avoid unnecessary heap allocations by reusing the same graph and
+ // completed tasks vector.
+ internal::TaskGraph graph;
+ internal::Task::Vector completed_tasks;
+
int count = 0;
timer_.Reset();
do {
- internal::GraphNode::Map graph;
+ graph.Reset();
BuildTaskGraph(top_level_tasks[count % kNumVersions],
tasks[count % kNumVersions],
leaf_tasks[count % kNumVersions],
&graph);
task_graph_runner_->SetTaskGraph(namespace_token_, &graph);
- CollectCompletedTasks();
+ CollectCompletedTasks(&completed_tasks);
+ completed_tasks.clear();
++count;
timer_.NextLap();
} while (!timer_.HasTimeLimitExpired());
- internal::GraphNode::Map empty;
+ internal::TaskGraph empty;
task_graph_runner_->SetTaskGraph(namespace_token_, &empty);
- CollectCompletedTasks();
+ CollectCompletedTasks(&completed_tasks);
perf_test::PrintResult("schedule_alternate_tasks",
"",
@@ -169,14 +183,20 @@ class TaskGraphRunnerPerfTest : public testing::Test {
CreateTasks(num_tasks, &tasks);
CreateTasks(num_leaf_tasks, &leaf_tasks);
+ // Avoid unnecessary heap allocations by reusing the same graph and
+ // completed tasks vector.
+ internal::TaskGraph graph;
+ internal::Task::Vector completed_tasks;
+
timer_.Reset();
do {
- internal::GraphNode::Map graph;
+ graph.Reset();
BuildTaskGraph(top_level_tasks, tasks, leaf_tasks, &graph);
task_graph_runner_->SetTaskGraph(namespace_token_, &graph);
while (task_graph_runner_->RunTaskForTesting())
continue;
- CollectCompletedTasks();
+ CollectCompletedTasks(&completed_tasks);
+ completed_tasks.clear();
ResetTasks(&top_level_tasks);
ResetTasks(&tasks);
ResetTasks(&leaf_tasks);
@@ -204,66 +224,51 @@ class TaskGraphRunnerPerfTest : public testing::Test {
void BuildTaskGraph(const PerfTaskImpl::Vector& top_level_tasks,
const PerfTaskImpl::Vector& tasks,
const PerfTaskImpl::Vector& leaf_tasks,
- internal::GraphNode::Map* graph) {
- typedef std::vector<internal::GraphNode*> NodeVector;
+ internal::TaskGraph* graph) {
+ DCHECK(graph->nodes.empty());
+ DCHECK(graph->edges.empty());
- NodeVector top_level_nodes;
- top_level_nodes.reserve(top_level_tasks.size());
- for (PerfTaskImpl::Vector::const_iterator it = top_level_tasks.begin();
- it != top_level_tasks.end();
- ++it) {
- internal::Task* top_level_task = it->get();
- scoped_ptr<internal::GraphNode> top_level_node(
- new internal::GraphNode(top_level_task, 0u));
-
- top_level_nodes.push_back(top_level_node.get());
- graph->set(top_level_task, top_level_node.Pass());
- }
-
- NodeVector leaf_nodes;
- leaf_nodes.reserve(leaf_tasks.size());
for (PerfTaskImpl::Vector::const_iterator it = leaf_tasks.begin();
it != leaf_tasks.end();
++it) {
- internal::Task* leaf_task = it->get();
- scoped_ptr<internal::GraphNode> leaf_node(
- new internal::GraphNode(leaf_task, 0u));
-
- leaf_nodes.push_back(leaf_node.get());
- graph->set(leaf_task, leaf_node.Pass());
+ graph->nodes.push_back(internal::TaskGraph::Node(it->get(), 0u, 0u));
}
for (PerfTaskImpl::Vector::const_iterator it = tasks.begin();
it != tasks.end();
++it) {
- internal::Task* task = it->get();
- scoped_ptr<internal::GraphNode> node(new internal::GraphNode(task, 0u));
-
- for (NodeVector::iterator node_it = top_level_nodes.begin();
- node_it != top_level_nodes.end();
- ++node_it) {
- internal::GraphNode* top_level_node = *node_it;
- node->add_dependent(top_level_node);
- top_level_node->add_dependency();
+ graph->nodes.push_back(
+ internal::TaskGraph::Node(it->get(), 0u, leaf_tasks.size()));
+
+ for (PerfTaskImpl::Vector::const_iterator leaf_it = leaf_tasks.begin();
+ leaf_it != leaf_tasks.end();
+ ++leaf_it) {
+ graph->edges.push_back(
+ internal::TaskGraph::Edge(leaf_it->get(), it->get()));
}
- for (NodeVector::iterator node_it = leaf_nodes.begin();
- node_it != leaf_nodes.end();
- ++node_it) {
- internal::GraphNode* leaf_node = *node_it;
- leaf_node->add_dependent(node.get());
- node->add_dependency();
+ for (PerfTaskImpl::Vector::const_iterator top_level_it =
+ top_level_tasks.begin();
+ top_level_it != top_level_tasks.end();
+ ++top_level_it) {
+ graph->edges.push_back(
+ internal::TaskGraph::Edge(it->get(), top_level_it->get()));
}
+ }
- graph->set(task, node.Pass());
+ for (PerfTaskImpl::Vector::const_iterator it = top_level_tasks.begin();
+ it != top_level_tasks.end();
+ ++it) {
+ graph->nodes.push_back(
+ internal::TaskGraph::Node(it->get(), 0u, tasks.size()));
}
}
- size_t CollectCompletedTasks() {
- internal::Task::Vector completed_tasks;
+ size_t CollectCompletedTasks(internal::Task::Vector* completed_tasks) {
+ DCHECK(completed_tasks->empty());
task_graph_runner_->CollectCompletedTasks(namespace_token_,
- &completed_tasks);
- return completed_tasks.size();
+ completed_tasks);
+ return completed_tasks->size();
}
scoped_ptr<internal::TaskGraphRunner> task_graph_runner_;
diff --git a/cc/resources/task_graph_runner_unittest.cc b/cc/resources/task_graph_runner_unittest.cc
index 6539a1b..e9cb450 100644
--- a/cc/resources/task_graph_runner_unittest.cc
+++ b/cc/resources/task_graph_runner_unittest.cc
@@ -74,29 +74,27 @@ class TaskGraphRunnerTestBase {
void ScheduleTasks(int namespace_index, const std::vector<Task>& tasks) {
internal::Task::Vector new_tasks;
internal::Task::Vector new_dependents;
- internal::GraphNode::Map new_graph;
+ internal::TaskGraph new_graph;
for (std::vector<Task>::const_iterator it = tasks.begin();
it != tasks.end();
++it) {
scoped_refptr<FakeTaskImpl> new_task(
new FakeTaskImpl(this, it->namespace_index, it->id));
- scoped_ptr<internal::GraphNode> node(
- new internal::GraphNode(new_task.get(), it->priority));
-
+ new_graph.nodes.push_back(
+ internal::TaskGraph::Node(new_task.get(), it->priority, 0u));
for (unsigned i = 0; i < it->dependent_count; ++i) {
scoped_refptr<FakeDependentTaskImpl> new_dependent_task(
new FakeDependentTaskImpl(
this, it->namespace_index, it->dependent_id));
- scoped_ptr<internal::GraphNode> dependent_node(
- new internal::GraphNode(new_dependent_task.get(), it->priority));
- node->add_dependent(dependent_node.get());
- dependent_node->add_dependency();
- new_graph.set(new_dependent_task.get(), dependent_node.Pass());
+ new_graph.nodes.push_back(internal::TaskGraph::Node(
+ new_dependent_task.get(), it->priority, 1u));
+ new_graph.edges.push_back(internal::TaskGraph::Edge(
+ new_task.get(), new_dependent_task.get()));
+
new_dependents.push_back(new_dependent_task.get());
}
- new_graph.set(new_task.get(), node.Pass());
new_tasks.push_back(new_task.get());
}
@@ -289,44 +287,6 @@ TEST_F(TaskGraphRunnerSingleThreadTest, Priority) {
EXPECT_EQ(1u, on_task_completed_ids(i)[0]);
EXPECT_EQ(0u, on_task_completed_ids(i)[1]);
}
-
- for (int i = 0; i < kNamespaceCount; ++i)
- ResetIds(i);
-
- for (int i = 0; i < kNamespaceCount; ++i) {
- std::vector<Task> tasks;
- tasks.push_back(Task(i,
- 0u,
- 3u,
- 1u, // 1 dependent
- 1u)); // Priority 1
- tasks.push_back(Task(i,
- 1u,
- 4u,
- 2u, // 2 dependents
- 1u)); // Priority 1
- tasks.push_back(Task(i,
- 2u,
- 5u,
- 1u, // 1 dependent
- 0u)); // Priority 0
- ScheduleTasks(i, tasks);
- }
-
- for (int i = 0; i < kNamespaceCount; ++i) {
- RunAllTasks(i);
-
- // Check if tasks ran in order of priority and that task with more
- // dependents ran first when priority is the same.
- ASSERT_LE(3u, run_task_ids(i).size());
- EXPECT_EQ(2u, run_task_ids(i)[0]);
- EXPECT_EQ(5u, run_task_ids(i)[1]);
- EXPECT_EQ(1u, run_task_ids(i)[2]);
- ASSERT_EQ(3u, on_task_completed_ids(i).size());
- EXPECT_EQ(2u, on_task_completed_ids(i)[0]);
- EXPECT_EQ(1u, on_task_completed_ids(i)[1]);
- EXPECT_EQ(0u, on_task_completed_ids(i)[2]);
- }
}
} // namespace