author     reveman@chromium.org <reveman@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>  2014-01-29 05:21:37 +0000
committer  reveman@chromium.org <reveman@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>  2014-01-29 05:21:37 +0000
commit     7d73f676c7a12a52236136c8681dfeb95dd550b6 (patch)
tree       0fc8ddd25d861eb2ab80259a063d608d3a68079e /cc
parent     44bab15b645a8dab8d391e212ab8d6976551f4bd (diff)
cc: Add useful TaskGraphRunner performance tests.
The current perf tests are not useful for profiling because their output depends heavily on kernel scheduling: they mostly measure context-switching cost, so an improvement in CPU time is not guaranteed to improve their results. This change includes a small refactor of TaskGraphRunner that allows tests to run on a single thread, which gives more predictable results and useful profiling output. The number of tasks used in the tests has also been adjusted to more realistic values.

BUG=338355

Review URL: https://codereview.chromium.org/147883003

git-svn-id: svn://svn.chromium.org/chrome/trunk/src@247625 0039d316-1c4b-4281-b951-d872f2087c98
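For context, the single-thread execution pattern the updated perf test relies on is roughly the condensed sketch below. It is modeled on RunScheduleAndExecuteTasksTest in the new task_graph_runner_perftest.cc; the graph-building step is elided and the free-function wrapper is hypothetical, but SetTaskGraph(), RunTaskForTesting(), and CollectCompletedTasks() are the calls added or used in this patch.

// Condensed sketch of the single-thread execution pattern used by the
// perf test; assumes a runner built with zero worker threads, e.g.
// internal::TaskGraphRunner(0, "PerfTest"), and a prebuilt task graph.
#include "cc/resources/task_graph_runner.h"

namespace cc {

void DrainGraphOnCurrentThread(internal::TaskGraphRunner* runner,
                               internal::NamespaceToken token,
                               internal::GraphNode::Map* graph) {
  // Schedule the graph. With no worker threads, nothing runs yet.
  runner->SetTaskGraph(token, graph);

  // Run ready tasks on the calling thread until none remain;
  // RunTaskForTesting() returns false once no task is ready to run.
  while (runner->RunTaskForTesting())
    continue;

  // Collect completed tasks so the next lap starts from a clean state.
  internal::Task::Vector completed_tasks;
  runner->CollectCompletedTasks(token, &completed_tasks);
}

}  // namespace cc

Because everything runs on one thread, the measured time is dominated by scheduling and task bookkeeping rather than context switching, which is what makes the profiling output useful.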
Diffstat (limited to 'cc')
-rw-r--r--  cc/resources/task_graph_runner.cc           191
-rw-r--r--  cc/resources/task_graph_runner.h              8
-rw-r--r--  cc/resources/task_graph_runner_perftest.cc  335
3 files changed, 328 insertions, 206 deletions
diff --git a/cc/resources/task_graph_runner.cc b/cc/resources/task_graph_runner.cc
index 471f50c..f1ed738 100644
--- a/cc/resources/task_graph_runner.cc
+++ b/cc/resources/task_graph_runner.cc
@@ -265,6 +265,16 @@ void TaskGraphRunner::CollectCompletedTasks(NamespaceToken token,
namespaces_.erase(it);
}
+bool TaskGraphRunner::RunTaskForTesting() {
+ base::AutoLock lock(lock_);
+
+ if (ready_to_run_namespaces_.empty())
+ return false;
+
+ RunTaskWithLockAcquired(0);
+ return true;
+}
+
void TaskGraphRunner::Run() {
base::AutoLock lock(lock_);
@@ -282,105 +292,112 @@ void TaskGraphRunner::Run() {
continue;
}
- // Take top priority TaskNamespace from |ready_to_run_namespaces_|.
- std::pop_heap(ready_to_run_namespaces_.begin(),
- ready_to_run_namespaces_.end(),
- CompareTaskNamespacePriority);
- TaskNamespace* task_namespace = ready_to_run_namespaces_.back();
- ready_to_run_namespaces_.pop_back();
- DCHECK(!task_namespace->ready_to_run_tasks.empty());
-
- // Take top priority task from |ready_to_run_tasks|.
- std::pop_heap(task_namespace->ready_to_run_tasks.begin(),
- task_namespace->ready_to_run_tasks.end(),
- CompareTaskPriority);
- scoped_refptr<Task> task(task_namespace->ready_to_run_tasks.back()->task());
- task_namespace->ready_to_run_tasks.pop_back();
-
- // Add task namespace back to |ready_to_run_namespaces_| if not
- // empty after taking top priority task.
- if (!task_namespace->ready_to_run_tasks.empty()) {
- ready_to_run_namespaces_.push_back(task_namespace);
- std::push_heap(ready_to_run_namespaces_.begin(),
- ready_to_run_namespaces_.end(),
- CompareTaskNamespacePriority);
- }
+ RunTaskWithLockAcquired(thread_index);
+ }
- // Move task from |pending_tasks| to |running_tasks|.
- DCHECK(task_namespace->pending_tasks.contains(task.get()));
- DCHECK(!task_namespace->running_tasks.contains(task.get()));
- task_namespace->running_tasks.set(
- task.get(), task_namespace->pending_tasks.take_and_erase(task.get()));
+ // We noticed we should exit. Wake up the next worker so it knows it should
+ // exit as well (because the Shutdown() code only signals once).
+ has_ready_to_run_tasks_cv_.Signal();
+}
- // There may be more work available, so wake up another worker thread.
- has_ready_to_run_tasks_cv_.Signal();
+void TaskGraphRunner::RunTaskWithLockAcquired(int thread_index) {
+ lock_.AssertAcquired();
+ DCHECK(!ready_to_run_namespaces_.empty());
+
+ // Take top priority TaskNamespace from |ready_to_run_namespaces_|.
+ std::pop_heap(ready_to_run_namespaces_.begin(),
+ ready_to_run_namespaces_.end(),
+ CompareTaskNamespacePriority);
+ TaskNamespace* task_namespace = ready_to_run_namespaces_.back();
+ ready_to_run_namespaces_.pop_back();
+ DCHECK(!task_namespace->ready_to_run_tasks.empty());
+
+ // Take top priority task from |ready_to_run_tasks|.
+ std::pop_heap(task_namespace->ready_to_run_tasks.begin(),
+ task_namespace->ready_to_run_tasks.end(),
+ CompareTaskPriority);
+ scoped_refptr<Task> task(task_namespace->ready_to_run_tasks.back()->task());
+ task_namespace->ready_to_run_tasks.pop_back();
+
+ // Add task namespace back to |ready_to_run_namespaces_| if not
+ // empty after taking top priority task.
+ if (!task_namespace->ready_to_run_tasks.empty()) {
+ ready_to_run_namespaces_.push_back(task_namespace);
+ std::push_heap(ready_to_run_namespaces_.begin(),
+ ready_to_run_namespaces_.end(),
+ CompareTaskNamespacePriority);
+ }
- // Call WillRun() before releasing |lock_| and running task.
- task->WillRun();
+ // Move task from |pending_tasks| to |running_tasks|.
+ DCHECK(task_namespace->pending_tasks.contains(task.get()));
+ DCHECK(!task_namespace->running_tasks.contains(task.get()));
+ task_namespace->running_tasks.set(
+ task.get(), task_namespace->pending_tasks.take_and_erase(task.get()));
- {
- base::AutoUnlock unlock(lock_);
+ // There may be more work available, so wake up another worker thread.
+ has_ready_to_run_tasks_cv_.Signal();
- task->RunOnWorkerThread(thread_index);
- }
+ // Call WillRun() before releasing |lock_| and running task.
+ task->WillRun();
- // This will mark task as finished running.
- task->DidRun();
-
- // Now iterate over all dependents to remove dependency and check
- // if they are ready to run.
- scoped_ptr<GraphNode> node =
- task_namespace->running_tasks.take_and_erase(task.get());
- if (node) {
- bool ready_to_run_namespaces_has_heap_properties = true;
-
- for (GraphNode::Vector::const_iterator it = node->dependents().begin();
- it != node->dependents().end();
- ++it) {
- GraphNode* dependent_node = *it;
-
- dependent_node->remove_dependency();
- // Task is ready if it has no dependencies. Add it to
- // |ready_to_run_tasks_|.
- if (!dependent_node->num_dependencies()) {
- bool was_empty = task_namespace->ready_to_run_tasks.empty();
- task_namespace->ready_to_run_tasks.push_back(dependent_node);
- std::push_heap(task_namespace->ready_to_run_tasks.begin(),
- task_namespace->ready_to_run_tasks.end(),
- CompareTaskPriority);
- // Task namespace is ready if it has at least one ready
- // to run task. Add it to |ready_to_run_namespaces_| if
- // it just become ready.
- if (was_empty) {
- DCHECK(std::find(ready_to_run_namespaces_.begin(),
- ready_to_run_namespaces_.end(),
- task_namespace) == ready_to_run_namespaces_.end());
- ready_to_run_namespaces_.push_back(task_namespace);
- }
- ready_to_run_namespaces_has_heap_properties = false;
- }
- }
+ {
+ base::AutoUnlock unlock(lock_);
+
+ task->RunOnWorkerThread(thread_index);
+ }
- // Rearrange the task namespaces in |ready_to_run_namespaces_|
- // in such a way that they yet again form a heap.
- if (!ready_to_run_namespaces_has_heap_properties) {
- std::make_heap(ready_to_run_namespaces_.begin(),
- ready_to_run_namespaces_.end(),
- CompareTaskNamespacePriority);
+ // This will mark task as finished running.
+ task->DidRun();
+
+ // Now iterate over all dependents to remove dependency and check
+ // if they are ready to run.
+ scoped_ptr<GraphNode> node =
+ task_namespace->running_tasks.take_and_erase(task.get());
+ if (node) {
+ bool ready_to_run_namespaces_has_heap_properties = true;
+
+ for (GraphNode::Vector::const_iterator it = node->dependents().begin();
+ it != node->dependents().end();
+ ++it) {
+ GraphNode* dependent_node = *it;
+
+ dependent_node->remove_dependency();
+ // Task is ready if it has no dependencies. Add it to
+ // |ready_to_run_tasks_|.
+ if (!dependent_node->num_dependencies()) {
+ bool was_empty = task_namespace->ready_to_run_tasks.empty();
+ task_namespace->ready_to_run_tasks.push_back(dependent_node);
+ std::push_heap(task_namespace->ready_to_run_tasks.begin(),
+ task_namespace->ready_to_run_tasks.end(),
+ CompareTaskPriority);
+ // Task namespace is ready if it has at least one ready
+ // to run task. Add it to |ready_to_run_namespaces_| if
+ // it just become ready.
+ if (was_empty) {
+ DCHECK(std::find(ready_to_run_namespaces_.begin(),
+ ready_to_run_namespaces_.end(),
+ task_namespace) == ready_to_run_namespaces_.end());
+ ready_to_run_namespaces_.push_back(task_namespace);
+ }
+ ready_to_run_namespaces_has_heap_properties = false;
}
}
- // Finally add task to |completed_tasks_|.
- task_namespace->completed_tasks.push_back(task);
-
- // If namespace has finished running all tasks, wake up origin thread.
- if (HasFinishedRunningTasksInNamespace(task_namespace))
- has_namespaces_with_finished_running_tasks_cv_.Signal();
+ // Rearrange the task namespaces in |ready_to_run_namespaces_|
+ // in such a way that they yet again form a heap.
+ if (!ready_to_run_namespaces_has_heap_properties) {
+ std::make_heap(ready_to_run_namespaces_.begin(),
+ ready_to_run_namespaces_.end(),
+ CompareTaskNamespacePriority);
+ }
}
- // We noticed we should exit. Wake up the next worker so it knows it should
- // exit as well (because the Shutdown() code only signals once).
- has_ready_to_run_tasks_cv_.Signal();
+ // Finally add task to |completed_tasks_|.
+ task_namespace->completed_tasks.push_back(task);
+
+ // If namespace has finished running all tasks, wake up origin thread.
+ if (HasFinishedRunningTasksInNamespace(task_namespace))
+ has_namespaces_with_finished_running_tasks_cv_.Signal();
}
} // namespace internal
diff --git a/cc/resources/task_graph_runner.h b/cc/resources/task_graph_runner.h
index af7c116..eb03467 100644
--- a/cc/resources/task_graph_runner.h
+++ b/cc/resources/task_graph_runner.h
@@ -142,6 +142,10 @@ class CC_EXPORT TaskGraphRunner : public base::DelegateSimpleThread::Delegate {
void CollectCompletedTasks(NamespaceToken token,
Task::Vector* completed_tasks);
+ // Run one task on current thread. Returns false if no tasks are ready
+ // to run. This should only be used by tests.
+ bool RunTaskForTesting();
+
private:
struct TaskNamespace {
typedef std::vector<TaskNamespace*> Vector;
@@ -192,6 +196,10 @@ class CC_EXPORT TaskGraphRunner : public base::DelegateSimpleThread::Delegate {
// Overridden from base::DelegateSimpleThread:
virtual void Run() OVERRIDE;
+ // Run next task. Caller must acquire |lock_| prior to calling this
+ // function and make sure at least one task is ready to run.
+ void RunTaskWithLockAcquired(int thread_index);
+
// This lock protects all members of this class. Do not read or modify
// anything without holding this lock. Do not block while holding this
// lock.
diff --git a/cc/resources/task_graph_runner_perftest.cc b/cc/resources/task_graph_runner_perftest.cc
index 5e952a3..f47df14 100644
--- a/cc/resources/task_graph_runner_perftest.cc
+++ b/cc/resources/task_graph_runner_perftest.cc
@@ -4,6 +4,8 @@
#include "cc/resources/task_graph_runner.h"
+#include <vector>
+
#include "base/time/time.h"
#include "cc/base/completion_event.h"
#include "cc/test/lap_timer.h"
@@ -19,38 +21,22 @@ static const int kTimeCheckInterval = 10;
class PerfTaskImpl : public internal::Task {
public:
+ typedef std::vector<scoped_refptr<PerfTaskImpl> > Vector;
+
PerfTaskImpl() {}
// Overridden from internal::Task:
virtual void RunOnWorkerThread(unsigned thread_index) OVERRIDE {}
- private:
- virtual ~PerfTaskImpl() {}
-
- DISALLOW_COPY_AND_ASSIGN(PerfTaskImpl);
-};
-
-class PerfControlTaskImpl : public internal::Task {
- public:
- PerfControlTaskImpl() {}
-
- // Overridden from internal::Task:
- virtual void RunOnWorkerThread(unsigned thread_index) OVERRIDE {
- did_start_.Signal();
- can_finish_.Wait();
+ void Reset() {
+ did_schedule_ = false;
+ did_run_ = false;
}
- void WaitForTaskToStartRunning() { did_start_.Wait(); }
-
- void AllowTaskToFinish() { can_finish_.Signal(); }
-
private:
- virtual ~PerfControlTaskImpl() {}
-
- CompletionEvent did_start_;
- CompletionEvent can_finish_;
+ virtual ~PerfTaskImpl() {}
- DISALLOW_COPY_AND_ASSIGN(PerfControlTaskImpl);
+ DISALLOW_COPY_AND_ASSIGN(PerfTaskImpl);
};
class TaskGraphRunnerPerfTest : public testing::Test {
@@ -63,7 +49,8 @@ class TaskGraphRunnerPerfTest : public testing::Test {
// Overridden from testing::Test:
virtual void SetUp() OVERRIDE {
task_graph_runner_ =
- make_scoped_ptr(new internal::TaskGraphRunner(1, "PerfTest"));
+ make_scoped_ptr(new internal::TaskGraphRunner(0, // 0 worker threads
+ "PerfTest"));
namespace_token_ = task_graph_runner_->GetNamespaceToken();
}
virtual void TearDown() OVERRIDE { task_graph_runner_.reset(); }
@@ -74,23 +61,58 @@ class TaskGraphRunnerPerfTest : public testing::Test {
"*RESULT %s: %.2f runs/s\n", test_name.c_str(), timer_.LapsPerSecond());
}
+ void RunBuildTaskGraphTest(const std::string& test_name,
+ int num_top_level_tasks,
+ int num_tasks,
+ int num_leaf_tasks) {
+ PerfTaskImpl::Vector top_level_tasks;
+ PerfTaskImpl::Vector tasks;
+ PerfTaskImpl::Vector leaf_tasks;
+ CreateTasks(num_top_level_tasks, &top_level_tasks);
+ CreateTasks(num_tasks, &tasks);
+ CreateTasks(num_leaf_tasks, &leaf_tasks);
+
+ timer_.Reset();
+ do {
+ internal::GraphNode::Map graph;
+ BuildTaskGraph(top_level_tasks, tasks, leaf_tasks, &graph);
+ timer_.NextLap();
+ } while (!timer_.HasTimeLimitExpired());
+
+ perf_test::PrintResult("build_task_graph",
+ "",
+ test_name,
+ timer_.LapsPerSecond(),
+ "runs/s",
+ true);
+ }
+
void RunScheduleTasksTest(const std::string& test_name,
- unsigned max_depth,
- unsigned num_children_per_node) {
+ int num_top_level_tasks,
+ int num_tasks,
+ int num_leaf_tasks) {
+ PerfTaskImpl::Vector top_level_tasks;
+ PerfTaskImpl::Vector tasks;
+ PerfTaskImpl::Vector leaf_tasks;
+ CreateTasks(num_top_level_tasks, &top_level_tasks);
+ CreateTasks(num_tasks, &tasks);
+ CreateTasks(num_leaf_tasks, &leaf_tasks);
+
timer_.Reset();
do {
- scoped_refptr<PerfControlTaskImpl> leaf_task(new PerfControlTaskImpl);
- ScheduleTasks(NULL, leaf_task.get(), max_depth, num_children_per_node);
- leaf_task->WaitForTaskToStartRunning();
- ScheduleTasks(NULL, NULL, 0, 0);
- leaf_task->AllowTaskToFinish();
- task_graph_runner_->WaitForTasksToFinishRunning(namespace_token_);
- internal::Task::Vector completed_tasks;
- task_graph_runner_->CollectCompletedTasks(namespace_token_,
- &completed_tasks);
+ internal::GraphNode::Map graph;
+ BuildTaskGraph(top_level_tasks, tasks, leaf_tasks, &graph);
+ task_graph_runner_->SetTaskGraph(namespace_token_, &graph);
+ // Shouldn't be any tasks to collect as we reschedule the same set
+ // of tasks.
+ DCHECK_EQ(0u, CollectCompletedTasks());
timer_.NextLap();
} while (!timer_.HasTimeLimitExpired());
+ internal::GraphNode::Map empty;
+ task_graph_runner_->SetTaskGraph(namespace_token_, &empty);
+ CollectCompletedTasks();
+
perf_test::PrintResult("schedule_tasks",
"",
test_name,
@@ -99,16 +121,68 @@ class TaskGraphRunnerPerfTest : public testing::Test {
true);
}
- void RunExecuteTasksTest(const std::string& test_name,
- unsigned max_depth,
- unsigned num_children_per_node) {
+ void RunScheduleAlternateTasksTest(const std::string& test_name,
+ int num_top_level_tasks,
+ int num_tasks,
+ int num_leaf_tasks) {
+ const int kNumVersions = 2;
+ PerfTaskImpl::Vector top_level_tasks[kNumVersions];
+ PerfTaskImpl::Vector tasks[kNumVersions];
+ PerfTaskImpl::Vector leaf_tasks[kNumVersions];
+ for (int i = 0; i < kNumVersions; ++i) {
+ CreateTasks(num_top_level_tasks, &top_level_tasks[i]);
+ CreateTasks(num_tasks, &tasks[i]);
+ CreateTasks(num_leaf_tasks, &leaf_tasks[i]);
+ }
+
+ int count = 0;
+ timer_.Reset();
+ do {
+ internal::GraphNode::Map graph;
+ BuildTaskGraph(top_level_tasks[count % kNumVersions],
+ tasks[count % kNumVersions],
+ leaf_tasks[count % kNumVersions],
+ &graph);
+ task_graph_runner_->SetTaskGraph(namespace_token_, &graph);
+ CollectCompletedTasks();
+ ++count;
+ timer_.NextLap();
+ } while (!timer_.HasTimeLimitExpired());
+
+ internal::GraphNode::Map empty;
+ task_graph_runner_->SetTaskGraph(namespace_token_, &empty);
+ CollectCompletedTasks();
+
+ perf_test::PrintResult("schedule_alternate_tasks",
+ "",
+ test_name,
+ timer_.LapsPerSecond(),
+ "runs/s",
+ true);
+ }
+
+ void RunScheduleAndExecuteTasksTest(const std::string& test_name,
+ int num_top_level_tasks,
+ int num_tasks,
+ int num_leaf_tasks) {
+ PerfTaskImpl::Vector top_level_tasks;
+ PerfTaskImpl::Vector tasks;
+ PerfTaskImpl::Vector leaf_tasks;
+ CreateTasks(num_top_level_tasks, &top_level_tasks);
+ CreateTasks(num_tasks, &tasks);
+ CreateTasks(num_leaf_tasks, &leaf_tasks);
+
timer_.Reset();
do {
- ScheduleTasks(NULL, NULL, max_depth, num_children_per_node);
- task_graph_runner_->WaitForTasksToFinishRunning(namespace_token_);
- internal::Task::Vector completed_tasks;
- task_graph_runner_->CollectCompletedTasks(namespace_token_,
- &completed_tasks);
+ internal::GraphNode::Map graph;
+ BuildTaskGraph(top_level_tasks, tasks, leaf_tasks, &graph);
+ task_graph_runner_->SetTaskGraph(namespace_token_, &graph);
+ while (task_graph_runner_->RunTaskForTesting())
+ continue;
+ CollectCompletedTasks();
+ ResetTasks(&top_level_tasks);
+ ResetTasks(&tasks);
+ ResetTasks(&leaf_tasks);
timer_.NextLap();
} while (!timer_.HasTimeLimitExpired());
@@ -117,100 +191,123 @@ class TaskGraphRunnerPerfTest : public testing::Test {
}
private:
- void ScheduleTasks(internal::Task* root_task,
- internal::Task* leaf_task,
- unsigned max_depth,
- unsigned num_children_per_node) {
- internal::Task::Vector tasks;
- internal::GraphNode::Map graph;
-
- scoped_ptr<internal::GraphNode> root_node;
- if (root_task)
- root_node = make_scoped_ptr(new internal::GraphNode(root_task, 0u));
-
- scoped_ptr<internal::GraphNode> leaf_node;
- if (leaf_task)
- leaf_node = make_scoped_ptr(new internal::GraphNode(leaf_task, 0u));
-
- if (max_depth) {
- BuildTaskGraph(&tasks,
- &graph,
- root_node.get(),
- leaf_node.get(),
- 0,
- max_depth,
- num_children_per_node);
- }
+ void CreateTasks(int num_tasks, PerfTaskImpl::Vector* tasks) {
+ for (int i = 0; i < num_tasks; ++i)
+ tasks->push_back(make_scoped_refptr(new PerfTaskImpl));
+ }
- if (leaf_node)
- graph.set(leaf_task, leaf_node.Pass());
+ void ResetTasks(PerfTaskImpl::Vector* tasks) {
+ for (PerfTaskImpl::Vector::iterator it = tasks->begin(); it != tasks->end();
+ ++it) {
+ PerfTaskImpl* task = it->get();
+ task->Reset();
+ }
+ }
- if (root_node)
- graph.set(root_task, root_node.Pass());
+ void BuildTaskGraph(const PerfTaskImpl::Vector& top_level_tasks,
+ const PerfTaskImpl::Vector& tasks,
+ const PerfTaskImpl::Vector& leaf_tasks,
+ internal::GraphNode::Map* graph) {
+ typedef std::vector<internal::GraphNode*> NodeVector;
+
+ NodeVector top_level_nodes;
+ top_level_nodes.reserve(top_level_tasks.size());
+ for (PerfTaskImpl::Vector::const_iterator it = top_level_tasks.begin();
+ it != top_level_tasks.end();
+ ++it) {
+ internal::Task* top_level_task = it->get();
+ scoped_ptr<internal::GraphNode> top_level_node(
+ new internal::GraphNode(top_level_task, 0u));
+
+ top_level_nodes.push_back(top_level_node.get());
+ graph->set(top_level_task, top_level_node.Pass());
+ }
- task_graph_runner_->SetTaskGraph(namespace_token_, &graph);
+ NodeVector leaf_nodes;
+ leaf_nodes.reserve(leaf_tasks.size());
+ for (PerfTaskImpl::Vector::const_iterator it = leaf_tasks.begin();
+ it != leaf_tasks.end();
+ ++it) {
+ internal::Task* leaf_task = it->get();
+ scoped_ptr<internal::GraphNode> leaf_node(
+ new internal::GraphNode(leaf_task, 0u));
+
+ leaf_nodes.push_back(leaf_node.get());
+ graph->set(leaf_task, leaf_node.Pass());
+ }
- tasks_.swap(tasks);
- }
+ for (PerfTaskImpl::Vector::const_iterator it = tasks.begin();
+ it != tasks.end();
+ ++it) {
+ internal::Task* task = it->get();
+ scoped_ptr<internal::GraphNode> node(new internal::GraphNode(task, 0u));
+
+ for (NodeVector::iterator node_it = top_level_nodes.begin();
+ node_it != top_level_nodes.end();
+ ++node_it) {
+ internal::GraphNode* top_level_node = *node_it;
+ node->add_dependent(top_level_node);
+ top_level_node->add_dependency();
+ }
- void BuildTaskGraph(internal::Task::Vector* tasks,
- internal::GraphNode::Map* graph,
- internal::GraphNode* dependent_node,
- internal::GraphNode* leaf_node,
- unsigned current_depth,
- unsigned max_depth,
- unsigned num_children_per_node) {
- scoped_refptr<PerfTaskImpl> task(new PerfTaskImpl);
- scoped_ptr<internal::GraphNode> node(
- new internal::GraphNode(task.get(), 0u));
-
- if (current_depth < max_depth) {
- for (unsigned i = 0; i < num_children_per_node; ++i) {
- BuildTaskGraph(tasks,
- graph,
- node.get(),
- leaf_node,
- current_depth + 1,
- max_depth,
- num_children_per_node);
+ for (NodeVector::iterator node_it = leaf_nodes.begin();
+ node_it != leaf_nodes.end();
+ ++node_it) {
+ internal::GraphNode* leaf_node = *node_it;
+ leaf_node->add_dependent(node.get());
+ node->add_dependency();
}
- } else if (leaf_node) {
- leaf_node->add_dependent(node.get());
- node->add_dependency();
- }
- if (dependent_node) {
- node->add_dependent(dependent_node);
- dependent_node->add_dependency();
+ graph->set(task, node.Pass());
}
- graph->set(task.get(), node.Pass());
- tasks->push_back(task.get());
+ }
+
+ size_t CollectCompletedTasks() {
+ internal::Task::Vector completed_tasks;
+ task_graph_runner_->CollectCompletedTasks(namespace_token_,
+ &completed_tasks);
+ return completed_tasks.size();
}
scoped_ptr<internal::TaskGraphRunner> task_graph_runner_;
internal::NamespaceToken namespace_token_;
- internal::Task::Vector tasks_;
LapTimer timer_;
};
+TEST_F(TaskGraphRunnerPerfTest, BuildTaskGraph) {
+ RunBuildTaskGraphTest("0_1_0", 0, 1, 0);
+ RunBuildTaskGraphTest("0_32_0", 0, 32, 0);
+ RunBuildTaskGraphTest("2_1_0", 2, 1, 0);
+ RunBuildTaskGraphTest("2_32_0", 2, 32, 0);
+ RunBuildTaskGraphTest("2_1_1", 2, 1, 1);
+ RunBuildTaskGraphTest("2_32_1", 2, 32, 1);
+}
+
TEST_F(TaskGraphRunnerPerfTest, ScheduleTasks) {
- RunScheduleTasksTest("1_10", 1, 10);
- RunScheduleTasksTest("1_1000", 1, 1000);
- RunScheduleTasksTest("2_10", 2, 10);
- RunScheduleTasksTest("5_5", 5, 5);
- RunScheduleTasksTest("10_2", 10, 2);
- RunScheduleTasksTest("1000_1", 1000, 1);
- RunScheduleTasksTest("10_1", 10, 1);
+ RunScheduleTasksTest("0_1_0", 0, 1, 0);
+ RunScheduleTasksTest("0_32_0", 0, 32, 0);
+ RunScheduleTasksTest("2_1_0", 2, 1, 0);
+ RunScheduleTasksTest("2_32_0", 2, 32, 0);
+ RunScheduleTasksTest("2_1_1", 2, 1, 1);
+ RunScheduleTasksTest("2_32_1", 2, 32, 1);
+}
+
+TEST_F(TaskGraphRunnerPerfTest, ScheduleAlternateTasks) {
+ RunScheduleAlternateTasksTest("0_1_0", 0, 1, 0);
+ RunScheduleAlternateTasksTest("0_32_0", 0, 32, 0);
+ RunScheduleAlternateTasksTest("2_1_0", 2, 1, 0);
+ RunScheduleAlternateTasksTest("2_32_0", 2, 32, 0);
+ RunScheduleAlternateTasksTest("2_1_1", 2, 1, 1);
+ RunScheduleAlternateTasksTest("2_32_1", 2, 32, 1);
}
-TEST_F(TaskGraphRunnerPerfTest, ExecuteTasks) {
- RunExecuteTasksTest("1_10", 1, 10);
- RunExecuteTasksTest("1_1000", 1, 1000);
- RunExecuteTasksTest("2_10", 2, 10);
- RunExecuteTasksTest("5_5", 5, 5);
- RunExecuteTasksTest("10_2", 10, 2);
- RunExecuteTasksTest("1000_1", 1000, 1);
- RunExecuteTasksTest("10_1", 10, 1);
+TEST_F(TaskGraphRunnerPerfTest, ScheduleAndExecuteTasks) {
+ RunScheduleAndExecuteTasksTest("0_1_0", 0, 1, 0);
+ RunScheduleAndExecuteTasksTest("0_32_0", 0, 32, 0);
+ RunScheduleAndExecuteTasksTest("2_1_0", 2, 1, 0);
+ RunScheduleAndExecuteTasksTest("2_32_0", 2, 32, 0);
+ RunScheduleAndExecuteTasksTest("2_1_1", 2, 1, 1);
+ RunScheduleAndExecuteTasksTest("2_32_1", 2, 32, 1);
}
} // namespace