-rw-r--r--  cc/resources/image_raster_worker_pool.cc        109
-rw-r--r--  cc/resources/image_raster_worker_pool.h           16
-rw-r--r--  cc/resources/pixel_buffer_raster_worker_pool.cc  308
-rw-r--r--  cc/resources/pixel_buffer_raster_worker_pool.h    20
-rw-r--r--  cc/resources/raster_worker_pool.cc               205
-rw-r--r--  cc/resources/raster_worker_pool.h                 59
-rw-r--r--  cc/resources/raster_worker_pool_perftest.cc       41
-rw-r--r--  cc/resources/raster_worker_pool_unittest.cc        2
-rw-r--r--  cc/resources/tile.h                               13
-rw-r--r--  cc/resources/tile_manager.cc                     234
-rw-r--r--  cc/resources/tile_manager.h                       12
-rw-r--r--  cc/resources/tile_manager_unittest.cc             56
-rw-r--r--  cc/resources/worker_pool.cc                       44
-rw-r--r--  cc/resources/worker_pool.h                        74
-rw-r--r--  cc/resources/worker_pool_perftest.cc              77
-rw-r--r--  cc/resources/worker_pool_unittest.cc              14
-rw-r--r--  cc/test/fake_tile_manager.cc                      13
-rw-r--r--  cc/test/fake_tile_manager.h                        5
-rw-r--r--  cc/test/layer_tree_test.cc                         6
-rw-r--r--  cc/test/layer_tree_test.h                          1
-rw-r--r--  cc/trees/layer_tree_host_impl.cc                   2
-rw-r--r--  cc/trees/layer_tree_host_unittest.cc               8
-rw-r--r--  cc/trees/layer_tree_host_unittest_scroll.cc        5
23 files changed, 924 insertions(+), 400 deletions(-)
diff --git a/cc/resources/image_raster_worker_pool.cc b/cc/resources/image_raster_worker_pool.cc
index 1d96b27..6a08e70 100644
--- a/cc/resources/image_raster_worker_pool.cc
+++ b/cc/resources/image_raster_worker_pool.cc
@@ -5,6 +5,8 @@
#include "cc/resources/image_raster_worker_pool.h"
#include "base/debug/trace_event.h"
+#include "base/values.h"
+#include "cc/debug/traced_value.h"
#include "cc/resources/resource.h"
#include "third_party/skia/include/core/SkDevice.h"
@@ -28,6 +30,7 @@ class ImageWorkerPoolTaskImpl : public internal::WorkerPoolTask {
// Overridden from internal::WorkerPoolTask:
virtual void RunOnWorkerThread(unsigned thread_index) OVERRIDE {
+ TRACE_EVENT0("cc", "ImageWorkerPoolTaskImpl::RunOnWorkerThread");
if (!buffer_)
return;
@@ -59,7 +62,9 @@ class ImageWorkerPoolTaskImpl : public internal::WorkerPoolTask {
ImageRasterWorkerPool::ImageRasterWorkerPool(
ResourceProvider* resource_provider, size_t num_threads)
- : RasterWorkerPool(resource_provider, num_threads) {
+ : RasterWorkerPool(resource_provider, num_threads),
+ raster_tasks_pending_(false),
+ raster_tasks_required_for_activation_pending_(false) {
}
ImageRasterWorkerPool::~ImageRasterWorkerPool() {
@@ -71,7 +76,31 @@ void ImageRasterWorkerPool::ScheduleTasks(RasterTask::Queue* queue) {
RasterWorkerPool::SetRasterTasks(queue);
- RasterTaskGraph graph;
+ if (!raster_tasks_pending_)
+ TRACE_EVENT_ASYNC_BEGIN0("cc", "ScheduledTasks", this);
+
+ raster_tasks_pending_ = true;
+ raster_tasks_required_for_activation_pending_ = true;
+
+ unsigned priority = 0u;
+ TaskGraph graph;
+
+ scoped_refptr<internal::WorkerPoolTask>
+ new_raster_required_for_activation_finished_task(
+ CreateRasterRequiredForActivationFinishedTask());
+ internal::GraphNode* raster_required_for_activation_finished_node =
+ CreateGraphNodeForTask(
+ new_raster_required_for_activation_finished_task.get(),
+ priority++,
+ &graph);
+
+ scoped_refptr<internal::WorkerPoolTask> new_raster_finished_task(
+ CreateRasterFinishedTask());
+ internal::GraphNode* raster_finished_node =
+ CreateGraphNodeForTask(new_raster_finished_task.get(),
+ priority++,
+ &graph);
+
for (RasterTaskVector::const_iterator it = raster_tasks().begin();
it != raster_tasks().end(); ++it) {
internal::RasterWorkerPoolTask* task = it->get();
@@ -79,7 +108,14 @@ void ImageRasterWorkerPool::ScheduleTasks(RasterTask::Queue* queue) {
TaskMap::iterator image_it = image_tasks_.find(task);
if (image_it != image_tasks_.end()) {
internal::WorkerPoolTask* image_task = image_it->second.get();
- graph.InsertRasterTask(image_task, task->dependencies());
+ CreateGraphNodeForImageTask(
+ image_task,
+ task->dependencies(),
+ priority++,
+ IsRasterTaskRequiredForActivation(task),
+ raster_required_for_activation_finished_node,
+ raster_finished_node,
+ &graph);
continue;
}
@@ -99,10 +135,41 @@ void ImageRasterWorkerPool::ScheduleTasks(RasterTask::Queue* queue) {
base::Unretained(this),
make_scoped_refptr(task))));
image_tasks_[task] = new_image_task;
- graph.InsertRasterTask(new_image_task.get(), task->dependencies());
+ CreateGraphNodeForImageTask(
+ new_image_task.get(),
+ task->dependencies(),
+ priority++,
+ IsRasterTaskRequiredForActivation(task),
+ raster_required_for_activation_finished_node,
+ raster_finished_node,
+ &graph);
}
- SetRasterTaskGraph(&graph);
+ SetTaskGraph(&graph);
+
+ set_raster_finished_task(new_raster_finished_task);
+ set_raster_required_for_activation_finished_task(
+ new_raster_required_for_activation_finished_task);
+
+ TRACE_EVENT_ASYNC_STEP1(
+ "cc", "ScheduledTasks", this, "rasterizing",
+ "state", TracedValue::FromValue(StateAsValue().release()));
+}
+
+void ImageRasterWorkerPool::OnRasterTasksFinished() {
+ DCHECK(raster_tasks_pending_);
+ raster_tasks_pending_ = false;
+ TRACE_EVENT_ASYNC_END0("cc", "ScheduledTasks", this);
+ client()->DidFinishedRunningTasks();
+}
+
+void ImageRasterWorkerPool::OnRasterTasksRequiredForActivationFinished() {
+ DCHECK(raster_tasks_required_for_activation_pending_);
+ raster_tasks_required_for_activation_pending_ = false;
+ TRACE_EVENT_ASYNC_STEP1(
+ "cc", "ScheduledTasks", this, "rasterizing",
+ "state", TracedValue::FromValue(StateAsValue().release()));
+ client()->DidFinishedRunningTasksRequiredForActivation();
}
void ImageRasterWorkerPool::OnRasterTaskCompleted(
@@ -127,4 +194,36 @@ void ImageRasterWorkerPool::OnRasterTaskCompleted(
image_tasks_.erase(task.get());
}
+scoped_ptr<base::Value> ImageRasterWorkerPool::StateAsValue() const {
+ scoped_ptr<base::DictionaryValue> state(new base::DictionaryValue);
+
+ state->SetBoolean("tasks_required_for_activation_pending",
+ raster_tasks_required_for_activation_pending_);
+ state->Set("scheduled_state", ScheduledStateAsValue().release());
+ return state.PassAs<base::Value>();
+}
+
+// static
+void ImageRasterWorkerPool::CreateGraphNodeForImageTask(
+ internal::WorkerPoolTask* image_task,
+ const TaskVector& decode_tasks,
+ unsigned priority,
+ bool is_required_for_activation,
+ internal::GraphNode* raster_required_for_activation_finished_node,
+ internal::GraphNode* raster_finished_node,
+ TaskGraph* graph) {
+ internal::GraphNode* image_node = CreateGraphNodeForRasterTask(image_task,
+ decode_tasks,
+ priority,
+ graph);
+
+ if (is_required_for_activation) {
+ raster_required_for_activation_finished_node->add_dependency();
+ image_node->add_dependent(raster_required_for_activation_finished_node);
+ }
+
+ raster_finished_node->add_dependency();
+ image_node->add_dependent(raster_finished_node);
+}
+
} // namespace cc
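
The ScheduleTasks() change above replaces the old RasterTaskGraph with explicit graph nodes: two synthetic "finished" tasks are created up front, every image task node is wired in as a dependency of the "all raster finished" node, and activation-critical tasks additionally feed the "required for activation finished" node. Below is a minimal, self-contained sketch of that wiring; GraphNode, WireImageNode and the tile names are simplified stand-ins for illustration, not the real cc::internal types.

#include <cstddef>
#include <iostream>
#include <string>
#include <vector>

struct GraphNode {
  explicit GraphNode(const std::string& name)
      : name(name), num_dependencies(0) {}

  void add_dependency() { ++num_dependencies; }
  void add_dependent(GraphNode* node) { dependents.push_back(node); }

  std::string name;
  std::size_t num_dependencies;        // Tasks that must finish before this one runs.
  std::vector<GraphNode*> dependents;  // Nodes to notify when this task finishes.
};

// Mirrors the shape of CreateGraphNodeForImageTask(): every image node feeds
// the "all raster finished" node, and only activation-critical nodes also
// feed the "raster required for activation finished" node.
void WireImageNode(GraphNode* image_node,
                   bool is_required_for_activation,
                   GraphNode* required_for_activation_finished_node,
                   GraphNode* raster_finished_node) {
  if (is_required_for_activation) {
    required_for_activation_finished_node->add_dependency();
    image_node->add_dependent(required_for_activation_finished_node);
  }
  raster_finished_node->add_dependency();
  image_node->add_dependent(raster_finished_node);
}

int main() {
  GraphNode activation_done("raster_required_for_activation_finished");
  GraphNode all_done("raster_finished");
  GraphNode tile_a("tile_a");
  GraphNode tile_b("tile_b");

  WireImageNode(&tile_a, /*is_required_for_activation=*/true,
                &activation_done, &all_done);
  WireImageNode(&tile_b, /*is_required_for_activation=*/false,
                &activation_done, &all_done);

  // activation_done waits on 1 task, all_done waits on 2.
  std::cout << activation_done.num_dependencies << " "
            << all_done.num_dependencies << "\n";
  return 0;
}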
diff --git a/cc/resources/image_raster_worker_pool.h b/cc/resources/image_raster_worker_pool.h
index 453b3a3..d4f4d74 100644
--- a/cc/resources/image_raster_worker_pool.h
+++ b/cc/resources/image_raster_worker_pool.h
@@ -21,6 +21,8 @@ class CC_EXPORT ImageRasterWorkerPool : public RasterWorkerPool {
// Overridden from RasterWorkerPool:
virtual void ScheduleTasks(RasterTask::Queue* queue) OVERRIDE;
+ virtual void OnRasterTasksFinished() OVERRIDE;
+ virtual void OnRasterTasksRequiredForActivationFinished() OVERRIDE;
private:
ImageRasterWorkerPool(ResourceProvider* resource_provider,
@@ -29,8 +31,22 @@ class CC_EXPORT ImageRasterWorkerPool : public RasterWorkerPool {
void OnRasterTaskCompleted(
scoped_refptr<internal::RasterWorkerPoolTask> task, bool was_canceled);
+ scoped_ptr<base::Value> StateAsValue() const;
+
+ static void CreateGraphNodeForImageTask(
+ internal::WorkerPoolTask* image_task,
+ const TaskVector& decode_tasks,
+ unsigned priority,
+ bool is_required_for_activation,
+ internal::GraphNode* raster_required_for_activation_finished_node,
+ internal::GraphNode* raster_finished_node,
+ TaskGraph* graph);
+
TaskMap image_tasks_;
+ bool raster_tasks_pending_;
+ bool raster_tasks_required_for_activation_pending_;
+
DISALLOW_COPY_AND_ASSIGN(ImageRasterWorkerPool);
};
diff --git a/cc/resources/pixel_buffer_raster_worker_pool.cc b/cc/resources/pixel_buffer_raster_worker_pool.cc
index bbdb532..3d87411 100644
--- a/cc/resources/pixel_buffer_raster_worker_pool.cc
+++ b/cc/resources/pixel_buffer_raster_worker_pool.cc
@@ -4,7 +4,10 @@
#include "cc/resources/pixel_buffer_raster_worker_pool.h"
+#include "base/containers/stack_container.h"
#include "base/debug/trace_event.h"
+#include "base/values.h"
+#include "cc/debug/traced_value.h"
#include "cc/resources/resource.h"
#include "third_party/skia/include/core/SkDevice.h"
@@ -77,18 +80,37 @@ const size_t kMaxPendingUploadBytes = 16 * 2 * kMaxBytesUploadedPerMs;
const int kCheckForCompletedRasterTasksDelayMs = 6;
-const size_t kMaxPendingRasterBytes =
- kMaxBytesUploadedPerMs * kCheckForCompletedRasterTasksDelayMs;
+const size_t kMaxScheduledRasterTasks = 48;
+
+typedef base::StackVector<internal::GraphNode*,
+ kMaxScheduledRasterTasks> NodeVector;
+
+void AddDependenciesToGraphNode(
+ internal::GraphNode* node,
+ const NodeVector::ContainerType& dependencies) {
+ for (NodeVector::ContainerType::const_iterator it = dependencies.begin();
+ it != dependencies.end(); ++it) {
+ internal::GraphNode* dependency = *it;
+
+ node->add_dependency();
+ dependency->add_dependent(node);
+ }
+}
} // namespace
PixelBufferRasterWorkerPool::PixelBufferRasterWorkerPool(
ResourceProvider* resource_provider,
- size_t num_threads) : RasterWorkerPool(resource_provider, num_threads),
- shutdown_(false),
- bytes_pending_upload_(0),
- has_performed_uploads_since_last_flush_(false),
- check_for_completed_raster_tasks_pending_(false) {
+ size_t num_threads)
+ : RasterWorkerPool(resource_provider, num_threads),
+ shutdown_(false),
+ scheduled_raster_task_count_(0),
+ bytes_pending_upload_(0),
+ has_performed_uploads_since_last_flush_(false),
+ check_for_completed_raster_tasks_pending_(false),
+ should_notify_client_if_no_tasks_are_pending_(false),
+ should_notify_client_if_no_tasks_required_for_activation_are_pending_(
+ false) {
}
PixelBufferRasterWorkerPool::~PixelBufferRasterWorkerPool() {
@@ -102,7 +124,10 @@ PixelBufferRasterWorkerPool::~PixelBufferRasterWorkerPool() {
void PixelBufferRasterWorkerPool::Shutdown() {
shutdown_ = true;
RasterWorkerPool::Shutdown();
- CheckForCompletedRasterTasks();
+ RasterWorkerPool::CheckForCompletedTasks();
+ CheckForCompletedUploads();
+ check_for_completed_raster_tasks_callback_.Cancel();
+ check_for_completed_raster_tasks_pending_ = false;
for (TaskMap::iterator it = pixel_buffer_tasks_.begin();
it != pixel_buffer_tasks_.end(); ++it) {
internal::RasterWorkerPoolTask* task = it->first;
@@ -114,6 +139,7 @@ void PixelBufferRasterWorkerPool::Shutdown() {
completed_tasks_.push_back(task);
}
}
+ DCHECK_EQ(completed_tasks_.size(), pixel_buffer_tasks_.size());
}
void PixelBufferRasterWorkerPool::ScheduleTasks(RasterTask::Queue* queue) {
@@ -121,6 +147,12 @@ void PixelBufferRasterWorkerPool::ScheduleTasks(RasterTask::Queue* queue) {
RasterWorkerPool::SetRasterTasks(queue);
+ if (!should_notify_client_if_no_tasks_are_pending_)
+ TRACE_EVENT_ASYNC_BEGIN0("cc", "ScheduledTasks", this);
+
+ should_notify_client_if_no_tasks_are_pending_ = true;
+ should_notify_client_if_no_tasks_required_for_activation_are_pending_ = true;
+
// Build new pixel buffer task set.
TaskMap new_pixel_buffer_tasks;
for (RasterTaskVector::const_iterator it = raster_tasks().begin();
@@ -160,13 +192,35 @@ void PixelBufferRasterWorkerPool::ScheduleTasks(RasterTask::Queue* queue) {
}
}
+ tasks_required_for_activation_.clear();
+ for (TaskMap::iterator it = new_pixel_buffer_tasks.begin();
+ it != new_pixel_buffer_tasks.end(); ++it) {
+ internal::RasterWorkerPoolTask* task = it->first;
+ if (IsRasterTaskRequiredForActivation(task))
+ tasks_required_for_activation_.insert(task);
+ }
+
pixel_buffer_tasks_.swap(new_pixel_buffer_tasks);
- // This will schedule more tasks after checking for completed raster
- // tasks. It's worth checking for completed tasks when ScheduleTasks()
- // is called as priorities might have changed and this allows us to
- // schedule as many new top priority tasks as possible.
- CheckForCompletedRasterTasks();
+ // Check for completed tasks when ScheduleTasks() is called as
+ // priorities might have changed and this maximizes the number
+ // of top priority tasks that are scheduled.
+ RasterWorkerPool::CheckForCompletedTasks();
+ CheckForCompletedUploads();
+ FlushUploads();
+
+ // Schedule new tasks.
+ ScheduleMoreTasks();
+
+ // Cancel any pending check for completed raster tasks and schedule
+ // another check.
+ check_for_completed_raster_tasks_callback_.Cancel();
+ check_for_completed_raster_tasks_pending_ = false;
+ ScheduleCheckForCompletedRasterTasks();
+
+ TRACE_EVENT_ASYNC_STEP1(
+ "cc", "ScheduledTasks", this, StateName(),
+ "state", TracedValue::FromValue(StateAsValue().release()));
}
void PixelBufferRasterWorkerPool::CheckForCompletedTasks() {
@@ -176,8 +230,11 @@ void PixelBufferRasterWorkerPool::CheckForCompletedTasks() {
CheckForCompletedUploads();
FlushUploads();
- while (!completed_tasks_.empty()) {
- internal::RasterWorkerPoolTask* task = completed_tasks_.front().get();
+ TaskDeque completed_tasks;
+ completed_tasks_.swap(completed_tasks);
+
+ while (!completed_tasks.empty()) {
+ internal::RasterWorkerPoolTask* task = completed_tasks.front().get();
DCHECK(pixel_buffer_tasks_.find(task) != pixel_buffer_tasks_.end());
pixel_buffer_tasks_.erase(task);
@@ -186,14 +243,33 @@ void PixelBufferRasterWorkerPool::CheckForCompletedTasks() {
task->CompleteOnOriginThread();
task->DidComplete();
- completed_tasks_.pop_front();
+ completed_tasks.pop_front();
}
}
void PixelBufferRasterWorkerPool::OnRasterTasksFinished() {
- // Call CheckForCompletedTasks() when we've finished running all raster
- // tasks needed since last time ScheduleMoreTasks() was called. This
- // reduces latency when processing only a small number of raster tasks.
+ // |should_notify_client_if_no_tasks_are_pending_| can be set to false as
+ // a result of a scheduled CheckForCompletedRasterTasks() call. No need to
+ // perform another check in that case as we've already notified the client.
+ if (!should_notify_client_if_no_tasks_are_pending_)
+ return;
+
+ // Call CheckForCompletedRasterTasks() when we've finished running all
+ // raster tasks needed since last time ScheduleTasks() was called.
+ // This reduces latency between the time when all tasks have finished
+ // running and the time when the client is notified.
+ CheckForCompletedRasterTasks();
+}
+
+void PixelBufferRasterWorkerPool::OnRasterTasksRequiredForActivationFinished() {
+ // Analogous to OnRasterTasksFinished(), there's no need to call
+ // CheckForCompletedRasterTasks() if the client has already been notified.
+ if (!should_notify_client_if_no_tasks_required_for_activation_are_pending_)
+ return;
+
+ // This reduces latency between the time when all tasks required for
+ // activation have finished running and the time when the client is
+ // notified.
CheckForCompletedRasterTasks();
}
@@ -272,6 +348,8 @@ void PixelBufferRasterWorkerPool::CheckForCompletedUploads() {
task) == completed_tasks_.end());
completed_tasks_.push_back(task);
+ tasks_required_for_activation_.erase(task);
+
tasks_with_completed_uploads.pop_front();
}
}
@@ -294,6 +372,8 @@ void PixelBufferRasterWorkerPool::CheckForCompletedRasterTasks() {
TRACE_EVENT0(
"cc", "PixelBufferRasterWorkerPool::CheckForCompletedRasterTasks");
+ DCHECK(should_notify_client_if_no_tasks_are_pending_);
+
check_for_completed_raster_tasks_callback_.Cancel();
check_for_completed_raster_tasks_pending_ = false;
@@ -301,21 +381,55 @@ void PixelBufferRasterWorkerPool::CheckForCompletedRasterTasks() {
CheckForCompletedUploads();
FlushUploads();
- ScheduleMoreTasks();
-
- // Make sure another check for completed uploads is scheduled
- // while there is still pending uploads left.
- if (!tasks_with_pending_upload_.empty())
+ // Determine what client notifications to generate.
+ bool will_notify_client_that_no_tasks_required_for_activation_are_pending =
+ (should_notify_client_if_no_tasks_required_for_activation_are_pending_ &&
+ !HasPendingTasksRequiredForActivation());
+ bool will_notify_client_that_no_tasks_are_pending =
+ (should_notify_client_if_no_tasks_are_pending_ &&
+ !HasPendingTasks());
+
+ // Adjust the need to generate notifications before scheduling more tasks.
+ should_notify_client_if_no_tasks_required_for_activation_are_pending_ &=
+ !will_notify_client_that_no_tasks_required_for_activation_are_pending;
+ should_notify_client_if_no_tasks_are_pending_ &=
+ !will_notify_client_that_no_tasks_are_pending;
+
+ if (PendingRasterTaskCount())
+ ScheduleMoreTasks();
+
+ TRACE_EVENT_ASYNC_STEP1(
+ "cc", "ScheduledTasks", this, StateName(),
+ "state", TracedValue::FromValue(StateAsValue().release()));
+
+ // Schedule another check for completed raster tasks while there are
+ // pending raster tasks or pending uploads.
+ if (HasPendingTasks())
ScheduleCheckForCompletedRasterTasks();
+
+ // Generate client notifications.
+ if (will_notify_client_that_no_tasks_required_for_activation_are_pending)
+ client()->DidFinishedRunningTasksRequiredForActivation();
+ if (will_notify_client_that_no_tasks_are_pending) {
+ TRACE_EVENT_ASYNC_END0("cc", "ScheduledTasks", this);
+ client()->DidFinishedRunningTasks();
+ }
}
void PixelBufferRasterWorkerPool::ScheduleMoreTasks() {
TRACE_EVENT0("cc", "PixelBufferRasterWorkerPool::ScheduleMoreTasks");
+ enum RasterTaskType {
+ PREPAINT_TYPE = 0,
+ REQUIRED_FOR_ACTIVATION_TYPE = 1,
+ NUM_TYPES = 2
+ };
+ NodeVector tasks[NUM_TYPES];
+ unsigned priority = 2u; // 0-1 reserved for RasterFinished tasks.
+ TaskGraph graph;
+
size_t bytes_pending_upload = bytes_pending_upload_;
- size_t bytes_pending_raster = 0;
- RasterTaskGraph graph;
for (RasterTaskVector::const_iterator it = raster_tasks().begin();
it != raster_tasks().end(); ++it) {
internal::RasterWorkerPoolTask* task = it->get();
@@ -347,21 +461,28 @@ void PixelBufferRasterWorkerPool::ScheduleMoreTasks() {
continue;
}
- // Throttle raster tasks based on bytes pending if raster has not
- // finished.
- size_t new_bytes_pending_raster = bytes_pending_raster;
- new_bytes_pending_raster += task->resource()->bytes();
- if (new_bytes_pending_raster > kMaxPendingRasterBytes)
+ // Throttle raster tasks based on kMaxScheduledRasterTasks.
+ size_t scheduled_raster_task_count =
+ tasks[PREPAINT_TYPE].container().size() +
+ tasks[REQUIRED_FOR_ACTIVATION_TYPE].container().size();
+ if (scheduled_raster_task_count >= kMaxScheduledRasterTasks)
break;
- // Update both |bytes_pending_raster| and |bytes_pending_upload|
- // now that task has cleared all throttling limits.
- bytes_pending_raster = new_bytes_pending_raster;
+ // Update |bytes_pending_upload| now that task has cleared all
+ // throttling limits.
bytes_pending_upload = new_bytes_pending_upload;
+ RasterTaskType type = IsRasterTaskRequiredForActivation(task) ?
+ REQUIRED_FOR_ACTIVATION_TYPE :
+ PREPAINT_TYPE;
+
// Use existing pixel buffer task if available.
if (pixel_buffer_task) {
- graph.InsertRasterTask(pixel_buffer_task, task->dependencies());
+ tasks[type].container().push_back(
+ CreateGraphNodeForRasterTask(pixel_buffer_task,
+ task->dependencies(),
+ priority++,
+ &graph));
continue;
}
@@ -382,16 +503,67 @@ void PixelBufferRasterWorkerPool::ScheduleMoreTasks() {
base::Unretained(this),
make_scoped_refptr(task))));
pixel_buffer_tasks_[task] = new_pixel_buffer_task;
- graph.InsertRasterTask(new_pixel_buffer_task.get(), task->dependencies());
+ tasks[type].container().push_back(
+ CreateGraphNodeForRasterTask(new_pixel_buffer_task.get(),
+ task->dependencies(),
+ priority++,
+ &graph));
}
- SetRasterTaskGraph(&graph);
+ scoped_refptr<internal::WorkerPoolTask>
+ new_raster_required_for_activation_finished_task;
+
+ size_t scheduled_raster_task_required_for_activation_count =
+ tasks[REQUIRED_FOR_ACTIVATION_TYPE].container().size();
+ DCHECK_LE(scheduled_raster_task_required_for_activation_count,
+ tasks_required_for_activation_.size());
+ // Schedule OnRasterTasksRequiredForActivationFinished call only when
+ // notification is pending and throttling is not preventing all pending
+ // tasks required for activation from being scheduled.
+ if (scheduled_raster_task_required_for_activation_count ==
+ tasks_required_for_activation_.size() &&
+ should_notify_client_if_no_tasks_required_for_activation_are_pending_) {
+ new_raster_required_for_activation_finished_task =
+ CreateRasterRequiredForActivationFinishedTask();
+ internal::GraphNode* raster_required_for_activation_finished_node =
+ CreateGraphNodeForTask(
+ new_raster_required_for_activation_finished_task.get(),
+ 0u, // Priority 0
+ &graph);
+ AddDependenciesToGraphNode(
+ raster_required_for_activation_finished_node,
+ tasks[REQUIRED_FOR_ACTIVATION_TYPE].container());
+ }
- // At least one task that could need an upload is now pending, schedule
- // a check for completed raster tasks to ensure this upload is dispatched
- // without too much latency.
- if (bytes_pending_raster)
- ScheduleCheckForCompletedRasterTasks();
+ scoped_refptr<internal::WorkerPoolTask> new_raster_finished_task;
+
+ size_t scheduled_raster_task_count =
+ tasks[PREPAINT_TYPE].container().size() +
+ tasks[REQUIRED_FOR_ACTIVATION_TYPE].container().size();
+ DCHECK_LE(scheduled_raster_task_count, PendingRasterTaskCount());
+ // Schedule OnRasterTasksFinished call only when notification is pending
+ // and throttling is not preventing all pending tasks from being scheduled.
+ if (scheduled_raster_task_count == PendingRasterTaskCount() &&
+ should_notify_client_if_no_tasks_are_pending_) {
+ new_raster_finished_task = CreateRasterFinishedTask();
+ internal::GraphNode* raster_finished_node =
+ CreateGraphNodeForTask(new_raster_finished_task.get(),
+ 1u, // Priority 1
+ &graph);
+ for (unsigned type = 0; type < NUM_TYPES; ++type) {
+ AddDependenciesToGraphNode(
+ raster_finished_node,
+ tasks[type].container());
+ }
+ }
+
+ SetTaskGraph(&graph);
+
+ scheduled_raster_task_count_ = scheduled_raster_task_count;
+
+ set_raster_finished_task(new_raster_finished_task);
+ set_raster_required_for_activation_finished_task(
+ new_raster_required_for_activation_finished_task);
}
void PixelBufferRasterWorkerPool::OnRasterTaskCompleted(
@@ -414,6 +586,7 @@ void PixelBufferRasterWorkerPool::OnRasterTaskCompleted(
completed_tasks_.end(),
task) == completed_tasks_.end());
completed_tasks_.push_back(task);
+ tasks_required_for_activation_.erase(task);
return;
}
@@ -424,4 +597,55 @@ void PixelBufferRasterWorkerPool::OnRasterTaskCompleted(
tasks_with_pending_upload_.push_back(task);
}
+unsigned PixelBufferRasterWorkerPool::PendingRasterTaskCount() const {
+ unsigned num_completed_raster_tasks =
+ tasks_with_pending_upload_.size() + completed_tasks_.size();
+ DCHECK_GE(pixel_buffer_tasks_.size(), num_completed_raster_tasks);
+ return pixel_buffer_tasks_.size() - num_completed_raster_tasks;
+}
+
+bool PixelBufferRasterWorkerPool::HasPendingTasks() const {
+ return PendingRasterTaskCount() || !tasks_with_pending_upload_.empty();
+}
+
+bool PixelBufferRasterWorkerPool::HasPendingTasksRequiredForActivation() const {
+ return !tasks_required_for_activation_.empty();
+}
+
+const char* PixelBufferRasterWorkerPool::StateName() const {
+ if (scheduled_raster_task_count_)
+ return "rasterizing";
+ if (PendingRasterTaskCount())
+ return "throttled";
+ if (!tasks_with_pending_upload_.empty())
+ return "waiting_for_uploads";
+
+ return "finishing";
+}
+
+scoped_ptr<base::Value> PixelBufferRasterWorkerPool::StateAsValue() const {
+ scoped_ptr<base::DictionaryValue> state(new base::DictionaryValue);
+
+ state->SetInteger("completed_count", completed_tasks_.size());
+ state->SetInteger("pending_count", pixel_buffer_tasks_.size());
+ state->SetInteger("pending_upload_count", tasks_with_pending_upload_.size());
+ state->SetInteger("required_for_activation_count",
+ tasks_required_for_activation_.size());
+ state->Set("scheduled_state", ScheduledStateAsValue().release());
+ state->Set("throttle_state", ThrottleStateAsValue().release());
+ return state.PassAs<base::Value>();
+}
+
+scoped_ptr<base::Value> PixelBufferRasterWorkerPool::ThrottleStateAsValue()
+ const {
+ scoped_ptr<base::DictionaryValue> throttle_state(new base::DictionaryValue);
+
+ throttle_state->SetInteger("bytes_available_for_upload",
+ kMaxPendingUploadBytes - bytes_pending_upload_);
+ throttle_state->SetInteger("bytes_pending_upload", bytes_pending_upload_);
+ throttle_state->SetInteger("scheduled_raster_task_count",
+ scheduled_raster_task_count_);
+ return throttle_state.PassAs<base::Value>();
+}
+
} // namespace cc
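
The reworked CheckForCompletedRasterTasks() above first decides which client notifications to send, clears the corresponding should-notify flags before scheduling more work (so a re-entrant check cannot notify twice), and only issues the callbacks at the end. The following is a hedged sketch of that ordering with simplified stand-ins for the pool state and client; none of these names are the real cc classes.

#include <iostream>

struct FakeClient {
  void DidFinishedRunningTasksRequiredForActivation() {
    std::cout << "tasks required for activation done\n";
  }
  void DidFinishedRunningTasks() { std::cout << "all tasks done\n"; }
};

struct PoolState {
  bool should_notify_no_tasks_pending = true;
  bool should_notify_no_activation_tasks_pending = true;
  bool has_pending_tasks = false;
  bool has_pending_activation_tasks = false;
};

void CheckForCompletedRasterTasks(PoolState* state, FakeClient* client) {
  // Determine what client notifications to generate.
  bool notify_activation_done =
      state->should_notify_no_activation_tasks_pending &&
      !state->has_pending_activation_tasks;
  bool notify_all_done =
      state->should_notify_no_tasks_pending && !state->has_pending_tasks;

  // Clear the flags before scheduling more work so a re-entrant check
  // cannot notify the client a second time.
  state->should_notify_no_activation_tasks_pending &= !notify_activation_done;
  state->should_notify_no_tasks_pending &= !notify_all_done;

  // ... ScheduleMoreTasks() and rescheduling of the periodic check go here ...

  // Generate client notifications last, once internal state is consistent.
  if (notify_activation_done)
    client->DidFinishedRunningTasksRequiredForActivation();
  if (notify_all_done)
    client->DidFinishedRunningTasks();
}

int main() {
  PoolState state;
  FakeClient client;
  CheckForCompletedRasterTasks(&state, &client);  // Prints both notifications.
  CheckForCompletedRasterTasks(&state, &client);  // Prints nothing; already notified.
  return 0;
}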
diff --git a/cc/resources/pixel_buffer_raster_worker_pool.h b/cc/resources/pixel_buffer_raster_worker_pool.h
index be609a8..d9613f7 100644
--- a/cc/resources/pixel_buffer_raster_worker_pool.h
+++ b/cc/resources/pixel_buffer_raster_worker_pool.h
@@ -7,6 +7,7 @@
#include <deque>
#include <set>
+#include <vector>
#include "cc/resources/raster_worker_pool.h"
@@ -28,14 +29,13 @@ class CC_EXPORT PixelBufferRasterWorkerPool : public RasterWorkerPool {
// Overridden from RasterWorkerPool:
virtual void ScheduleTasks(RasterTask::Queue* queue) OVERRIDE;
+ virtual void OnRasterTasksFinished() OVERRIDE;
+ virtual void OnRasterTasksRequiredForActivationFinished() OVERRIDE;
private:
PixelBufferRasterWorkerPool(ResourceProvider* resource_provider,
size_t num_threads);
- // Overridden from RasterWorkerPool:
- virtual void OnRasterTasksFinished() OVERRIDE;
-
void FlushUploads();
void CheckForCompletedUploads();
void ScheduleCheckForCompletedRasterTasks();
@@ -46,6 +46,13 @@ class CC_EXPORT PixelBufferRasterWorkerPool : public RasterWorkerPool {
bool was_canceled,
bool needs_upload);
void DidCompleteRasterTask(internal::RasterWorkerPoolTask* task);
+ unsigned PendingRasterTaskCount() const;
+ bool HasPendingTasks() const;
+ bool HasPendingTasksRequiredForActivation() const;
+
+ const char* StateName() const;
+ scoped_ptr<base::Value> StateAsValue() const;
+ scoped_ptr<base::Value> ThrottleStateAsValue() const;
bool shutdown_;
@@ -55,11 +62,18 @@ class CC_EXPORT PixelBufferRasterWorkerPool : public RasterWorkerPool {
TaskDeque tasks_with_pending_upload_;
TaskDeque completed_tasks_;
+ typedef std::set<internal::RasterWorkerPoolTask*> TaskSet;
+ TaskSet tasks_required_for_activation_;
+
+ size_t scheduled_raster_task_count_;
size_t bytes_pending_upload_;
bool has_performed_uploads_since_last_flush_;
base::CancelableClosure check_for_completed_raster_tasks_callback_;
bool check_for_completed_raster_tasks_pending_;
+ bool should_notify_client_if_no_tasks_are_pending_;
+ bool should_notify_client_if_no_tasks_required_for_activation_are_pending_;
+
DISALLOW_COPY_AND_ASSIGN(PixelBufferRasterWorkerPool);
};
diff --git a/cc/resources/raster_worker_pool.cc b/cc/resources/raster_worker_pool.cc
index b1ab210..754e964 100644
--- a/cc/resources/raster_worker_pool.cc
+++ b/cc/resources/raster_worker_pool.cc
@@ -228,30 +228,38 @@ class ImageDecodeWorkerPoolTaskImpl : public internal::WorkerPoolTask {
class RasterFinishedWorkerPoolTaskImpl : public internal::WorkerPoolTask {
public:
+ typedef base::Callback<void(const internal::WorkerPoolTask* source)>
+ Callback;
+
RasterFinishedWorkerPoolTaskImpl(
- base::MessageLoopProxy* origin_loop,
- const base::Closure& on_raster_finished_callback)
- : origin_loop_(origin_loop),
+ const Callback& on_raster_finished_callback)
+ : origin_loop_(base::MessageLoopProxy::current().get()),
on_raster_finished_callback_(on_raster_finished_callback) {
}
// Overridden from internal::WorkerPoolTask:
virtual void RunOnWorkerThread(unsigned thread_index) OVERRIDE {
- origin_loop_->PostTask(FROM_HERE, on_raster_finished_callback_);
+ TRACE_EVENT0("cc", "RasterFinishedWorkerPoolTaskImpl::RunOnWorkerThread");
+ origin_loop_->PostTask(
+ FROM_HERE,
+ base::Bind(&RasterFinishedWorkerPoolTaskImpl::RunOnOriginThread,
+ this));
}
virtual void CompleteOnOriginThread() OVERRIDE {}
private:
virtual ~RasterFinishedWorkerPoolTaskImpl() {}
+ void RunOnOriginThread() const {
+ on_raster_finished_callback_.Run(this);
+ }
+
scoped_refptr<base::MessageLoopProxy> origin_loop_;
- const base::Closure on_raster_finished_callback_;
+ const Callback on_raster_finished_callback_;
DISALLOW_COPY_AND_ASSIGN(RasterFinishedWorkerPoolTaskImpl);
};
-void Noop() {}
-
const char* kWorkerThreadNamePrefix = "CompositorRaster";
} // namespace
@@ -363,56 +371,6 @@ void RasterWorkerPool::RasterTask::Reset() {
RasterWorkerPool::RasterTask::~RasterTask() {
}
-RasterWorkerPool::RasterTaskGraph::RasterTaskGraph()
- : raster_finished_node_(new GraphNode),
- next_priority_(1u) {
-}
-
-RasterWorkerPool::RasterTaskGraph::~RasterTaskGraph() {
-}
-
-void RasterWorkerPool::RasterTaskGraph::InsertRasterTask(
- internal::WorkerPoolTask* raster_task,
- const TaskVector& decode_tasks) {
- DCHECK(!raster_task->HasCompleted());
- DCHECK(graph_.find(raster_task) == graph_.end());
-
- scoped_ptr<GraphNode> raster_node(new GraphNode);
- raster_node->set_task(raster_task);
- raster_node->set_priority(next_priority_++);
-
- // Insert image decode tasks.
- for (TaskVector::const_iterator it = decode_tasks.begin();
- it != decode_tasks.end(); ++it) {
- internal::WorkerPoolTask* decode_task = it->get();
-
- // Skip if already decoded.
- if (decode_task->HasCompleted())
- continue;
-
- raster_node->add_dependency();
-
- // Check if decode task already exists in graph.
- GraphNodeMap::iterator decode_it = graph_.find(decode_task);
- if (decode_it != graph_.end()) {
- GraphNode* decode_node = decode_it->second;
- decode_node->add_dependent(raster_node.get());
- continue;
- }
-
- scoped_ptr<GraphNode> decode_node(new GraphNode);
- decode_node->set_task(decode_task);
- decode_node->set_priority(next_priority_++);
- decode_node->add_dependent(raster_node.get());
- graph_.set(decode_task, decode_node.Pass());
- }
-
- raster_finished_node_->add_dependency();
- raster_node->add_dependent(raster_finished_node_.get());
-
- graph_.set(raster_task, raster_node.Pass());
-}
-
// static
RasterWorkerPool::RasterTask RasterWorkerPool::CreateRasterTask(
const Resource* resource,
@@ -452,8 +410,7 @@ RasterWorkerPool::RasterWorkerPool(ResourceProvider* resource_provider,
: WorkerPool(num_threads, kWorkerThreadNamePrefix),
client_(NULL),
resource_provider_(resource_provider),
- weak_ptr_factory_(this),
- schedule_raster_tasks_count_(0) {
+ weak_ptr_factory_(this) {
}
RasterWorkerPool::~RasterWorkerPool() {
@@ -464,11 +421,10 @@ void RasterWorkerPool::SetClient(RasterWorkerPoolClient* client) {
}
void RasterWorkerPool::Shutdown() {
+ raster_tasks_.clear();
TaskGraph empty;
SetTaskGraph(&empty);
WorkerPool::Shutdown();
- raster_tasks_.clear();
- // Cancel any pending OnRasterFinished callback.
weak_ptr_factory_.InvalidateWeakPtrs();
}
@@ -478,48 +434,107 @@ void RasterWorkerPool::SetRasterTasks(RasterTask::Queue* queue) {
queue->tasks_required_for_activation_);
}
-void RasterWorkerPool::SetRasterTaskGraph(RasterTaskGraph* graph) {
- scoped_ptr<GraphNode> raster_finished_node(
- graph->raster_finished_node_.Pass());
- TaskGraph new_graph;
- new_graph.swap(graph->graph_);
+bool RasterWorkerPool::IsRasterTaskRequiredForActivation(
+ internal::RasterWorkerPoolTask* task) const {
+ return
+ raster_tasks_required_for_activation_.find(task) !=
+ raster_tasks_required_for_activation_.end();
+}
- if (new_graph.empty()) {
- SetTaskGraph(&new_graph);
- raster_finished_task_ = NULL;
+scoped_refptr<internal::WorkerPoolTask>
+ RasterWorkerPool::CreateRasterFinishedTask() {
+ return make_scoped_refptr(
+ new RasterFinishedWorkerPoolTaskImpl(
+ base::Bind(&RasterWorkerPool::OnRasterFinished,
+ weak_ptr_factory_.GetWeakPtr())));
+}
+
+scoped_refptr<internal::WorkerPoolTask>
+ RasterWorkerPool::CreateRasterRequiredForActivationFinishedTask() {
+ return make_scoped_refptr(
+ new RasterFinishedWorkerPoolTaskImpl(
+ base::Bind(&RasterWorkerPool::OnRasterRequiredForActivationFinished,
+ weak_ptr_factory_.GetWeakPtr())));
+}
+
+void RasterWorkerPool::OnRasterFinished(
+ const internal::WorkerPoolTask* source) {
+ TRACE_EVENT0("cc", "RasterWorkerPool::OnRasterFinished");
+
+ // Early out if current |raster_finished_task_| is not the source.
+ if (source != raster_finished_task_.get())
return;
- }
- ++schedule_raster_tasks_count_;
+ OnRasterTasksFinished();
+}
- scoped_refptr<internal::WorkerPoolTask> new_raster_finished_task(
- new RasterFinishedWorkerPoolTaskImpl(
- base::MessageLoopProxy::current().get(),
- base::Bind(&RasterWorkerPool::OnRasterFinished,
- weak_ptr_factory_.GetWeakPtr(),
- schedule_raster_tasks_count_)));
- raster_finished_node->set_task(new_raster_finished_task.get());
- // Insert "raster finished" task before switching to new graph.
- new_graph.set(new_raster_finished_task.get(), raster_finished_node.Pass());
- SetTaskGraph(&new_graph);
- raster_finished_task_.swap(new_raster_finished_task);
+void RasterWorkerPool::OnRasterRequiredForActivationFinished(
+ const internal::WorkerPoolTask* source) {
+ TRACE_EVENT0("cc", "RasterWorkerPool::OnRasterRequiredForActivationFinished");
+
+ // Early out if current |raster_required_for_activation_finished_task_|
+ // is not the source.
+ if (source != raster_required_for_activation_finished_task_.get())
+ return;
+
+ OnRasterTasksRequiredForActivationFinished();
}
-bool RasterWorkerPool::IsRasterTaskRequiredForActivation(
- internal::RasterWorkerPoolTask* task) const {
- return
- raster_tasks_required_for_activation_.find(task) !=
- raster_tasks_required_for_activation_.end();
+scoped_ptr<base::Value> RasterWorkerPool::ScheduledStateAsValue() const {
+ scoped_ptr<base::DictionaryValue> scheduled_state(new base::DictionaryValue);
+ scheduled_state->SetInteger("task_count", raster_tasks_.size());
+ scheduled_state->SetInteger("task_required_for_activation_count",
+ raster_tasks_required_for_activation_.size());
+ return scheduled_state.PassAs<base::Value>();
}
-void RasterWorkerPool::OnRasterFinished(int64 schedule_raster_tasks_count) {
- TRACE_EVENT1("cc", "RasterWorkerPool::OnRasterFinished",
- "schedule_raster_tasks_count", schedule_raster_tasks_count);
- DCHECK_GE(schedule_raster_tasks_count_, schedule_raster_tasks_count);
- // Call OnRasterTasksFinished() when we've finished running all raster
- // tasks needed since last time SetRasterTaskGraph() was called.
- if (schedule_raster_tasks_count_ == schedule_raster_tasks_count)
- OnRasterTasksFinished();
+// static
+internal::GraphNode* RasterWorkerPool::CreateGraphNodeForTask(
+ internal::WorkerPoolTask* task,
+ unsigned priority,
+ TaskGraph* graph) {
+ internal::GraphNode* node = new internal::GraphNode(task, priority);
+ DCHECK(graph->find(task) == graph->end());
+ graph->set(task, make_scoped_ptr(node));
+ return node;
+}
+
+// static
+internal::GraphNode* RasterWorkerPool::CreateGraphNodeForRasterTask(
+ internal::WorkerPoolTask* raster_task,
+ const TaskVector& decode_tasks,
+ unsigned priority,
+ TaskGraph* graph) {
+ DCHECK(!raster_task->HasCompleted());
+
+ internal::GraphNode* raster_node = CreateGraphNodeForTask(
+ raster_task, priority, graph);
+
+ // Insert image decode tasks.
+ for (TaskVector::const_iterator it = decode_tasks.begin();
+ it != decode_tasks.end(); ++it) {
+ internal::WorkerPoolTask* decode_task = it->get();
+
+ // Skip if already decoded.
+ if (decode_task->HasCompleted())
+ continue;
+
+ raster_node->add_dependency();
+
+ // Check if decode task already exists in graph.
+ GraphNodeMap::iterator decode_it = graph->find(decode_task);
+ if (decode_it != graph->end()) {
+ internal::GraphNode* decode_node = decode_it->second;
+ decode_node->add_dependent(raster_node);
+ continue;
+ }
+
+ internal::GraphNode* decode_node = CreateGraphNodeForTask(
+ decode_task, priority, graph);
+ decode_node->add_dependent(raster_node);
+ }
+
+ return raster_node;
}
} // namespace cc
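
CreateGraphNodeForRasterTask() above reproduces the old RasterTaskGraph::InsertRasterTask() behavior on a caller-owned TaskGraph: completed decodes are skipped, a decode shared by several raster tasks gets a single node, and each raster node counts one dependency per pending decode. Below is a rough, self-contained sketch of that de-duplication using simplified types (Task, Node, std::map) rather than cc's real containers.

#include <cstddef>
#include <iostream>
#include <map>
#include <string>
#include <vector>

struct Task {
  explicit Task(const std::string& name) : name(name), completed(false) {}
  std::string name;
  bool completed;
};

struct Node {
  Task* task = nullptr;
  unsigned priority = 0;
  std::size_t num_dependencies = 0;
  std::vector<Node*> dependents;
};

typedef std::map<Task*, Node> TaskGraph;

Node* CreateNodeForTask(Task* task, unsigned priority, TaskGraph* graph) {
  Node& node = (*graph)[task];  // std::map keeps element addresses stable.
  node.task = task;
  node.priority = priority;
  return &node;
}

Node* CreateNodeForRasterTask(Task* raster_task,
                              const std::vector<Task*>& decode_tasks,
                              unsigned priority,
                              TaskGraph* graph) {
  Node* raster_node = CreateNodeForTask(raster_task, priority, graph);

  for (std::size_t i = 0; i < decode_tasks.size(); ++i) {
    Task* decode_task = decode_tasks[i];

    // Skip if already decoded.
    if (decode_task->completed)
      continue;

    raster_node->num_dependencies++;

    // Reuse the node if another raster task already pulled in this decode.
    TaskGraph::iterator it = graph->find(decode_task);
    Node* decode_node = (it != graph->end())
                            ? &it->second
                            : CreateNodeForTask(decode_task, priority, graph);
    decode_node->dependents.push_back(raster_node);
  }
  return raster_node;
}

int main() {
  Task shared_decode("shared_decode");
  Task raster_a("raster_a");
  Task raster_b("raster_b");

  TaskGraph graph;
  std::vector<Task*> decodes(1, &shared_decode);
  CreateNodeForRasterTask(&raster_a, decodes, 1u, &graph);
  CreateNodeForRasterTask(&raster_b, decodes, 2u, &graph);

  // Three nodes total: the shared decode node was created only once.
  std::cout << graph.size() << "\n";
  return 0;
}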
diff --git a/cc/resources/raster_worker_pool.h b/cc/resources/raster_worker_pool.h
index 3101376..0ad519e 100644
--- a/cc/resources/raster_worker_pool.h
+++ b/cc/resources/raster_worker_pool.h
@@ -105,6 +105,8 @@ struct RasterTaskMetadata {
class CC_EXPORT RasterWorkerPoolClient {
public:
virtual bool ShouldForceTasksRequiredForActivationToComplete() const = 0;
+ virtual void DidFinishedRunningTasks() = 0;
+ virtual void DidFinishedRunningTasksRequiredForActivation() = 0;
protected:
virtual ~RasterWorkerPoolClient() {}
@@ -230,31 +232,12 @@ class CC_EXPORT RasterWorkerPool : public WorkerPool {
typedef base::hash_map<TaskMapKey,
scoped_refptr<internal::WorkerPoolTask> > TaskMap;
- class CC_EXPORT RasterTaskGraph {
- public:
- RasterTaskGraph();
- ~RasterTaskGraph();
-
- void InsertRasterTask(internal::WorkerPoolTask* raster_task,
- const TaskVector& decode_tasks);
-
- private:
- friend class RasterWorkerPool;
-
- TaskGraph graph_;
- scoped_refptr<internal::WorkerPoolTask> raster_finished_task_;
- scoped_ptr<GraphNode> raster_finished_node_;
- unsigned next_priority_;
-
- DISALLOW_COPY_AND_ASSIGN(RasterTaskGraph);
- };
-
RasterWorkerPool(ResourceProvider* resource_provider, size_t num_threads);
- virtual void OnRasterTasksFinished() {}
+ virtual void OnRasterTasksFinished() = 0;
+ virtual void OnRasterTasksRequiredForActivationFinished() = 0;
void SetRasterTasks(RasterTask::Queue* queue);
- void SetRasterTaskGraph(RasterTaskGraph* graph);
bool IsRasterTaskRequiredForActivation(
internal::RasterWorkerPoolTask* task) const;
@@ -263,9 +246,38 @@ class CC_EXPORT RasterWorkerPool : public WorkerPool {
const RasterTask::Queue::TaskVector& raster_tasks() const {
return raster_tasks_;
}
+ void set_raster_finished_task(
+ scoped_refptr<internal::WorkerPoolTask> raster_finished_task) {
+ raster_finished_task_ = raster_finished_task;
+ }
+ void set_raster_required_for_activation_finished_task(
+ scoped_refptr<internal::WorkerPoolTask>
+ raster_required_for_activation_finished_task) {
+ raster_required_for_activation_finished_task_ =
+ raster_required_for_activation_finished_task;
+ }
+
+ scoped_refptr<internal::WorkerPoolTask> CreateRasterFinishedTask();
+ scoped_refptr<internal::WorkerPoolTask>
+ CreateRasterRequiredForActivationFinishedTask();
+
+ scoped_ptr<base::Value> ScheduledStateAsValue() const;
+
+ static internal::GraphNode* CreateGraphNodeForTask(
+ internal::WorkerPoolTask* task,
+ unsigned priority,
+ TaskGraph* graph);
+
+ static internal::GraphNode* CreateGraphNodeForRasterTask(
+ internal::WorkerPoolTask* raster_task,
+ const TaskVector& decode_tasks,
+ unsigned priority,
+ TaskGraph* graph);
private:
- void OnRasterFinished(int64 schedule_raster_tasks_count);
+ void OnRasterFinished(const internal::WorkerPoolTask* source);
+ void OnRasterRequiredForActivationFinished(
+ const internal::WorkerPoolTask* source);
RasterWorkerPoolClient* client_;
ResourceProvider* resource_provider_;
@@ -274,7 +286,8 @@ class CC_EXPORT RasterWorkerPool : public WorkerPool {
base::WeakPtrFactory<RasterWorkerPool> weak_ptr_factory_;
scoped_refptr<internal::WorkerPoolTask> raster_finished_task_;
- int64 schedule_raster_tasks_count_;
+ scoped_refptr<internal::WorkerPoolTask>
+ raster_required_for_activation_finished_task_;
};
} // namespace cc
diff --git a/cc/resources/raster_worker_pool_perftest.cc b/cc/resources/raster_worker_pool_perftest.cc
index f991845..c8f1d84 100644
--- a/cc/resources/raster_worker_pool_perftest.cc
+++ b/cc/resources/raster_worker_pool_perftest.cc
@@ -38,6 +38,12 @@ class PerfRasterWorkerPool : public RasterWorkerPool {
virtual void ScheduleTasks(RasterTask::Queue* queue) OVERRIDE {
NOTREACHED();
}
+ virtual void OnRasterTasksFinished() OVERRIDE {
+ NOTREACHED();
+ }
+ virtual void OnRasterTasksRequiredForActivationFinished() OVERRIDE {
+ NOTREACHED();
+ }
void SetRasterTasks(RasterTask::Queue* queue) {
RasterWorkerPool::SetRasterTasks(queue);
@@ -56,7 +62,24 @@ class PerfRasterWorkerPool : public RasterWorkerPool {
}
void BuildTaskGraph() {
- RasterTaskGraph graph;
+ unsigned priority = 0;
+ TaskGraph graph;
+
+ scoped_refptr<internal::WorkerPoolTask>
+ raster_required_for_activation_finished_task(
+ CreateRasterRequiredForActivationFinishedTask());
+ internal::GraphNode* raster_required_for_activation_finished_node =
+ CreateGraphNodeForTask(
+ raster_required_for_activation_finished_task.get(),
+ priority++,
+ &graph);
+
+ scoped_refptr<internal::WorkerPoolTask> raster_finished_task(
+ CreateRasterFinishedTask());
+ internal::GraphNode* raster_finished_node =
+ CreateGraphNodeForTask(raster_finished_task.get(),
+ priority++,
+ &graph);
for (RasterTaskVector::const_iterator it = raster_tasks().begin();
it != raster_tasks().end(); ++it) {
@@ -66,7 +89,21 @@ class PerfRasterWorkerPool : public RasterWorkerPool {
DCHECK(perf_it != perf_tasks_.end());
if (perf_it != perf_tasks_.end()) {
internal::WorkerPoolTask* perf_task = perf_it->second.get();
- graph.InsertRasterTask(perf_task, task->dependencies());
+
+ internal::GraphNode* perf_node =
+ CreateGraphNodeForRasterTask(perf_task,
+ task->dependencies(),
+ priority++,
+ &graph);
+
+ if (IsRasterTaskRequiredForActivation(task)) {
+ raster_required_for_activation_finished_node->add_dependency();
+ perf_node->add_dependent(
+ raster_required_for_activation_finished_node);
+ }
+
+ raster_finished_node->add_dependency();
+ perf_node->add_dependent(raster_finished_node);
}
}
}
diff --git a/cc/resources/raster_worker_pool_unittest.cc b/cc/resources/raster_worker_pool_unittest.cc
index 7b82b3f..7e794cc 100644
--- a/cc/resources/raster_worker_pool_unittest.cc
+++ b/cc/resources/raster_worker_pool_unittest.cc
@@ -79,6 +79,8 @@ class RasterWorkerPoolTest : public testing::Test,
OVERRIDE {
return false;
}
+ virtual void DidFinishedRunningTasks() OVERRIDE {}
+ virtual void DidFinishedRunningTasksRequiredForActivation() OVERRIDE {}
virtual void BeginTest() = 0;
virtual void AfterTest() = 0;
diff --git a/cc/resources/tile.h b/cc/resources/tile.h
index 497aff71..625f95c 100644
--- a/cc/resources/tile.h
+++ b/cc/resources/tile.h
@@ -107,18 +107,6 @@ class CC_EXPORT Tile : public base::RefCounted<Tile> {
picture_pile_ = pile;
}
- // For test only methods.
- bool HasRasterTaskForTesting() const {
- for (int mode = 0; mode < NUM_RASTER_MODES; ++mode) {
- if (!managed_state().tile_versions[mode].raster_task_.is_null())
- return true;
- }
- return false;
- }
- void ResetRasterTaskForTesting() {
- for (int mode = 0; mode < NUM_RASTER_MODES; ++mode)
- managed_state().tile_versions[mode].raster_task_.Reset();
- }
RasterMode GetRasterModeForTesting() const {
return managed_state().raster_mode;
}
@@ -126,6 +114,7 @@ class CC_EXPORT Tile : public base::RefCounted<Tile> {
private:
// Methods called by the tile manager.
friend class TileManager;
+ friend class FakeTileManager;
friend class BinComparator;
ManagedTileState& managed_state() { return managed_state_; }
const ManagedTileState& managed_state() const { return managed_state_; }
diff --git a/cc/resources/tile_manager.cc b/cc/resources/tile_manager.cc
index 3eb7bc2..54e5ed9 100644
--- a/cc/resources/tile_manager.cc
+++ b/cc/resources/tile_manager.cc
@@ -166,6 +166,7 @@ void TileManager::UnregisterTile(Tile* tile) {
tiles_that_need_to_be_rasterized_.erase(raster_iter);
tiles_that_need_to_be_initialized_for_activation_.erase(tile);
+ oom_tiles_that_need_to_be_initialized_for_activation_.erase(tile);
DCHECK(std::find(tiles_.begin(), tiles_.end(), tile) != tiles_.end());
FreeResourcesForTile(tile);
@@ -176,6 +177,54 @@ bool TileManager::ShouldForceTasksRequiredForActivationToComplete() const {
return GlobalState().tree_priority != SMOOTHNESS_TAKES_PRIORITY;
}
+void TileManager::DidFinishedRunningTasks() {
+ // When OOM, keep re-assigning memory until we reach a steady state
+ // where top-priority tiles are initialized.
+ if (!memory_stats_from_last_assign_.bytes_over)
+ return;
+
+ raster_worker_pool_->CheckForCompletedTasks();
+
+ AssignGpuMemoryToTiles();
+
+ if (!oom_tiles_that_need_to_be_initialized_for_activation_.empty())
+ ReassignGpuMemoryToOOMTilesRequiredForActivation();
+
+ // |tiles_that_need_to_be_rasterized_| will be empty when we reach a
+ // steady memory state. Keep scheduling tasks until we reach this state.
+ if (!tiles_that_need_to_be_rasterized_.empty()) {
+ ScheduleTasks();
+ return;
+ }
+
+ // Use on-demand raster for any tiles that have not been assigned
+ // memory after reaching a steady memory state.
+ for (TileSet::iterator it =
+ oom_tiles_that_need_to_be_initialized_for_activation_.begin();
+ it != oom_tiles_that_need_to_be_initialized_for_activation_.end();
+ ++it) {
+ Tile* tile = *it;
+ ManagedTileState& mts = tile->managed_state();
+ mts.tile_versions[mts.raster_mode].set_rasterize_on_demand();
+ }
+ oom_tiles_that_need_to_be_initialized_for_activation_.clear();
+
+ DCHECK_EQ(0u, tiles_that_need_to_be_initialized_for_activation_.size());
+ client_->NotifyReadyToActivate();
+}
+
+void TileManager::DidFinishedRunningTasksRequiredForActivation() {
+ // This is only a true indication that all tiles required for
+ // activation are initialized when no tiles are OOM. We need to
+ // wait for DidFinishedRunningTasks() to be called, try to re-assign
+ // memory and in worst case use on-demand raster when tiles
+ // required for activation are OOM.
+ if (!oom_tiles_that_need_to_be_initialized_for_activation_.empty())
+ return;
+
+ client_->NotifyReadyToActivate();
+}
+
class BinComparator {
public:
bool operator() (const Tile* a, const Tile* b) const {
@@ -302,10 +351,6 @@ void TileManager::ManageTiles() {
AssignGpuMemoryToTiles();
CleanUpUnusedImageDecodeTasks();
- // This could have changed after AssignGpuMemoryToTiles.
- if (AreTilesRequiredForActivationReady())
- client_->NotifyReadyToActivate();
-
TRACE_EVENT_INSTANT1(
"cc", "DidManage", TRACE_EVENT_SCOPE_THREAD,
"state", TracedValue::FromValue(BasicStateAsValue().release()));
@@ -316,6 +361,11 @@ void TileManager::ManageTiles() {
void TileManager::CheckForCompletedTileUploads() {
raster_worker_pool_->CheckForCompletedTasks();
+
+ if (did_initialize_visible_tile_) {
+ client_->DidInitializeVisibleTile();
+ did_initialize_visible_tile_ = false;
+ }
}
void TileManager::GetMemoryStats(
@@ -413,6 +463,7 @@ void TileManager::AssignGpuMemoryToTiles() {
// the needs-to-be-rasterized queue.
tiles_that_need_to_be_rasterized_.clear();
tiles_that_need_to_be_initialized_for_activation_.clear();
+ oom_tiles_that_need_to_be_initialized_for_activation_.clear();
size_t bytes_releasable = 0;
for (TileVector::const_iterator it = tiles_.begin();
@@ -435,10 +486,9 @@ void TileManager::AssignGpuMemoryToTiles() {
size_t bytes_allocatable =
std::max(static_cast<int64>(0), bytes_available);
- size_t bytes_that_exceeded_memory_budget_in_now_bin = 0;
+ size_t bytes_that_exceeded_memory_budget = 0;
size_t bytes_left = bytes_allocatable;
- size_t bytes_oom_in_now_bin_on_pending_tree = 0;
- TileVector tiles_requiring_memory_but_oomed;
+ size_t bytes_oom_tiles_that_need_to_be_initialized_for_activation = 0;
bool higher_priority_tile_oomed = false;
for (TileVector::iterator it = tiles_.begin();
it != tiles_.end();
@@ -476,13 +526,22 @@ void TileManager::AssignGpuMemoryToTiles() {
// Tile is OOM.
if (tile_bytes > bytes_left) {
- mts.tile_versions[mts.raster_mode].set_rasterize_on_demand();
- if (mts.tree_bin[PENDING_TREE] == NOW_BIN) {
- tiles_requiring_memory_but_oomed.push_back(tile);
- bytes_oom_in_now_bin_on_pending_tree += tile_bytes;
+ if (tile->required_for_activation()) {
+ // Immediately mark tiles for on-demand raster once the amount
+ // of memory for oom tiles required for activation exceeds our
+ // memory limit.
+ if (bytes_oom_tiles_that_need_to_be_initialized_for_activation <
+ global_state_.memory_limit_in_bytes) {
+ oom_tiles_that_need_to_be_initialized_for_activation_.insert(tile);
+ bytes_oom_tiles_that_need_to_be_initialized_for_activation +=
+ tile_bytes;
+ } else {
+ tile_version.set_rasterize_on_demand();
+ }
}
FreeResourcesForTile(tile);
higher_priority_tile_oomed = true;
+ bytes_that_exceeded_memory_budget += tile_bytes;
continue;
}
@@ -507,85 +566,94 @@ void TileManager::AssignGpuMemoryToTiles() {
}
}
+ ever_exceeded_memory_budget_ |= bytes_that_exceeded_memory_budget > 0;
+ if (ever_exceeded_memory_budget_) {
+ TRACE_COUNTER_ID2("cc", "over_memory_budget", this,
+ "budget", global_state_.memory_limit_in_bytes,
+ "over", bytes_that_exceeded_memory_budget);
+ }
+ memory_stats_from_last_assign_.total_budget_in_bytes =
+ global_state_.memory_limit_in_bytes;
+ memory_stats_from_last_assign_.bytes_allocated =
+ bytes_allocatable - bytes_left;
+ memory_stats_from_last_assign_.bytes_unreleasable =
+ bytes_allocatable - bytes_releasable;
+ memory_stats_from_last_assign_.bytes_over =
+ bytes_that_exceeded_memory_budget;
+}
+
+void TileManager::ReassignGpuMemoryToOOMTilesRequiredForActivation() {
+ TRACE_EVENT0(
+ "cc", "TileManager::ReassignGpuMemoryToOOMTilesRequiredForActivation");
+
+ size_t bytes_oom_for_required_tiles = 0;
+ TileVector tiles_requiring_memory_but_oomed;
+ for (TileVector::iterator it = tiles_.begin(); it != tiles_.end(); ++it) {
+ Tile* tile = *it;
+ if (oom_tiles_that_need_to_be_initialized_for_activation_.find(tile) ==
+ oom_tiles_that_need_to_be_initialized_for_activation_.end())
+ continue;
+
+ tiles_requiring_memory_but_oomed.push_back(tile);
+ bytes_oom_for_required_tiles += tile->bytes_consumed_if_allocated();
+ }
+
+ if (tiles_requiring_memory_but_oomed.empty())
+ return;
+
// In OOM situation, we iterate tiles_, remove the memory for active tree
- // and not the now bin. And give them to bytes_oom_in_now_bin_on_pending_tree
- if (!tiles_requiring_memory_but_oomed.empty()) {
- size_t bytes_freed = 0;
- for (TileVector::iterator it = tiles_.begin(); it != tiles_.end(); ++it) {
- Tile* tile = *it;
- ManagedTileState& mts = tile->managed_state();
+ // and not the now bin. And give them to bytes_oom_for_required_tiles
+ size_t bytes_freed = 0;
+ for (TileVector::reverse_iterator it = tiles_.rbegin();
+ it != tiles_.rend(); ++it) {
+ Tile* tile = *it;
+ ManagedTileState& mts = tile->managed_state();
+ if (mts.tree_bin[PENDING_TREE] == NEVER_BIN &&
+ mts.tree_bin[ACTIVE_TREE] != NOW_BIN) {
ManagedTileState::TileVersion& tile_version =
mts.tile_versions[mts.raster_mode];
- if (mts.tree_bin[PENDING_TREE] == NEVER_BIN &&
- mts.tree_bin[ACTIVE_TREE] != NOW_BIN) {
- size_t bytes_that_can_be_freed = 0;
-
- // If the tile is in the to-rasterize list, but it has no task,
- // then it means that we have assigned memory for it.
- TileVector::iterator raster_it =
- std::find(tiles_that_need_to_be_rasterized_.begin(),
- tiles_that_need_to_be_rasterized_.end(),
- tile);
- if (raster_it != tiles_that_need_to_be_rasterized_.end() &&
- tile_version.raster_task_.is_null()) {
- bytes_that_can_be_freed += tile->bytes_consumed_if_allocated();
- }
- // Also consider all of the completed resources for freeing.
- for (int mode = 0; mode < NUM_RASTER_MODES; ++mode) {
- ManagedTileState::TileVersion& tile_version =
- mts.tile_versions[mode];
- if (tile_version.resource_) {
- DCHECK(!tile->required_for_activation());
- bytes_that_can_be_freed += tile->bytes_consumed_if_allocated();
- }
- }
+ // If the tile is in the to-rasterize list, but it has no task,
+ // then it means that we have assigned memory for it.
+ TileVector::iterator raster_it =
+ std::find(tiles_that_need_to_be_rasterized_.begin(),
+ tiles_that_need_to_be_rasterized_.end(),
+ tile);
+ if (raster_it != tiles_that_need_to_be_rasterized_.end() &&
+ tile_version.raster_task_.is_null()) {
+ bytes_freed += tile->bytes_consumed_if_allocated();
+ tiles_that_need_to_be_rasterized_.erase(raster_it);
+ }
- // If we can free anything, then do so.
- if (bytes_that_can_be_freed > 0) {
- FreeResourcesForTile(tile);
- bytes_freed += bytes_that_can_be_freed;
- mts.tile_versions[mts.raster_mode].set_rasterize_on_demand();
- if (raster_it != tiles_that_need_to_be_rasterized_.end())
- tiles_that_need_to_be_rasterized_.erase(raster_it);
+ // Also consider all of the completed resources for freeing.
+ for (int mode = 0; mode < NUM_RASTER_MODES; ++mode) {
+ if (mts.tile_versions[mode].resource_) {
+ DCHECK(!tile->required_for_activation());
+ FreeResourceForTile(tile, static_cast<RasterMode>(mode));
+ bytes_freed += tile->bytes_consumed_if_allocated();
}
}
-
- if (bytes_oom_in_now_bin_on_pending_tree <= bytes_freed)
- break;
}
- for (TileVector::iterator it = tiles_requiring_memory_but_oomed.begin();
- it != tiles_requiring_memory_but_oomed.end() && bytes_freed > 0;
- ++it) {
- Tile* tile = *it;
- ManagedTileState& mts = tile->managed_state();
- size_t bytes_needed = tile->bytes_consumed_if_allocated();
- if (bytes_needed > bytes_freed)
- continue;
- mts.tile_versions[mts.raster_mode].set_use_resource();
- bytes_freed -= bytes_needed;
- tiles_that_need_to_be_rasterized_.push_back(tile);
- if (tile->required_for_activation())
- AddRequiredTileForActivation(tile);
- }
+ if (bytes_oom_for_required_tiles <= bytes_freed)
+ break;
}
- ever_exceeded_memory_budget_ |=
- bytes_that_exceeded_memory_budget_in_now_bin > 0;
- if (ever_exceeded_memory_budget_) {
- TRACE_COUNTER_ID2("cc", "over_memory_budget", this,
- "budget", global_state_.memory_limit_in_bytes,
- "over", bytes_that_exceeded_memory_budget_in_now_bin);
+ for (TileVector::iterator it = tiles_requiring_memory_but_oomed.begin();
+ it != tiles_requiring_memory_but_oomed.end() && bytes_freed > 0;
+ ++it) {
+ Tile* tile = *it;
+ ManagedTileState& mts = tile->managed_state();
+ size_t bytes_needed = tile->bytes_consumed_if_allocated();
+ if (bytes_needed > bytes_freed)
+ continue;
+ mts.tile_versions[mts.raster_mode].set_use_resource();
+ bytes_freed -= bytes_needed;
+ tiles_that_need_to_be_rasterized_.push_back(tile);
+ DCHECK(tile->required_for_activation());
+ AddRequiredTileForActivation(tile);
+ oom_tiles_that_need_to_be_initialized_for_activation_.erase(tile);
}
- memory_stats_from_last_assign_.total_budget_in_bytes =
- global_state_.memory_limit_in_bytes;
- memory_stats_from_last_assign_.bytes_allocated =
- bytes_allocatable - bytes_left;
- memory_stats_from_last_assign_.bytes_unreleasable =
- bytes_allocatable - bytes_releasable;
- memory_stats_from_last_assign_.bytes_over =
- bytes_that_exceeded_memory_budget_in_now_bin;
}
void TileManager::CleanUpUnusedImageDecodeTasks() {
@@ -636,7 +704,8 @@ void TileManager::FreeUnusedResourcesForTile(Tile* tile) {
}
void TileManager::ScheduleTasks() {
- TRACE_EVENT0("cc", "TileManager::ScheduleTasks");
+ TRACE_EVENT1("cc", "TileManager::ScheduleTasks",
+ "count", tiles_that_need_to_be_rasterized_.size());
RasterWorkerPool::RasterTask::Queue tasks;
// Build a new task queue containing all tasks currently needed. Tasks
@@ -666,8 +735,6 @@ void TileManager::ScheduleTasks() {
RasterWorkerPool::Task TileManager::CreateImageDecodeTask(
Tile* tile, skia::LazyPixelRef* pixel_ref) {
- TRACE_EVENT0("cc", "TileManager::CreateImageDecodeTask");
-
return RasterWorkerPool::CreateImageDecodeTask(
pixel_ref,
tile->layer_id(),
@@ -692,8 +759,6 @@ RasterTaskMetadata TileManager::GetRasterTaskMetadata(
}
RasterWorkerPool::RasterTask TileManager::CreateRasterTask(Tile* tile) {
- TRACE_EVENT0("cc", "TileManager::CreateRasterTask");
-
ManagedTileState& mts = tile->managed_state();
scoped_ptr<ResourcePool::Resource> resource =
@@ -806,9 +871,6 @@ void TileManager::DidFinishTileInitialization(Tile* tile) {
// if it was marked as being required after being dispatched for
// rasterization but before AssignGPUMemory was called again.
tiles_that_need_to_be_initialized_for_activation_.erase(tile);
-
- if (AreTilesRequiredForActivationReady())
- client_->NotifyReadyToActivate();
}
}
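
DidFinishedRunningTasks() above keeps the tile manager iterating while it is over budget: re-assign memory, schedule more raster work if any tiles still need it, and only once a steady state is reached fall back to on-demand raster for activation tiles that never got memory, then signal readiness to activate. The sketch below shows that control flow in simplified form; MiniTileManager and its members are illustrative stand-ins, not the real TileManager.

#include <cstddef>
#include <iostream>
#include <set>
#include <vector>

struct Tile {
  explicit Tile(int id) : id(id), rasterize_on_demand(false) {}
  int id;
  bool rasterize_on_demand;
};

struct MiniTileManager {
  std::size_t bytes_over = 0;                 // From the last memory assignment.
  std::vector<Tile*> tiles_that_need_raster;  // Refilled by AssignGpuMemoryToTiles().
  std::set<Tile*> oom_activation_tiles;       // Activation tiles left without memory.

  void AssignGpuMemoryToTiles() { /* re-runs the priority-ordered assignment */ }
  void ScheduleTasks() { /* kicks off raster work for tiles_that_need_raster */ }
  void NotifyReadyToActivate() { std::cout << "ready to activate\n"; }

  void DidFinishedRunningTasks() {
    if (bytes_over == 0)
      return;  // Not over budget; nothing to re-balance.

    AssignGpuMemoryToTiles();

    if (!tiles_that_need_raster.empty()) {
      ScheduleTasks();  // Not at a steady state yet; go around again.
      return;
    }

    // Steady state reached: any activation tile still without memory falls
    // back to on-demand raster so activation is never blocked indefinitely.
    for (std::set<Tile*>::iterator it = oom_activation_tiles.begin();
         it != oom_activation_tiles.end(); ++it) {
      (*it)->rasterize_on_demand = true;
    }
    oom_activation_tiles.clear();

    NotifyReadyToActivate();
  }
};

int main() {
  Tile tile_a(1);
  Tile tile_b(2);

  MiniTileManager manager;
  manager.bytes_over = 64;  // Pretend the last assignment ran over budget.
  manager.oom_activation_tiles.insert(&tile_a);
  manager.oom_activation_tiles.insert(&tile_b);

  manager.DidFinishedRunningTasks();  // Prints "ready to activate".
  return 0;
}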
diff --git a/cc/resources/tile_manager.h b/cc/resources/tile_manager.h
index 7bec1c4..2633a0e 100644
--- a/cc/resources/tile_manager.h
+++ b/cc/resources/tile_manager.h
@@ -87,7 +87,8 @@ class CC_EXPORT TileManager : public RasterWorkerPoolClient {
}
bool AreTilesRequiredForActivationReady() const {
- return tiles_that_need_to_be_initialized_for_activation_.empty();
+ return tiles_that_need_to_be_initialized_for_activation_.empty() &&
+ oom_tiles_that_need_to_be_initialized_for_activation_.empty();
}
protected:
@@ -106,10 +107,18 @@ class CC_EXPORT TileManager : public RasterWorkerPoolClient {
// Overriden from RasterWorkerPoolClient:
virtual bool ShouldForceTasksRequiredForActivationToComplete() const
OVERRIDE;
+ virtual void DidFinishedRunningTasks() OVERRIDE;
+ virtual void DidFinishedRunningTasksRequiredForActivation() OVERRIDE;
// Virtual for test
virtual void ScheduleTasks();
+ const std::vector<Tile*>& tiles_that_need_to_be_rasterized() const {
+ return tiles_that_need_to_be_rasterized_;
+ }
+
+ void ReassignGpuMemoryToOOMTilesRequiredForActivation();
+
private:
void OnImageDecodeTaskCompleted(
int layer_id,
@@ -151,6 +160,7 @@ class CC_EXPORT TileManager : public RasterWorkerPoolClient {
TileVector tiles_that_need_to_be_rasterized_;
typedef std::set<Tile*> TileSet;
TileSet tiles_that_need_to_be_initialized_for_activation_;
+ TileSet oom_tiles_that_need_to_be_initialized_for_activation_;
bool ever_exceeded_memory_budget_;
MemoryHistory::Entry memory_stats_from_last_assign_;
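
A minimal self-contained sketch (not part of the patch) of what the AreTilesRequiredForActivationReady() change above means: activation now also waits for the new OOM set to drain. SimpleTile and the set names are hypothetical stand-ins for the TileSet members declared above.

#include <cassert>
#include <set>

struct SimpleTile {};

bool AreTilesRequiredForActivationReady(
    const std::set<SimpleTile*>& initializing,
    const std::set<SimpleTile*>& oom_waiting_for_memory) {
  return initializing.empty() && oom_waiting_for_memory.empty();
}

int main() {
  SimpleTile a, b;
  std::set<SimpleTile*> initializing;
  std::set<SimpleTile*> oom;
  initializing.insert(&a);
  oom.insert(&b);

  initializing.erase(&a);  // tile finished initialization
  assert(!AreTilesRequiredForActivationReady(initializing, oom));

  oom.erase(&b);  // tile got memory on a later assignment pass
  assert(AreTilesRequiredForActivationReady(initializing, oom));
  return 0;
}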
diff --git a/cc/resources/tile_manager_unittest.cc b/cc/resources/tile_manager_unittest.cc
index 1014c98..29e45ba 100644
--- a/cc/resources/tile_manager_unittest.cc
+++ b/cc/resources/tile_manager_unittest.cc
@@ -49,6 +49,16 @@ class TilePriorityForNowBin : public TilePriority {
0) {}
};
+class TilePriorityRequiredForActivation : public TilePriority {
+ public:
+ TilePriorityRequiredForActivation() : TilePriority(
+ HIGH_RESOLUTION,
+ 0,
+ 0) {
+ required_for_activation = true;
+ }
+};
+
class TileManagerTest : public testing::Test {
public:
typedef std::vector<scoped_refptr<Tile> > TileVector;
@@ -110,9 +120,8 @@ class TileManagerTest : public testing::Test {
for (TileVector::const_iterator it = tiles.begin();
it != tiles.end();
++it) {
- if ((*it)->HasRasterTaskForTesting())
+ if (tile_manager_->HasBeenAssignedMemory(*it))
++has_memory_count;
- (*it)->ResetRasterTaskForTesting();
}
return has_memory_count;
}
@@ -124,7 +133,6 @@ class TileManagerTest : public testing::Test {
++it) {
if ((*it)->GetRasterModeForTesting() == HIGH_QUALITY_RASTER_MODE)
++has_lcd_count;
- (*it)->ResetRasterTaskForTesting();
}
return has_lcd_count;
}
@@ -222,18 +230,24 @@ TEST_F(TileManagerTest, EnoughMemoryAllowNothing) {
}
TEST_F(TileManagerTest, PartialOOMMemoryToPending) {
- // 5 tiles on active tree eventually bin, 5 tiles on pending tree now bin,
- // but only enough memory for 8 tiles. The result is all pending tree tiles
- // get memory, and 3 of the active tree tiles get memory.
+ // 5 tiles on active tree eventually bin, 5 tiles on pending tree that are
+ // required for activation, but only enough memory for 8 tiles. The result
+ // is all pending tree tiles get memory, and 3 of the active tree tiles
+ // get memory.
Initialize(8, ALLOW_ANYTHING, SMOOTHNESS_TAKES_PRIORITY);
TileVector active_tree_tiles =
CreateTiles(5, TilePriorityForEventualBin(), TilePriority());
TileVector pending_tree_tiles =
- CreateTiles(5, TilePriority(), TilePriorityForNowBin());
+ CreateTiles(5, TilePriority(), TilePriorityRequiredForActivation());
tile_manager()->ManageTiles();
+ EXPECT_EQ(5, AssignedMemoryCount(active_tree_tiles));
+ EXPECT_EQ(3, AssignedMemoryCount(pending_tree_tiles));
+
+ tile_manager()->ReassignMemoryToOOMTilesRequiredForActivation();
+
EXPECT_EQ(3, AssignedMemoryCount(active_tree_tiles));
EXPECT_EQ(5, AssignedMemoryCount(pending_tree_tiles));
}
@@ -256,35 +270,47 @@ TEST_F(TileManagerTest, PartialOOMMemoryToActive) {
}
TEST_F(TileManagerTest, TotalOOMMemoryToPending) {
- // 5 tiles on active tree eventually bin, 5 tiles on pending tree now bin,
- // but only enough memory for 4 tiles. The result is 4 pending tree tiles
- // get memory, and none of the active tree tiles get memory.
+ // 5 tiles on active tree eventually bin, 5 tiles on pending tree that are
+ // required for activation, but only enough memory for 4 tiles. The result
+ // is 4 pending tree tiles get memory, and none of the active tree tiles
+ // get memory.
Initialize(4, ALLOW_ANYTHING, SMOOTHNESS_TAKES_PRIORITY);
TileVector active_tree_tiles =
CreateTiles(5, TilePriorityForEventualBin(), TilePriority());
TileVector pending_tree_tiles =
- CreateTiles(5, TilePriority(), TilePriorityForNowBin());
+ CreateTiles(5, TilePriority(), TilePriorityRequiredForActivation());
tile_manager()->ManageTiles();
+ EXPECT_EQ(4, AssignedMemoryCount(active_tree_tiles));
+ EXPECT_EQ(0, AssignedMemoryCount(pending_tree_tiles));
+
+ tile_manager()->ReassignMemoryToOOMTilesRequiredForActivation();
+
EXPECT_EQ(0, AssignedMemoryCount(active_tree_tiles));
EXPECT_EQ(4, AssignedMemoryCount(pending_tree_tiles));
}
TEST_F(TileManagerTest, TotalOOMActiveSoonMemoryToPending) {
- // 5 tiles on active tree soon bin, 5 tiles on pending tree now bin,
- // but only enough memory for 4 tiles. The result is 4 pending tree tiles
- // get memory, and none of the active tree tiles get memory.
+ // 5 tiles on active tree soon bin, 5 tiles on pending tree that are
+ // required for activation, but only enough memory for 4 tiles. The result
+ // is 4 pending tree tiles get memory, and none of the active tree tiles
+ // get memory.
Initialize(4, ALLOW_ANYTHING, SMOOTHNESS_TAKES_PRIORITY);
TileVector active_tree_tiles =
CreateTiles(5, TilePriorityForSoonBin(), TilePriority());
TileVector pending_tree_tiles =
- CreateTiles(5, TilePriority(), TilePriorityForNowBin());
+ CreateTiles(5, TilePriority(), TilePriorityRequiredForActivation());
tile_manager()->ManageTiles();
+ EXPECT_EQ(4, AssignedMemoryCount(active_tree_tiles));
+ EXPECT_EQ(0, AssignedMemoryCount(pending_tree_tiles));
+
+ tile_manager()->ReassignMemoryToOOMTilesRequiredForActivation();
+
EXPECT_EQ(0, AssignedMemoryCount(active_tree_tiles));
EXPECT_EQ(4, AssignedMemoryCount(pending_tree_tiles));
}
diff --git a/cc/resources/worker_pool.cc b/cc/resources/worker_pool.cc
index c2dba6c..b4e96f6 100644
--- a/cc/resources/worker_pool.cc
+++ b/cc/resources/worker_pool.cc
@@ -64,6 +64,15 @@ bool WorkerPoolTask::HasCompleted() const {
return did_complete_;
}
+GraphNode::GraphNode(internal::WorkerPoolTask* task, unsigned priority)
+ : task_(task),
+ priority_(priority),
+ num_dependencies_(0) {
+}
+
+GraphNode::~GraphNode() {
+}
+
} // namespace internal
// Internal to the worker pool. Any data or logic that needs to be
@@ -90,8 +99,8 @@ class WorkerPool::Inner : public base::DelegateSimpleThread::Delegate {
private:
class PriorityComparator {
public:
- bool operator()(const GraphNode* a,
- const GraphNode* b) {
+ bool operator()(const internal::GraphNode* a,
+ const internal::GraphNode* b) {
// In this system, numerically lower priority is run first.
if (a->priority() != b->priority())
return a->priority() > b->priority();
@@ -125,8 +134,8 @@ class WorkerPool::Inner : public base::DelegateSimpleThread::Delegate {
GraphNodeMap pending_tasks_;
// Ordered set of tasks that are ready to run.
- typedef std::priority_queue<GraphNode*,
- std::vector<GraphNode*>,
+ typedef std::priority_queue<internal::GraphNode*,
+ std::vector<internal::GraphNode*>,
PriorityComparator> TaskQueue;
TaskQueue ready_to_run_tasks_;
@@ -216,11 +225,13 @@ void WorkerPool::Inner::SetTaskGraph(TaskGraph* graph) {
it != completed_tasks_.end(); ++it) {
internal::WorkerPoolTask* task = it->get();
- scoped_ptr<GraphNode> node = new_pending_tasks.take_and_erase(task);
+ scoped_ptr<internal::GraphNode> node = new_pending_tasks.take_and_erase(
+ task);
if (node) {
- for (GraphNode::Vector::const_iterator it = node->dependents().begin();
+ for (internal::GraphNode::Vector::const_iterator it =
+ node->dependents().begin();
it != node->dependents().end(); ++it) {
- GraphNode* dependent_node = *it;
+ internal::GraphNode* dependent_node = *it;
dependent_node->remove_dependency();
}
}
@@ -243,7 +254,7 @@ void WorkerPool::Inner::SetTaskGraph(TaskGraph* graph) {
it != new_pending_tasks.end(); ++it) {
internal::WorkerPoolTask* task = it->first;
DCHECK(task);
- GraphNode* node = it->second;
+ internal::GraphNode* node = it->second;
// Completed tasks should not exist in |new_pending_tasks|.
DCHECK(!task->HasFinishedRunning());
@@ -336,11 +347,13 @@ void WorkerPool::Inner::Run() {
// Now iterate over all dependents to remove dependency and check
// if they are ready to run.
- scoped_ptr<GraphNode> node = running_tasks_.take_and_erase(task.get());
+ scoped_ptr<internal::GraphNode> node = running_tasks_.take_and_erase(
+ task.get());
if (node) {
- for (GraphNode::Vector::const_iterator it = node->dependents().begin();
+ for (internal::GraphNode::Vector::const_iterator it =
+ node->dependents().begin();
it != node->dependents().end(); ++it) {
- GraphNode* dependent_node = *it;
+ internal::GraphNode* dependent_node = *it;
dependent_node->remove_dependency();
// Task is ready if it has no dependencies. Add it to
@@ -359,15 +372,6 @@ void WorkerPool::Inner::Run() {
has_ready_to_run_tasks_cv_.Signal();
}
-WorkerPool::GraphNode::GraphNode()
- : task_(NULL),
- priority_(0),
- num_dependencies_(0) {
-}
-
-WorkerPool::GraphNode::~GraphNode() {
-}
-
WorkerPool::WorkerPool(size_t num_threads,
const std::string& thread_name_prefix)
: in_dispatch_completion_callbacks_(false),
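
A minimal self-contained sketch (not part of the patch) of the scheduling rule spelled out in PriorityComparator above: with a "greater than" comparator, std::priority_queue pops the numerically lowest priority first. FakeNode is a hypothetical stand-in for internal::GraphNode, and the real comparator also has a tie-breaking rule that this hunk truncates.

#include <cassert>
#include <queue>
#include <vector>

struct FakeNode {
  explicit FakeNode(unsigned priority) : priority_(priority) {}
  unsigned priority() const { return priority_; }
  unsigned priority_;
};

struct LowestPriorityFirst {
  bool operator()(const FakeNode* a, const FakeNode* b) const {
    // priority_queue pops the "largest" element, so returning true when |a|
    // has the numerically higher priority pushes it toward the back.
    return a->priority() > b->priority();
  }
};

int main() {
  FakeNode n2(2u), n0(0u), n1(1u);
  std::priority_queue<FakeNode*, std::vector<FakeNode*>, LowestPriorityFirst> q;
  q.push(&n2);
  q.push(&n0);
  q.push(&n1);
  assert(q.top()->priority() == 0u);  // numerically lowest priority runs first
  return 0;
}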
diff --git a/cc/resources/worker_pool.h b/cc/resources/worker_pool.h
index 57bc774..01fb6252 100644
--- a/cc/resources/worker_pool.h
+++ b/cc/resources/worker_pool.h
@@ -47,6 +47,39 @@ class CC_EXPORT WorkerPoolTask
bool did_complete_;
};
+class CC_EXPORT GraphNode {
+ public:
+ typedef std::vector<GraphNode*> Vector;
+
+ GraphNode(internal::WorkerPoolTask* task, unsigned priority);
+ ~GraphNode();
+
+ WorkerPoolTask* task() { return task_; }
+
+ void add_dependent(GraphNode* dependent) {
+ DCHECK(dependent);
+ dependents_.push_back(dependent);
+ }
+ const Vector& dependents() const { return dependents_; }
+
+ unsigned priority() const { return priority_; }
+
+ unsigned num_dependencies() const { return num_dependencies_; }
+ void add_dependency() { ++num_dependencies_; }
+ void remove_dependency() {
+ DCHECK(num_dependencies_);
+ --num_dependencies_;
+ }
+
+ private:
+ WorkerPoolTask* task_;
+ Vector dependents_;
+ unsigned priority_;
+ unsigned num_dependencies_;
+
+ DISALLOW_COPY_AND_ASSIGN(GraphNode);
+};
+
} // namespace internal
} // namespace cc
@@ -76,49 +109,12 @@ class CC_EXPORT WorkerPool {
virtual void CheckForCompletedTasks();
protected:
- class CC_EXPORT GraphNode {
- public:
- typedef std::vector<GraphNode*> Vector;
-
- GraphNode();
- ~GraphNode();
-
- void set_task(internal::WorkerPoolTask* task) { task_ = task; }
- internal::WorkerPoolTask* task() { return task_; }
-
- void add_dependent(GraphNode* dependent) {
- DCHECK(dependent);
- dependents_.push_back(dependent);
- }
- const Vector& dependents() const {
- return dependents_;
- }
-
- void set_priority(unsigned priority) { priority_ = priority; }
- unsigned priority() const { return priority_; }
-
- unsigned num_dependencies() const {
- return num_dependencies_;
- }
- void add_dependency() { ++num_dependencies_; }
- void remove_dependency() {
- DCHECK(num_dependencies_);
- --num_dependencies_;
- }
-
- private:
- internal::WorkerPoolTask* task_;
- Vector dependents_;
- unsigned priority_;
- unsigned num_dependencies_;
-
- DISALLOW_COPY_AND_ASSIGN(GraphNode);
- };
// A task graph contains a unique set of tasks with edges between
// dependencies pointing in the direction of the dependents. Each task
// needs to be assigned a unique priority and a run count that matches
// the number of dependencies.
- typedef ScopedPtrHashMap<internal::WorkerPoolTask*, GraphNode> GraphNodeMap;
+ typedef ScopedPtrHashMap<internal::WorkerPoolTask*, internal::GraphNode>
+ GraphNodeMap;
typedef GraphNodeMap TaskGraph;
WorkerPool(size_t num_threads, const std::string& thread_name_prefix);
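
A minimal self-contained sketch (not part of the patch) of the bookkeeping contract behind the relocated internal::GraphNode: each edge is recorded on both ends, exactly as the callers above do with add_dependent()/add_dependency(). MiniNode is a hypothetical stand-in, since constructing real WorkerPoolTasks needs the rest of cc.

#include <cassert>
#include <cstddef>
#include <vector>

class MiniNode {
 public:
  explicit MiniNode(unsigned priority)
      : priority_(priority), num_dependencies_(0) {}

  void add_dependent(MiniNode* dependent) { dependents_.push_back(dependent); }
  const std::vector<MiniNode*>& dependents() const { return dependents_; }

  unsigned priority() const { return priority_; }

  unsigned num_dependencies() const { return num_dependencies_; }
  void add_dependency() { ++num_dependencies_; }
  void remove_dependency() { --num_dependencies_; }

 private:
  unsigned priority_;
  unsigned num_dependencies_;
  std::vector<MiniNode*> dependents_;
};

int main() {
  // "finished" depends on "raster": record the edge on both nodes, as the
  // worker_pool tests below do.
  MiniNode finished(0u);
  MiniNode raster(1u);
  raster.add_dependent(&finished);
  finished.add_dependency();
  assert(finished.num_dependencies() == 1u);  // not ready to run yet

  // When "raster" completes, each dependent drops one dependency and becomes
  // ready once its count hits zero (mirroring the loop in Inner::Run()).
  for (size_t i = 0; i < raster.dependents().size(); ++i)
    raster.dependents()[i]->remove_dependency();
  assert(finished.num_dependencies() == 0u);  // ready to run
  return 0;
}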
diff --git a/cc/resources/worker_pool_perftest.cc b/cc/resources/worker_pool_perftest.cc
index d708cf5..aee1f24 100644
--- a/cc/resources/worker_pool_perftest.cc
+++ b/cc/resources/worker_pool_perftest.cc
@@ -69,41 +69,31 @@ class PerfWorkerPool : public WorkerPool {
unsigned max_depth,
unsigned num_children_per_node) {
TaskVector tasks;
- unsigned priority = 0u;
TaskGraph graph;
- scoped_ptr<GraphNode> root_node;
- if (root_task) {
- root_node = make_scoped_ptr(new GraphNode);
- root_node->set_task(root_task);
- }
+ scoped_ptr<internal::GraphNode> root_node;
+ if (root_task)
+ root_node = make_scoped_ptr(new internal::GraphNode(root_task, 0u));
- scoped_ptr<GraphNode> leaf_node;
- if (leaf_task) {
- leaf_node = make_scoped_ptr(new GraphNode);
- leaf_node->set_task(leaf_task);
- }
+ scoped_ptr<internal::GraphNode> leaf_node;
+ if (leaf_task)
+ leaf_node = make_scoped_ptr(new internal::GraphNode(leaf_task, 0u));
if (max_depth) {
- priority = BuildTaskGraph(&tasks,
- &graph,
- root_node.get(),
- leaf_node.get(),
- priority,
- 0,
- max_depth,
- num_children_per_node);
+ BuildTaskGraph(&tasks,
+ &graph,
+ root_node.get(),
+ leaf_node.get(),
+ 0,
+ max_depth,
+ num_children_per_node);
}
- if (leaf_node) {
- leaf_node->set_priority(priority++);
+ if (leaf_node)
graph.set(leaf_task, leaf_node.Pass());
- }
- if (root_node) {
- root_node->set_priority(priority++);
+ if (root_node)
graph.set(root_task, root_node.Pass());
- }
SetTaskGraph(&graph);
@@ -113,28 +103,26 @@ class PerfWorkerPool : public WorkerPool {
private:
typedef std::vector<scoped_refptr<internal::WorkerPoolTask> > TaskVector;
- unsigned BuildTaskGraph(TaskVector* tasks,
- TaskGraph* graph,
- GraphNode* dependent_node,
- GraphNode* leaf_node,
- unsigned priority,
- unsigned current_depth,
- unsigned max_depth,
- unsigned num_children_per_node) {
+ void BuildTaskGraph(TaskVector* tasks,
+ TaskGraph* graph,
+ internal::GraphNode* dependent_node,
+ internal::GraphNode* leaf_node,
+ unsigned current_depth,
+ unsigned max_depth,
+ unsigned num_children_per_node) {
scoped_refptr<PerfWorkerPoolTaskImpl> task(new PerfWorkerPoolTaskImpl);
- scoped_ptr<GraphNode> node(new GraphNode);
- node->set_task(task.get());
+ scoped_ptr<internal::GraphNode> node(
+ new internal::GraphNode(task.get(), 0u));
if (current_depth < max_depth) {
for (unsigned i = 0; i < num_children_per_node; ++i) {
- priority = BuildTaskGraph(tasks,
- graph,
- node.get(),
- leaf_node,
- priority,
- current_depth + 1,
- max_depth,
- num_children_per_node);
+ BuildTaskGraph(tasks,
+ graph,
+ node.get(),
+ leaf_node,
+ current_depth + 1,
+ max_depth,
+ num_children_per_node);
}
} else if (leaf_node) {
leaf_node->add_dependent(node.get());
@@ -145,11 +133,8 @@ class PerfWorkerPool : public WorkerPool {
node->add_dependent(dependent_node);
dependent_node->add_dependency();
}
- node->set_priority(priority);
graph->set(task.get(), node.Pass());
tasks->push_back(task.get());
-
- return priority + 1;
}
TaskVector tasks_;
diff --git a/cc/resources/worker_pool_unittest.cc b/cc/resources/worker_pool_unittest.cc
index e2b2491..f7c5a95 100644
--- a/cc/resources/worker_pool_unittest.cc
+++ b/cc/resources/worker_pool_unittest.cc
@@ -77,24 +77,22 @@ class FakeWorkerPool : public WorkerPool {
base::Bind(&FakeWorkerPool::OnTasksCompleted,
base::Unretained(this)),
base::Closure()));
- scoped_ptr<GraphNode> completion_node(new GraphNode);
- completion_node->set_task(new_completion_task.get());
+ scoped_ptr<internal::GraphNode> completion_node(
+ new internal::GraphNode(new_completion_task.get(), 0u));
for (std::vector<Task>::const_iterator it = tasks.begin();
it != tasks.end(); ++it) {
scoped_refptr<FakeWorkerPoolTaskImpl> new_task(
new FakeWorkerPoolTaskImpl(it->callback, it->reply));
- scoped_ptr<GraphNode> node(new GraphNode);
- node->set_task(new_task.get());
- node->set_priority(it->priority);
+ scoped_ptr<internal::GraphNode> node(
+ new internal::GraphNode(new_task.get(), it->priority));
DCHECK(it->dependent_count);
for (unsigned i = 0; i < it->dependent_count; ++i) {
scoped_refptr<FakeWorkerPoolTaskImpl> new_dependent_task(
new FakeWorkerPoolTaskImpl(it->dependent, base::Closure()));
- scoped_ptr<GraphNode> dependent_node(new GraphNode);
- dependent_node->set_task(new_dependent_task.get());
- dependent_node->set_priority(it->priority);
+ scoped_ptr<internal::GraphNode> dependent_node(
+ new internal::GraphNode(new_dependent_task.get(), it->priority));
dependent_node->add_dependent(completion_node.get());
completion_node->add_dependency();
node->add_dependent(dependent_node.get());
diff --git a/cc/test/fake_tile_manager.cc b/cc/test/fake_tile_manager.cc
index 79a5e31..7afda91 100644
--- a/cc/test/fake_tile_manager.cc
+++ b/cc/test/fake_tile_manager.cc
@@ -15,6 +15,8 @@ class FakeRasterWorkerPool : public RasterWorkerPool {
FakeRasterWorkerPool() : RasterWorkerPool(NULL, 1) {}
virtual void ScheduleTasks(RasterTask::Queue* queue) OVERRIDE {}
+ virtual void OnRasterTasksFinished() OVERRIDE {}
+ virtual void OnRasterTasksRequiredForActivationFinished() OVERRIDE {}
};
} // namespace
@@ -35,4 +37,15 @@ FakeTileManager::FakeTileManager(TileManagerClient* client,
1,
NULL,
resource_provider->best_texture_format()) {}
+
+void FakeTileManager::ReassignMemoryToOOMTilesRequiredForActivation() {
+ ReassignGpuMemoryToOOMTilesRequiredForActivation();
+}
+
+bool FakeTileManager::HasBeenAssignedMemory(Tile* tile) {
+ return std::find(tiles_that_need_to_be_rasterized().begin(),
+ tiles_that_need_to_be_rasterized().end(),
+ tile) != tiles_that_need_to_be_rasterized().end();
+}
+
} // namespace cc
diff --git a/cc/test/fake_tile_manager.h b/cc/test/fake_tile_manager.h
index a061ee0..f79ea54 100644
--- a/cc/test/fake_tile_manager.h
+++ b/cc/test/fake_tile_manager.h
@@ -15,6 +15,11 @@ class FakeTileManager : public TileManager {
FakeTileManager(TileManagerClient* client,
ResourceProvider* resource_provider);
+ virtual void ScheduleTasks() OVERRIDE {}
+
+ void ReassignMemoryToOOMTilesRequiredForActivation();
+ bool HasBeenAssignedMemory(Tile* tile);
+
virtual ~FakeTileManager() { }
};
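
A minimal self-contained sketch (not part of the patch) of the test-double pattern FakeTileManager uses above: a subclass stubs out ScheduleTasks() and publishes a thin public forwarder for a protected hook so unit tests can drive it directly. The class names here are hypothetical.

#include <cassert>

class Manager {
 public:
  Manager() : reassign_calls_(0) {}
  virtual ~Manager() {}
  int reassign_calls() const { return reassign_calls_; }

 protected:
  // Normally only reachable from production code paths.
  void ReassignInternal() { ++reassign_calls_; }
  // Talks to a worker pool in production; virtual so tests can stub it.
  virtual void ScheduleTasks() {}

 private:
  int reassign_calls_;
};

class FakeManager : public Manager {
 public:
  // Tests reach the protected hook through a public forwarder.
  void Reassign() { ReassignInternal(); }

 protected:
  // Stubbed so tests never schedule real raster work.
  virtual void ScheduleTasks() {}
};

int main() {
  FakeManager fake;
  fake.Reassign();
  assert(fake.reassign_calls() == 1);
  return 0;
}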
diff --git a/cc/test/layer_tree_test.cc b/cc/test/layer_tree_test.cc
index 1d4a797..6a43505 100644
--- a/cc/test/layer_tree_test.cc
+++ b/cc/test/layer_tree_test.cc
@@ -41,6 +41,10 @@ bool TestHooks::CanActivatePendingTree(LayerTreeHostImpl* host_impl) {
return true;
}
+bool TestHooks::CanActivatePendingTreeIfNeeded(LayerTreeHostImpl* host_impl) {
+ return true;
+}
+
// Adapts LayerTreeHostImpl for test. Runs real code, then invokes test hooks.
class LayerTreeHostImplForTesting : public LayerTreeHostImpl {
public:
@@ -114,7 +118,7 @@ class LayerTreeHostImplForTesting : public LayerTreeHostImpl {
if (!pending_tree())
return;
- if (!test_hooks_->CanActivatePendingTree(this))
+ if (!test_hooks_->CanActivatePendingTreeIfNeeded(this))
return;
LayerTreeHostImpl::ActivatePendingTreeIfNeeded();
diff --git a/cc/test/layer_tree_test.h b/cc/test/layer_tree_test.h
index 1dbb861..958405a7 100644
--- a/cc/test/layer_tree_test.h
+++ b/cc/test/layer_tree_test.h
@@ -64,6 +64,7 @@ class TestHooks : public AnimationDelegate {
virtual void ScheduleComposite() {}
virtual void DidDeferCommit() {}
virtual bool CanActivatePendingTree(LayerTreeHostImpl* host_impl);
+ virtual bool CanActivatePendingTreeIfNeeded(LayerTreeHostImpl* host_impl);
virtual void DidSetVisibleOnImplTree(LayerTreeHostImpl* host_impl,
bool visible) {}
diff --git a/cc/trees/layer_tree_host_impl.cc b/cc/trees/layer_tree_host_impl.cc
index 56562b5..a2585bc 100644
--- a/cc/trees/layer_tree_host_impl.cc
+++ b/cc/trees/layer_tree_host_impl.cc
@@ -1348,8 +1348,6 @@ void LayerTreeHostImpl::ActivatePendingTreeIfNeeded() {
DCHECK(pending_tree_);
CHECK(settings_.impl_side_painting);
- // This call may activate the tree.
- CheckForCompletedTileUploads();
if (!pending_tree_)
return;
diff --git a/cc/trees/layer_tree_host_unittest.cc b/cc/trees/layer_tree_host_unittest.cc
index 849cb38..cb7b68d 100644
--- a/cc/trees/layer_tree_host_unittest.cc
+++ b/cc/trees/layer_tree_host_unittest.cc
@@ -750,6 +750,14 @@ class LayerTreeHostTestFrameTimeUpdatesAfterActivationFails
}
virtual bool CanActivatePendingTree(LayerTreeHostImpl* impl) OVERRIDE {
+ if (frame_ >= 1)
+ return true;
+
+ return false;
+ }
+
+ virtual bool CanActivatePendingTreeIfNeeded(LayerTreeHostImpl* impl)
+ OVERRIDE {
frame_++;
if (frame_ == 1) {
first_frame_time_ = impl->CurrentFrameTimeTicks();
diff --git a/cc/trees/layer_tree_host_unittest_scroll.cc b/cc/trees/layer_tree_host_unittest_scroll.cc
index 040fece..47393f9 100644
--- a/cc/trees/layer_tree_host_unittest_scroll.cc
+++ b/cc/trees/layer_tree_host_unittest_scroll.cc
@@ -570,6 +570,11 @@ class ImplSidePaintingScrollTestSimple : public ImplSidePaintingScrollTest {
return can_activate_;
}
+ virtual bool CanActivatePendingTreeIfNeeded(LayerTreeHostImpl* impl)
+ OVERRIDE {
+ return can_activate_;
+ }
+
virtual void CommitCompleteOnThread(LayerTreeHostImpl* impl) OVERRIDE {
// We force a second draw here of the first commit before activating
// the second commit.