summaryrefslogtreecommitdiffstats
path: root/gpu
diff options
context:
space:
mode:
authorapatrick@chromium.org <apatrick@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>2012-01-27 22:41:25 +0000
committerapatrick@chromium.org <apatrick@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>2012-01-27 22:41:25 +0000
commit9d2942dce0c0f28570714665d7bc714a20712582 (patch)
treecc75f89c82bc3fc9b7deb88b01c917031538904d /gpu
parentaeff803571b1bed52b2f5edcb033e56dfdb3b464 (diff)
downloadchromium_src-9d2942dce0c0f28570714665d7bc714a20712582.zip
chromium_src-9d2942dce0c0f28570714665d7bc714a20712582.tar.gz
chromium_src-9d2942dce0c0f28570714665d7bc714a20712582.tar.bz2
Force the GPU process to reschedule itself after a timeout so it responds to IPCs.
BUG=111514 Review URL: https://chromiumcodereview.appspot.com/9295021 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@119504 0039d316-1c4b-4281-b951-d872f2087c98
Diffstat (limited to 'gpu')
-rw-r--r--gpu/command_buffer/service/gpu_scheduler.cc53
-rw-r--r--gpu/command_buffer/service/gpu_scheduler.h12
2 files changed, 61 insertions, 4 deletions
diff --git a/gpu/command_buffer/service/gpu_scheduler.cc b/gpu/command_buffer/service/gpu_scheduler.cc
index cff93f2..7df1b16 100644
--- a/gpu/command_buffer/service/gpu_scheduler.cc
+++ b/gpu/command_buffer/service/gpu_scheduler.cc
@@ -18,6 +18,10 @@ using ::base::SharedMemory;
namespace gpu {
+namespace {
+const int64 kRescheduleTimeOutDelay = 100;
+}
+
GpuScheduler::GpuScheduler(
CommandBuffer* command_buffer,
AsyncAPIInterface* handler,
@@ -26,7 +30,9 @@ GpuScheduler::GpuScheduler(
handler_(handler),
decoder_(decoder),
parser_(NULL),
- unscheduled_count_(0) {
+ unscheduled_count_(0),
+ rescheduled_count_(0),
+ reschedule_task_factory_(ALLOW_THIS_IN_INITIALIZER_LIST(this)) {
}
GpuScheduler::~GpuScheduler() {
@@ -86,12 +92,40 @@ void GpuScheduler::SetScheduled(bool scheduled) {
"new unscheduled_count_",
unscheduled_count_ + (scheduled? -1 : 1));
if (scheduled) {
- --unscheduled_count_;
+ // If the scheduler was rescheduled after a timeout, ignore the subsequent
+ // calls to SetScheduled when they eventually arrive until they are all
+ // accounted for.
+ if (rescheduled_count_ > 0) {
+ --rescheduled_count_;
+ return;
+ } else {
+ --unscheduled_count_;
+ }
+
DCHECK_GE(unscheduled_count_, 0);
- if (unscheduled_count_ == 0 && !scheduled_callback_.is_null())
- scheduled_callback_.Run();
+ if (unscheduled_count_ == 0) {
+ // When the scheduler transitions from the unscheduled to the scheduled
+ // state, cancel the task that would reschedule it after a timeout.
+ reschedule_task_factory_.InvalidateWeakPtrs();
+
+ if (!scheduled_callback_.is_null())
+ scheduled_callback_.Run();
+ }
} else {
+ if (unscheduled_count_ == 0) {
+#if defined(OS_WIN)
+ // When the scheduler transitions from scheduled to unscheduled, post a
+ // delayed task that will force it back into a scheduled state after a
+ // timeout.
+ MessageLoop::current()->PostDelayedTask(
+ FROM_HERE,
+ base::Bind(&GpuScheduler::RescheduleTimeOut,
+ reschedule_task_factory_.GetWeakPtr()),
+ base::TimeDelta::FromMilliseconds(kRescheduleTimeOutDelay));
+#endif
+ }
+
++unscheduled_count_;
}
}
@@ -184,6 +218,17 @@ bool GpuScheduler::PollUnscheduleFences() {
return true;
}
+void GpuScheduler::RescheduleTimeOut() {
+ int new_count = unscheduled_count_ + rescheduled_count_;
+
+ rescheduled_count_ = 0;
+
+ while (unscheduled_count_)
+ SetScheduled(true);
+
+ rescheduled_count_ = new_count;
+}
+
GpuScheduler::UnscheduleFence::UnscheduleFence(
gfx::GLFence* fence_, base::Closure task_): fence(fence_), task(task_) {
}
diff --git a/gpu/command_buffer/service/gpu_scheduler.h b/gpu/command_buffer/service/gpu_scheduler.h
index 46afc60..7a7bdff 100644
--- a/gpu/command_buffer/service/gpu_scheduler.h
+++ b/gpu/command_buffer/service/gpu_scheduler.h
@@ -75,6 +75,10 @@ class GpuScheduler
// by them and returns whether all fences were complete.
bool PollUnscheduleFences();
+ // Artificially reschedule if the scheduler is still unscheduled after a
+ // timeout.
+ void RescheduleTimeOut();
+
// The GpuScheduler holds a weak reference to the CommandBuffer. The
// CommandBuffer owns the GpuScheduler and holds a strong reference to it
// through the ProcessCommands callback.
@@ -96,6 +100,14 @@ class GpuScheduler
// Greater than zero if this is waiting to be rescheduled before continuing.
int unscheduled_count_;
+ // The number of times this scheduler has been artificially rescheduled on
+ // account of a timeout.
+ int rescheduled_count_;
+
+ // A factory for outstanding rescheduling tasks that is invalidated whenever
+ // the scheduler is rescheduled.
+ base::WeakPtrFactory<GpuScheduler> reschedule_task_factory_;
+
// The GpuScheduler will unschedule itself in the event that further GL calls
// are issued to it before all these fences have been crossed by the GPU.
struct UnscheduleFence {