 components/mus/gles2/command_buffer_driver.cc |  2
 components/mus/gles2/command_buffer_local.cc  |  2
 content/common/gpu/gpu_channel.cc             |  5
 content/common/gpu/gpu_channel.h              |  8
 content/common/gpu/gpu_command_buffer_stub.cc | 41
 content/common/gpu/gpu_command_buffer_stub.h  |  8
 gpu/command_buffer/service/gpu_scheduler.cc   | 92
 gpu/command_buffer/service/gpu_scheduler.h    | 25
 8 files changed, 52 insertions(+), 131 deletions(-)
diff --git a/components/mus/gles2/command_buffer_driver.cc b/components/mus/gles2/command_buffer_driver.cc
index 814787e..d634fcb 100644
--- a/components/mus/gles2/command_buffer_driver.cc
+++ b/components/mus/gles2/command_buffer_driver.cc
@@ -288,7 +288,7 @@ bool CommandBufferDriver::OnWaitSyncPoint(uint32_t sync_point) {
gpu_state_->sync_point_manager()->AddSyncPointCallback(
sync_point, base::Bind(&CommandBufferDriver::OnSyncPointRetired,
weak_factory_.GetWeakPtr()));
- return scheduler_->IsScheduled();
+ return scheduler_->scheduled();
}
void CommandBufferDriver::OnSyncPointRetired() {
diff --git a/components/mus/gles2/command_buffer_local.cc b/components/mus/gles2/command_buffer_local.cc
index f9e073c..27442cb 100644
--- a/components/mus/gles2/command_buffer_local.cc
+++ b/components/mus/gles2/command_buffer_local.cc
@@ -250,7 +250,7 @@ bool CommandBufferLocal::OnWaitSyncPoint(uint32_t sync_point) {
gpu_state_->sync_point_manager()->AddSyncPointCallback(
sync_point, base::Bind(&CommandBufferLocal::OnSyncPointRetired,
weak_factory_.GetWeakPtr()));
- return scheduler_->IsScheduled();
+ return scheduler_->scheduled();
}
void CommandBufferLocal::OnParseError() {
diff --git a/content/common/gpu/gpu_channel.cc b/content/common/gpu/gpu_channel.cc
index 902a5f2..49d086c 100644
--- a/content/common/gpu/gpu_channel.cc
+++ b/content/common/gpu/gpu_channel.cc
@@ -683,7 +683,8 @@ void GpuChannel::OnRemoveSubscription(unsigned int target) {
new GpuHostMsg_RemoveSubscription(client_id_, target));
}
-void GpuChannel::StubSchedulingChanged(bool scheduled) {
+void GpuChannel::OnStubSchedulingChanged(GpuCommandBufferStub* stub,
+ bool scheduled) {
bool a_stub_was_descheduled = num_stubs_descheduled_ > 0;
if (scheduled) {
num_stubs_descheduled_--;
@@ -1009,7 +1010,7 @@ void GpuChannel::OnDestroyCommandBuffer(int32 route_id) {
// stub, we need to make sure to reschedule the GpuChannel here.
if (!stub->IsScheduled()) {
// This stub won't get a chance to reschedule, so update the count now.
- StubSchedulingChanged(true);
+ OnStubSchedulingChanged(stub.get(), true);
}
}
diff --git a/content/common/gpu/gpu_channel.h b/content/common/gpu/gpu_channel.h
index d2ed46c..e111924 100644
--- a/content/common/gpu/gpu_channel.h
+++ b/content/common/gpu/gpu_channel.h
@@ -108,16 +108,10 @@ class CONTENT_EXPORT GpuChannel
void OnAddSubscription(unsigned int target) override;
void OnRemoveSubscription(unsigned int target) override;
- // This is called when a command buffer transitions from the unscheduled
- // state to the scheduled state, which potentially means the channel
- // transitions from the unscheduled to the scheduled state. When this occurs
- // deferred IPC messaged are handled.
- void OnScheduled();
-
// This is called when a command buffer transitions between scheduled and
// descheduled states. When any stub is descheduled, we stop preempting
// other channels.
- void StubSchedulingChanged(bool scheduled);
+ void OnStubSchedulingChanged(GpuCommandBufferStub* stub, bool scheduled);
CreateCommandBufferResult CreateViewCommandBuffer(
const gfx::GLSurfaceHandle& window,
diff --git a/content/common/gpu/gpu_command_buffer_stub.cc b/content/common/gpu/gpu_command_buffer_stub.cc
index 9db0f8e..3388f33 100644
--- a/content/common/gpu/gpu_command_buffer_stub.cc
+++ b/content/common/gpu/gpu_command_buffer_stub.cc
@@ -203,7 +203,7 @@ GpuCommandBufferStub::GpuCommandBufferStub(
last_flush_count_(0),
last_memory_allocation_valid_(false),
watchdog_(watchdog),
- sync_point_wait_count_(0),
+ waiting_for_sync_point_(false),
previous_processed_num_(0),
active_url_(active_url),
total_gpu_memory_(0) {
@@ -333,7 +333,7 @@ bool GpuCommandBufferStub::Send(IPC::Message* message) {
}
bool GpuCommandBufferStub::IsScheduled() {
- return (!scheduler_.get() || scheduler_->IsScheduled());
+ return (!scheduler_.get() || scheduler_->scheduled());
}
void GpuCommandBufferStub::PollWork() {
@@ -425,8 +425,7 @@ void GpuCommandBufferStub::ScheduleDelayedWork(base::TimeDelta delay) {
// for more work at the rate idle work is performed. This also ensures
// that idle work is done as efficiently as possible without any
// unnecessary delays.
- if (scheduler_.get() &&
- scheduler_->IsScheduled() &&
+ if (scheduler_.get() && scheduler_->scheduled() &&
scheduler_->HasMoreIdleWork()) {
delay = base::TimeDelta();
}
@@ -650,9 +649,8 @@ void GpuCommandBufferStub::OnInitialize(
base::Unretained(scheduler_.get())));
command_buffer_->SetParseErrorCallback(
base::Bind(&GpuCommandBufferStub::OnParseError, base::Unretained(this)));
- scheduler_->SetSchedulingChangedCallback(
- base::Bind(&GpuChannel::StubSchedulingChanged,
- base::Unretained(channel_)));
+ scheduler_->SetSchedulingChangedCallback(base::Bind(
+ &GpuCommandBufferStub::OnSchedulingChanged, base::Unretained(this)));
if (watchdog_) {
scheduler_->SetCommandProcessedCallback(
@@ -748,6 +746,12 @@ void GpuCommandBufferStub::OnParseError() {
CheckContextLost();
}
+void GpuCommandBufferStub::OnSchedulingChanged(bool scheduled) {
+ TRACE_EVENT1("gpu", "GpuCommandBufferStub::OnSchedulingChanged", "scheduled",
+ scheduled);
+ channel_->OnStubSchedulingChanged(this, scheduled);
+}
+
void GpuCommandBufferStub::OnWaitForTokenInRange(int32 start,
int32 end,
IPC::Message* reply_message) {
@@ -930,6 +934,8 @@ void GpuCommandBufferStub::OnRetireSyncPoint(uint32 sync_point) {
}
bool GpuCommandBufferStub::OnWaitSyncPoint(uint32 sync_point) {
+ DCHECK(!waiting_for_sync_point_);
+ DCHECK(scheduler_->scheduled());
if (!sync_point)
return true;
GpuChannelManager* manager = channel_->gpu_channel_manager();
@@ -938,27 +944,26 @@ bool GpuCommandBufferStub::OnWaitSyncPoint(uint32 sync_point) {
return true;
}
- if (sync_point_wait_count_ == 0) {
- TRACE_EVENT_ASYNC_BEGIN1("gpu", "WaitSyncPoint", this,
- "GpuCommandBufferStub", this);
- }
+ TRACE_EVENT_ASYNC_BEGIN1("gpu", "WaitSyncPoint", this, "GpuCommandBufferStub",
+ this);
+
scheduler_->SetScheduled(false);
- ++sync_point_wait_count_;
+ waiting_for_sync_point_ = true;
manager->sync_point_manager()->AddSyncPointCallback(
sync_point,
base::Bind(&RunOnThread, task_runner_,
base::Bind(&GpuCommandBufferStub::OnWaitSyncPointCompleted,
this->AsWeakPtr(), sync_point)));
- return scheduler_->IsScheduled();
+ return !waiting_for_sync_point_;
}
void GpuCommandBufferStub::OnWaitSyncPointCompleted(uint32 sync_point) {
+ DCHECK(waiting_for_sync_point_);
+ DCHECK(!scheduler_->scheduled());
+ TRACE_EVENT_ASYNC_END1("gpu", "WaitSyncPoint", this, "GpuCommandBufferStub",
+ this);
PullTextureUpdates(sync_point);
- --sync_point_wait_count_;
- if (sync_point_wait_count_ == 0) {
- TRACE_EVENT_ASYNC_END1("gpu", "WaitSyncPoint", this,
- "GpuCommandBufferStub", this);
- }
+ waiting_for_sync_point_ = false;
scheduler_->SetScheduled(true);
}
diff --git a/content/common/gpu/gpu_command_buffer_stub.h b/content/common/gpu/gpu_command_buffer_stub.h
index d2c3f4f..9e1e15c 100644
--- a/content/common/gpu/gpu_command_buffer_stub.h
+++ b/content/common/gpu/gpu_command_buffer_stub.h
@@ -216,11 +216,13 @@ class GpuCommandBufferStub
gfx::BufferFormat format,
uint32 internalformat);
void OnDestroyImage(int32 id);
+ void OnCreateStreamTexture(uint32 texture_id,
+ int32 stream_id,
+ bool* succeeded);
void OnCommandProcessed();
void OnParseError();
- void OnCreateStreamTexture(
- uint32 texture_id, int32 stream_id, bool* succeeded);
+ void OnSchedulingChanged(bool scheduled);
void ReportState();
@@ -283,7 +285,7 @@ class GpuCommandBufferStub
// A queue of sync points associated with this stub.
std::deque<uint32> sync_points_;
- int sync_point_wait_count_;
+ bool waiting_for_sync_point_;
base::TimeTicks process_delayed_work_time_;
uint32_t previous_processed_num_;
diff --git a/gpu/command_buffer/service/gpu_scheduler.cc b/gpu/command_buffer/service/gpu_scheduler.cc
index a859619..85aaa30 100644
--- a/gpu/command_buffer/service/gpu_scheduler.cc
+++ b/gpu/command_buffer/service/gpu_scheduler.cc
@@ -15,10 +15,6 @@
#include "ui/gl/gl_fence.h"
#include "ui/gl/gl_switches.h"
-#if defined(OS_WIN)
-#include "base/win/windows_version.h"
-#endif
-
using ::base::SharedMemory;
namespace gpu {
@@ -33,13 +29,10 @@ GpuScheduler::GpuScheduler(CommandBufferServiceBase* command_buffer,
: command_buffer_(command_buffer),
handler_(handler),
decoder_(decoder),
- unscheduled_count_(0),
- rescheduled_count_(0),
- was_preempted_(false),
- reschedule_task_factory_(this) {}
+ scheduled_(true),
+ was_preempted_(false) {}
-GpuScheduler::~GpuScheduler() {
-}
+GpuScheduler::~GpuScheduler() {}
void GpuScheduler::PutChanged() {
TRACE_EVENT1(
@@ -58,10 +51,6 @@ void GpuScheduler::PutChanged() {
if (state.error != error::kNoError)
return;
- // One of the unschedule fence tasks might have unscheduled us.
- if (!IsScheduled())
- return;
-
base::TimeTicks begin_time(base::TimeTicks::Now());
error::Error error = error::kNoError;
if (decoder_)
@@ -70,12 +59,12 @@ void GpuScheduler::PutChanged() {
if (IsPreempted())
break;
- DCHECK(IsScheduled());
+ DCHECK(scheduled());
error = parser_->ProcessCommands(CommandParser::kParseCommandsSlice);
if (error == error::kDeferCommandUntilLater) {
- DCHECK_GT(unscheduled_count_, 0);
+ DCHECK(!scheduled());
break;
}
@@ -93,7 +82,7 @@ void GpuScheduler::PutChanged() {
if (!command_processed_callback_.is_null())
command_processed_callback_.Run();
- if (unscheduled_count_ > 0)
+ if (!scheduled())
break;
}
@@ -108,57 +97,13 @@ void GpuScheduler::PutChanged() {
}
void GpuScheduler::SetScheduled(bool scheduled) {
- TRACE_EVENT2("gpu", "GpuScheduler:SetScheduled", "this", this,
- "new unscheduled_count_",
- unscheduled_count_ + (scheduled? -1 : 1));
- if (scheduled) {
- // If the scheduler was rescheduled after a timeout, ignore the subsequent
- // calls to SetScheduled when they eventually arrive until they are all
- // accounted for.
- if (rescheduled_count_ > 0) {
- --rescheduled_count_;
- return;
- } else {
- --unscheduled_count_;
- }
-
- DCHECK_GE(unscheduled_count_, 0);
-
- if (unscheduled_count_ == 0) {
- TRACE_EVENT_ASYNC_END1("gpu", "ProcessingSwap", this,
- "GpuScheduler", this);
- // When the scheduler transitions from the unscheduled to the scheduled
- // state, cancel the task that would reschedule it after a timeout.
- reschedule_task_factory_.InvalidateWeakPtrs();
-
- if (!scheduling_changed_callback_.is_null())
- scheduling_changed_callback_.Run(true);
- }
- } else {
- ++unscheduled_count_;
- if (unscheduled_count_ == 1) {
- TRACE_EVENT_ASYNC_BEGIN1("gpu", "ProcessingSwap", this,
- "GpuScheduler", this);
-#if defined(OS_WIN)
- if (base::win::GetVersion() < base::win::VERSION_VISTA) {
- // When the scheduler transitions from scheduled to unscheduled, post a
- // delayed task that it will force it back into a scheduled state after
- // a timeout. This should only be necessary on pre-Vista.
- base::MessageLoop::current()->PostDelayedTask(
- FROM_HERE,
- base::Bind(&GpuScheduler::RescheduleTimeOut,
- reschedule_task_factory_.GetWeakPtr()),
- base::TimeDelta::FromMilliseconds(kRescheduleTimeOutDelay));
- }
-#endif
- if (!scheduling_changed_callback_.is_null())
- scheduling_changed_callback_.Run(false);
- }
- }
-}
-
-bool GpuScheduler::IsScheduled() {
- return unscheduled_count_ == 0;
+ TRACE_EVENT2("gpu", "GpuScheduler:SetScheduled", "this", this, "scheduled",
+ scheduled);
+ if (scheduled_ == scheduled)
+ return;
+ scheduled_ = scheduled;
+ if (!scheduling_changed_callback_.is_null())
+ scheduling_changed_callback_.Run(scheduled);
}
bool GpuScheduler::HasPendingQueries() const {
@@ -244,15 +189,4 @@ void GpuScheduler::PerformIdleWork() {
decoder_->PerformIdleWork();
}
-void GpuScheduler::RescheduleTimeOut() {
- int new_count = unscheduled_count_ + rescheduled_count_;
-
- rescheduled_count_ = 0;
-
- while (unscheduled_count_)
- SetScheduled(true);
-
- rescheduled_count_ = new_count;
-}
-
} // namespace gpu
diff --git a/gpu/command_buffer/service/gpu_scheduler.h b/gpu/command_buffer/service/gpu_scheduler.h
index c8d3826..11a6d31 100644
--- a/gpu/command_buffer/service/gpu_scheduler.h
+++ b/gpu/command_buffer/service/gpu_scheduler.h
@@ -65,13 +65,10 @@ class GPU_EXPORT GpuScheduler
}
// Sets whether commands should be processed by this scheduler. Setting to
- // false unschedules. Setting to true reschedules. Whether or not the
- // scheduler is currently scheduled is "reference counted". Every call with
- // false must eventually be paired by a call with true.
- void SetScheduled(bool is_scheduled);
+ // false unschedules. Setting to true reschedules.
+ void SetScheduled(bool scheduled);
- // Returns whether the scheduler is currently able to process more commands.
- bool IsScheduled();
+ bool scheduled() const { return scheduled_; }
// Returns whether the scheduler needs to be polled again in the future to
// process pending queries.
@@ -109,10 +106,6 @@ class GPU_EXPORT GpuScheduler
}
private:
- // Artificially reschedule if the scheduler is still unscheduled after a
- // timeout.
- void RescheduleTimeOut();
-
bool IsPreempted();
// The GpuScheduler holds a weak reference to the CommandBuffer. The
@@ -133,12 +126,8 @@ class GPU_EXPORT GpuScheduler
// This should be an argument to the constructor.
scoped_ptr<CommandParser> parser_;
- // Greater than zero if this is waiting to be rescheduled before continuing.
- int unscheduled_count_;
-
- // The number of times this scheduler has been artificially rescheduled on
- // account of a timeout.
- int rescheduled_count_;
+ // Whether the scheduler is currently able to process more commands.
+ bool scheduled_;
SchedulingChangedCallback scheduling_changed_callback_;
base::Closure descheduled_callback_;
@@ -148,10 +137,6 @@ class GPU_EXPORT GpuScheduler
scoped_refptr<PreemptionFlag> preemption_flag_;
bool was_preempted_;
- // A factory for outstanding rescheduling tasks that is invalidated whenever
- // the scheduler is rescheduled.
- base::WeakPtrFactory<GpuScheduler> reschedule_task_factory_;
-
DISALLOW_COPY_AND_ASSIGN(GpuScheduler);
};
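
For reference, the following is a minimal, self-contained sketch (not code from the patch; the TinyScheduler class name and the standalone main() are invented for illustration) of the boolean scheduling model that this change introduces in GpuScheduler: SetScheduled() stores a single scheduled_ flag and fires the scheduling-changed callback only on real transitions, replacing the old reference-counted unscheduled_count_/rescheduled_count_ bookkeeping and the pre-Vista reschedule timeout.

// Simplified illustration of the new boolean scheduling state.
// TinyScheduler is a hypothetical stand-in, not the Chromium class.
#include <functional>
#include <iostream>
#include <utility>

class TinyScheduler {
 public:
  using SchedulingChangedCallback = std::function<void(bool)>;

  void SetSchedulingChangedCallback(SchedulingChangedCallback callback) {
    scheduling_changed_callback_ = std::move(callback);
  }

  // Setting to false unschedules, setting to true reschedules. Calls that
  // repeat the current value are ignored, so the callback only observes
  // actual state transitions.
  void SetScheduled(bool scheduled) {
    if (scheduled_ == scheduled)
      return;
    scheduled_ = scheduled;
    if (scheduling_changed_callback_)
      scheduling_changed_callback_(scheduled_);
  }

  bool scheduled() const { return scheduled_; }

 private:
  bool scheduled_ = true;  // Schedulers start out able to process commands.
  SchedulingChangedCallback scheduling_changed_callback_;
};

int main() {
  TinyScheduler scheduler;
  scheduler.SetSchedulingChangedCallback([](bool scheduled) {
    std::cout << (scheduled ? "scheduled" : "descheduled") << "\n";
  });
  scheduler.SetScheduled(false);  // prints "descheduled"
  scheduler.SetScheduled(false);  // no-op: state unchanged
  scheduler.SetScheduled(true);   // prints "scheduled"
  return 0;
}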