author | jbauman@chromium.org <jbauman@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98> | 2014-04-07 23:21:46 +0000 |
---|---|---|
committer | jbauman@chromium.org <jbauman@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98> | 2014-04-07 23:21:46 +0000 |
commit | 7a94d7bcd3fbf62553d80c39e1eceabd3a5fc136 (patch) | |
tree | 9a549e2b9ce02db98ef760f5e8d79c7fb7bd1aa1 /content/common | |
parent | e44a19c58a50b005ace49fdb4a3d3a9d9392017f (diff) | |
Add WaitForToken and WaitForGetOffset messages.
These let the proxy wait for commands to complete without spinning on a GetStateFast message.
BUG=349632
Review URL: https://codereview.chromium.org/215033004
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@262246 0039d316-1c4b-4281-b951-d872f2087c98
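Editor's note (illustration only, not part of the commit): previously the proxy waited for a token by looping on the synchronous GetStateFast message, paying a renderer-to-GPU-process round trip per iteration; the new WaitForTokenInRange/WaitForGetOffsetInRange messages let the GPU process hold a single reply until the value enters the requested inclusive range or an error occurs. The self-contained C++ sketch below demonstrates the same "block until a counter enters [start, end]" idea with a plain condition variable; FakeCommandBuffer and its methods are hypothetical stand-ins, not Chromium classes.

```cpp
// Standalone sketch (hypothetical FakeCommandBuffer, not Chromium code):
// a waiter blocks until a token enters an inclusive range instead of
// polling the shared state in a loop.
#include <chrono>
#include <condition_variable>
#include <cstdint>
#include <iostream>
#include <mutex>
#include <thread>

namespace {

// Inclusive range check; start > end is treated as a wrapped range.
bool InRange(int32_t start, int32_t end, int32_t value) {
  return start <= end ? (value >= start && value <= end)
                      : (value >= start || value <= end);
}

struct FakeCommandBuffer {
  std::mutex mutex;
  std::condition_variable token_advanced;
  int32_t token = 0;

  // Blocks until the token is in [start, end]; analogous to the GPU process
  // holding the WaitForTokenInRange reply until the condition holds.
  void WaitForTokenInRange(int32_t start, int32_t end) {
    std::unique_lock<std::mutex> lock(mutex);
    token_advanced.wait(lock, [&] { return InRange(start, end, token); });
  }

  void AdvanceToken() {
    {
      std::lock_guard<std::mutex> lock(mutex);
      ++token;
    }
    token_advanced.notify_all();
  }
};

}  // namespace

int main() {
  FakeCommandBuffer buffer;
  std::thread producer([&buffer] {
    for (int i = 0; i < 10; ++i) {
      std::this_thread::sleep_for(std::chrono::milliseconds(10));
      buffer.AdvanceToken();
    }
  });
  // One blocking call replaces a "fetch state, check, repeat" loop.
  buffer.WaitForTokenInRange(5, 10);
  std::cout << "token reached the requested range\n";
  producer.join();
  return 0;
}
```

Blocking on the predicate removes the per-iteration synchronous round trip of the old GetStateFast loop while keeping the inclusive-range semantics exposed by the new messages.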
Diffstat (limited to 'content/common')
-rw-r--r-- | content/common/gpu/client/command_buffer_proxy_impl.cc | 20
-rw-r--r-- | content/common/gpu/gpu_channel.cc | 30
-rw-r--r-- | content/common/gpu/gpu_channel.h | 1
-rw-r--r-- | content/common/gpu/gpu_command_buffer_stub.cc | 101
-rw-r--r-- | content/common/gpu/gpu_command_buffer_stub.h | 11
-rw-r--r-- | content/common/gpu/gpu_messages.h | 12 |
6 files changed, 120 insertions, 55 deletions
diff --git a/content/common/gpu/client/command_buffer_proxy_impl.cc b/content/common/gpu/client/command_buffer_proxy_impl.cc
index 2335bbe..ef4bdaf 100644
--- a/content/common/gpu/client/command_buffer_proxy_impl.cc
+++ b/content/common/gpu/client/command_buffer_proxy_impl.cc
@@ -218,13 +218,15 @@ void CommandBufferProxyImpl::WaitForTokenInRange(int32 start, int32 end) {
                "end", end);
 
   TryUpdateState();
-  while (!InRange(start, end, last_state_.token) &&
-         last_state_.error == gpu::error::kNoError) {
+  if (!InRange(start, end, last_state_.token) &&
+      last_state_.error == gpu::error::kNoError) {
     gpu::CommandBuffer::State state;
-    if (Send(new GpuCommandBufferMsg_GetStateFast(route_id_, &state)))
+    if (Send(new GpuCommandBufferMsg_WaitForTokenInRange(
+            route_id_, start, end, &state)))
       OnUpdateState(state);
-    TryUpdateState();
   }
+  DCHECK(InRange(start, end, last_state_.token) ||
+         last_state_.error != gpu::error::kNoError);
 }
 
 void CommandBufferProxyImpl::WaitForGetOffsetInRange(int32 start, int32 end) {
@@ -235,13 +237,15 @@ void CommandBufferProxyImpl::WaitForGetOffsetInRange(int32 start, int32 end) {
                "end", end);
 
   TryUpdateState();
-  while (!InRange(start, end, last_state_.get_offset) &&
-         last_state_.error == gpu::error::kNoError) {
+  if (!InRange(start, end, last_state_.get_offset) &&
+      last_state_.error == gpu::error::kNoError) {
     gpu::CommandBuffer::State state;
-    if (Send(new GpuCommandBufferMsg_GetStateFast(route_id_, &state)))
+    if (Send(new GpuCommandBufferMsg_WaitForGetOffsetInRange(
+            route_id_, start, end, &state)))
       OnUpdateState(state);
-    TryUpdateState();
   }
+  DCHECK(InRange(start, end, last_state_.get_offset) ||
+         last_state_.error != gpu::error::kNoError);
 }
 
 void CommandBufferProxyImpl::SetGetBuffer(int32 shm_id) {
diff --git a/content/common/gpu/gpu_channel.cc b/content/common/gpu/gpu_channel.cc
index 915c379..346736a 100644
--- a/content/common/gpu/gpu_channel.cc
+++ b/content/common/gpu/gpu_channel.cc
@@ -400,7 +400,6 @@ GpuChannel::GpuChannel(GpuChannelManager* gpu_channel_manager,
       watchdog_(watchdog),
       software_(software),
       handle_messages_scheduled_(false),
-      processed_get_state_fast_(false),
      currently_processing_message_(NULL),
       weak_factory_(this),
       num_stubs_descheduled_(0) {
@@ -459,27 +458,11 @@ bool GpuChannel::OnMessageReceived(const IPC::Message& message) {
             << " with type " << message.type();
   }
 
-  if (message.type() == GpuCommandBufferMsg_GetStateFast::ID) {
-    if (processed_get_state_fast_) {
-      // Require a non-GetStateFast message in between two GetStateFast
-      // messages, to ensure progress is made.
-      std::deque<IPC::Message*>::iterator point = deferred_messages_.begin();
-
-      while (point != deferred_messages_.end() &&
-             (*point)->type() == GpuCommandBufferMsg_GetStateFast::ID) {
-        ++point;
-      }
-
-      if (point != deferred_messages_.end()) {
-        ++point;
-      }
-
-      deferred_messages_.insert(point, new IPC::Message(message));
-    } else {
-      // Move GetStateFast commands to the head of the queue, so the renderer
-      // doesn't have to wait any longer than necessary.
-      deferred_messages_.push_front(new IPC::Message(message));
-    }
+  if (message.type() == GpuCommandBufferMsg_WaitForTokenInRange::ID ||
+      message.type() == GpuCommandBufferMsg_WaitForGetOffsetInRange::ID) {
+    // Move Wait commands to the head of the queue, so the renderer
+    // doesn't have to wait any longer than necessary.
+    deferred_messages_.push_front(new IPC::Message(message));
   } else {
     deferred_messages_.push_back(new IPC::Message(message));
   }
@@ -736,9 +719,6 @@ void GpuChannel::HandleMessage() {
     deferred_messages_.pop_front();
     bool message_processed = true;
 
-    processed_get_state_fast_ =
-        (message->type() == GpuCommandBufferMsg_GetStateFast::ID);
-
     currently_processing_message_ = message.get();
     bool result;
     if (message->routing_id() == MSG_ROUTING_CONTROL)
diff --git a/content/common/gpu/gpu_channel.h b/content/common/gpu/gpu_channel.h
index 6ebdb0f..1b9319d 100644
--- a/content/common/gpu/gpu_channel.h
+++ b/content/common/gpu/gpu_channel.h
@@ -218,7 +218,6 @@ class GpuChannel : public IPC::Listener,
   GpuWatchdog* watchdog_;
   bool software_;
   bool handle_messages_scheduled_;
-  bool processed_get_state_fast_;
   IPC::Message* currently_processing_message_;
 
   base::WeakPtrFactory<GpuChannel> weak_factory_;
diff --git a/content/common/gpu/gpu_command_buffer_stub.cc b/content/common/gpu/gpu_command_buffer_stub.cc
index 68e4733..0a6e5b4 100644
--- a/content/common/gpu/gpu_command_buffer_stub.cc
+++ b/content/common/gpu/gpu_command_buffer_stub.cc
@@ -46,6 +46,15 @@
 #endif
 
 namespace content {
+struct WaitForCommandState {
+  WaitForCommandState(int32 start, int32 end, IPC::Message* reply)
+      : start(start), end(end), reply(reply) {}
+
+  int32 start;
+  int32 end;
+  scoped_ptr<IPC::Message> reply;
+};
+
 namespace {
 
 // The GpuCommandBufferMemoryTracker class provides a bridge between the
@@ -181,9 +190,9 @@ bool GpuCommandBufferStub::OnMessageReceived(const IPC::Message& message) {
   // messages directed at the command buffer. This ensures that the message
   // handler can assume that the context is current (not necessary for
   // Echo, RetireSyncPoint, or WaitSyncPoint).
-  if (decoder_.get() &&
-      message.type() != GpuCommandBufferMsg_Echo::ID &&
-      message.type() != GpuCommandBufferMsg_GetStateFast::ID &&
+  if (decoder_.get() && message.type() != GpuCommandBufferMsg_Echo::ID &&
+      message.type() != GpuCommandBufferMsg_WaitForTokenInRange::ID &&
+      message.type() != GpuCommandBufferMsg_WaitForGetOffsetInRange::ID &&
       message.type() != GpuCommandBufferMsg_RetireSyncPoint::ID &&
       message.type() != GpuCommandBufferMsg_SetLatencyInfo::ID) {
     if (!MakeCurrent())
@@ -202,8 +211,10 @@ bool GpuCommandBufferStub::OnMessageReceived(const IPC::Message& message) {
                         OnProduceFrontBuffer);
     IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_Echo, OnEcho);
     IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_GetState, OnGetState);
-    IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_GetStateFast,
-                                    OnGetStateFast);
+    IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_WaitForTokenInRange,
+                                    OnWaitForTokenInRange);
+    IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_WaitForGetOffsetInRange,
+                                    OnWaitForGetOffsetInRange);
     IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_AsyncFlush, OnAsyncFlush);
     IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_SetLatencyInfo, OnSetLatencyInfo);
     IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_Rescheduled, OnRescheduled);
@@ -237,6 +248,8 @@ bool GpuCommandBufferStub::OnMessageReceived(const IPC::Message& message) {
     IPC_MESSAGE_UNHANDLED(handled = false)
   IPC_END_MESSAGE_MAP()
 
+  CheckCompleteWaits();
+
   // Ensure that any delayed work that was created will be handled.
   ScheduleDelayedWork(kHandleMoreWorkPeriodMs);
@@ -353,6 +366,14 @@ bool GpuCommandBufferStub::MakeCurrent() {
 }
 
 void GpuCommandBufferStub::Destroy() {
+  if (wait_for_token_) {
+    Send(wait_for_token_->reply.release());
+    wait_for_token_.reset();
+  }
+  if (wait_for_get_offset_) {
+    Send(wait_for_get_offset_->reply.release());
+    wait_for_get_offset_.reset();
+  }
   if (handle_.is_null() && !active_url_.is_empty()) {
     GpuChannelManager* gpu_channel_manager = channel_->gpu_channel_manager();
     gpu_channel_manager->Send(new GpuHostMsg_DidDestroyOffscreenContext(
@@ -657,19 +678,65 @@ void GpuCommandBufferStub::OnParseError() {
   CheckContextLost();
 }
 
-void GpuCommandBufferStub::OnGetStateFast(IPC::Message* reply_message) {
-  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnGetStateFast");
+void GpuCommandBufferStub::OnWaitForTokenInRange(int32 start,
+                                                 int32 end,
+                                                 IPC::Message* reply_message) {
+  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnWaitForTokenInRange");
   DCHECK(command_buffer_.get());
   CheckContextLost();
-  gpu::CommandBuffer::State state = command_buffer_->GetState();
-  GpuCommandBufferMsg_GetStateFast::WriteReplyParams(reply_message, state);
-  Send(reply_message);
+  if (wait_for_token_)
+    LOG(ERROR) << "Got WaitForToken command while currently waiting for token.";
+  wait_for_token_ =
+      make_scoped_ptr(new WaitForCommandState(start, end, reply_message));
+  CheckCompleteWaits();
 }
 
-void GpuCommandBufferStub::OnAsyncFlush(int32 put_offset,
-                                        uint32 flush_count) {
-  TRACE_EVENT1("gpu", "GpuCommandBufferStub::OnAsyncFlush",
-               "put_offset", put_offset);
+void GpuCommandBufferStub::OnWaitForGetOffsetInRange(
+    int32 start,
+    int32 end,
+    IPC::Message* reply_message) {
+  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnWaitForGetOffsetInRange");
+  DCHECK(command_buffer_.get());
+  CheckContextLost();
+  if (wait_for_get_offset_) {
+    LOG(ERROR)
+        << "Got WaitForGetOffset command while currently waiting for offset.";
+  }
+  wait_for_get_offset_ =
+      make_scoped_ptr(new WaitForCommandState(start, end, reply_message));
+  CheckCompleteWaits();
+}
+
+void GpuCommandBufferStub::CheckCompleteWaits() {
+  if (wait_for_token_ || wait_for_get_offset_) {
+    gpu::CommandBuffer::State state = command_buffer_->GetState();
+    if (wait_for_token_ &&
+        (gpu::CommandBuffer::InRange(
+             wait_for_token_->start, wait_for_token_->end, state.token) ||
+         state.error != gpu::error::kNoError)) {
+      ReportState();
+      GpuCommandBufferMsg_WaitForTokenInRange::WriteReplyParams(
+          wait_for_token_->reply.get(), state);
+      Send(wait_for_token_->reply.release());
+      wait_for_token_.reset();
+    }
+    if (wait_for_get_offset_ &&
+        (gpu::CommandBuffer::InRange(wait_for_get_offset_->start,
+                                     wait_for_get_offset_->end,
+                                     state.get_offset) ||
+         state.error != gpu::error::kNoError)) {
+      ReportState();
+      GpuCommandBufferMsg_WaitForGetOffsetInRange::WriteReplyParams(
+          wait_for_get_offset_->reply.get(), state);
+      Send(wait_for_get_offset_->reply.release());
+      wait_for_get_offset_.reset();
+    }
+  }
+}
+
+void GpuCommandBufferStub::OnAsyncFlush(int32 put_offset, uint32 flush_count) {
+  TRACE_EVENT1(
+      "gpu", "GpuCommandBufferStub::OnAsyncFlush", "put_offset", put_offset);
   DCHECK(command_buffer_.get());
   if (flush_count - last_flush_count_ < 0x8000000U) {
     last_flush_count_ = flush_count;
@@ -725,10 +792,7 @@ void GpuCommandBufferStub::OnCommandProcessed() {
     watchdog_->CheckArmed();
 }
 
-void GpuCommandBufferStub::ReportState() {
-  if (!CheckContextLost())
-    command_buffer_->UpdateState();
-}
+void GpuCommandBufferStub::ReportState() { command_buffer_->UpdateState(); }
 
 void GpuCommandBufferStub::PutChanged() {
   FastSetActiveURL(active_url_, active_url_hash_);
@@ -981,6 +1045,7 @@ bool GpuCommandBufferStub::CheckContextLost() {
       (gfx::GLContext::LosesAllContextsOnContextLost() ||
        use_virtualized_gl_context_))
     channel_->LoseAllContexts();
+  CheckCompleteWaits();
   return was_lost;
 }
diff --git a/content/common/gpu/gpu_command_buffer_stub.h b/content/common/gpu/gpu_command_buffer_stub.h
index b7fc958..34790dc 100644
--- a/content/common/gpu/gpu_command_buffer_stub.h
+++ b/content/common/gpu/gpu_command_buffer_stub.h
@@ -46,6 +46,7 @@ class GpuChannel;
 class GpuVideoDecodeAccelerator;
 class GpuVideoEncodeAccelerator;
 class GpuWatchdog;
+struct WaitForCommandState;
 
 class GpuCommandBufferStub
     : public GpuMemoryManagerClient,
@@ -160,7 +161,12 @@ class GpuCommandBufferStub
   void OnSetGetBuffer(int32 shm_id, IPC::Message* reply_message);
   void OnProduceFrontBuffer(const gpu::Mailbox& mailbox);
   void OnGetState(IPC::Message* reply_message);
-  void OnGetStateFast(IPC::Message* reply_message);
+  void OnWaitForTokenInRange(int32 start,
+                             int32 end,
+                             IPC::Message* reply_message);
+  void OnWaitForGetOffsetInRange(int32 start,
+                                 int32 end,
+                                 IPC::Message* reply_message);
   void OnAsyncFlush(int32 put_offset, uint32 flush_count);
   void OnEcho(const IPC::Message& message);
   void OnRescheduled();
@@ -218,6 +224,7 @@ class GpuCommandBufferStub
   void ScheduleDelayedWork(int64 delay);
 
   bool CheckContextLost();
+  void CheckCompleteWaits();
 
   // The lifetime of objects of this class is managed by a GpuChannel. The
   // GpuChannels destroy all the GpuCommandBufferStubs that they own when they
@@ -270,6 +277,8 @@ class GpuCommandBufferStub
   size_t active_url_hash_;
 
   size_t total_gpu_memory_;
+  scoped_ptr<WaitForCommandState> wait_for_token_;
+  scoped_ptr<WaitForCommandState> wait_for_get_offset_;
 
   DISALLOW_COPY_AND_ASSIGN(GpuCommandBufferStub);
 };
diff --git a/content/common/gpu/gpu_messages.h b/content/common/gpu/gpu_messages.h
index f38494c..9a9bcdd 100644
--- a/content/common/gpu/gpu_messages.h
+++ b/content/common/gpu/gpu_messages.h
@@ -524,8 +524,16 @@ IPC_MESSAGE_ROUTED1(GpuCommandBufferMsg_ProduceFrontBuffer,
 IPC_SYNC_MESSAGE_ROUTED0_1(GpuCommandBufferMsg_GetState,
                            gpu::CommandBuffer::State /* state */)
 
-// Get the current state of the command buffer, as fast as possible.
-IPC_SYNC_MESSAGE_ROUTED0_1(GpuCommandBufferMsg_GetStateFast,
+// Wait until the token is in a specific range, inclusive.
+IPC_SYNC_MESSAGE_ROUTED2_1(GpuCommandBufferMsg_WaitForTokenInRange,
+                           int32 /* start */,
+                           int32 /* end */,
+                           gpu::CommandBuffer::State /* state */)
+
+// Wait until the get offset is in a specific range, inclusive.
+IPC_SYNC_MESSAGE_ROUTED2_1(GpuCommandBufferMsg_WaitForGetOffsetInRange,
+                           int32 /* start */,
+                           int32 /* end */,
                            gpu::CommandBuffer::State /* state */)
 
 // Asynchronously synchronize the put and get offsets of both processes.
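Editor's note (illustration only, not part of the commit): on the GPU-process side, the patch keeps at most one pending token wait and one pending get-offset wait in WaitForCommandState objects and re-examines them from CheckCompleteWaits() whenever the command buffer state may have advanced or the context is lost. The sketch below is a minimal, self-contained rendering of that bookkeeping, assuming a std::function callback in place of the delayed IPC reply; FakeStub, PendingWait, and State are hypothetical names.

```cpp
// Standalone sketch (hypothetical FakeStub/PendingWait/State, not Chromium
// code): store a pending wait and try to complete it on every state change,
// mirroring the WaitForCommandState + CheckCompleteWaits() pattern.
#include <cstdint>
#include <functional>
#include <iostream>
#include <memory>
#include <utility>

namespace {

struct State {
  int32_t token = 0;
  bool error = false;
};

// Inclusive range check; start > end is treated as a wrapped range.
bool InRange(int32_t start, int32_t end, int32_t value) {
  return start <= end ? (value >= start && value <= end)
                      : (value >= start || value <= end);
}

// Parallels WaitForCommandState: the requested range plus the reply to send.
struct PendingWait {
  PendingWait(int32_t start, int32_t end,
              std::function<void(const State&)> reply)
      : start(start), end(end), reply(std::move(reply)) {}

  int32_t start;
  int32_t end;
  std::function<void(const State&)> reply;
};

class FakeStub {
 public:
  // Record the wait, then try to answer it immediately in case the
  // condition already holds (as OnWaitForTokenInRange does).
  void WaitForTokenInRange(int32_t start, int32_t end,
                           std::function<void(const State&)> reply) {
    wait_for_token_ =
        std::make_unique<PendingWait>(start, end, std::move(reply));
    CheckCompleteWaits();
  }

  // Called whenever the state may have advanced or an error was noticed.
  void CheckCompleteWaits() {
    if (wait_for_token_ &&
        (InRange(wait_for_token_->start, wait_for_token_->end, state_.token) ||
         state_.error)) {
      wait_for_token_->reply(state_);  // Stands in for the delayed IPC reply.
      wait_for_token_.reset();
    }
  }

  void AdvanceToken() {
    ++state_.token;
    CheckCompleteWaits();
  }

 private:
  State state_;
  std::unique_ptr<PendingWait> wait_for_token_;
};

}  // namespace

int main() {
  FakeStub stub;
  stub.WaitForTokenInRange(3, 100, [](const State& state) {
    std::cout << "reply sent at token " << state.token << "\n";
  });
  for (int i = 0; i < 5; ++i)
    stub.AdvanceToken();  // The stored reply fires once the token reaches 3.
  return 0;
}
```

The real stub additionally answers any pending wait when the context is lost (CheckContextLost() now calls CheckCompleteWaits()) and releases the held replies in Destroy(), so the renderer never blocks forever on a dead context.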