diff options
author | jbauman@chromium.org <jbauman@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98> | 2011-10-10 21:14:34 +0000 |
---|---|---|
committer | jbauman@chromium.org <jbauman@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98> | 2011-10-10 21:14:34 +0000 |
commit | 451e966ba1bf1d93191898c4d94eb42bdee20cc7 (patch) | |
tree | 74f94da4a386affee17d01ba0d6bec82d9c115e3 | |
parent | 4d0c9d0ef09699a7e3018e1cdb211d01194b3c33 (diff) | |
download | chromium_src-451e966ba1bf1d93191898c4d94eb42bdee20cc7.zip chromium_src-451e966ba1bf1d93191898c4d94eb42bdee20cc7.tar.gz chromium_src-451e966ba1bf1d93191898c4d94eb42bdee20cc7.tar.bz2 |
Allow FlushSyncs to short-circuit
All messages on a channel are now executed in order, so Flush messages have to wait for all previous AsyncFlush messages to finish. This is generally unnecessary, so we replace Flush with a GetStateFast message. All commands are queued up when they enter the GPU process, and GetStateFast commands are moved to the head of the line.
BUG=
TEST=trybots, webgl conformance tests
Review URL: http://codereview.chromium.org/8198017
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@104786 0039d316-1c4b-4281-b951-d872f2087c98
-rw-r--r-- | content/common/gpu/gpu_channel.cc | 94 | ||||
-rw-r--r-- | content/common/gpu/gpu_channel.h | 5 | ||||
-rw-r--r-- | content/common/gpu/gpu_command_buffer_stub.cc | 58 | ||||
-rw-r--r-- | content/common/gpu/gpu_command_buffer_stub.h | 5 | ||||
-rw-r--r-- | content/common/gpu/gpu_messages.h | 8 | ||||
-rw-r--r-- | content/renderer/gpu/command_buffer_proxy.cc | 15 |
6 files changed, 74 insertions, 111 deletions
diff --git a/content/common/gpu/gpu_channel.cc b/content/common/gpu/gpu_channel.cc index 3da36f4..f1489e2 100644 --- a/content/common/gpu/gpu_channel.cc +++ b/content/common/gpu/gpu_channel.cc @@ -36,6 +36,7 @@ GpuChannel::GpuChannel(GpuChannelManager* gpu_channel_manager, share_group_(new gfx::GLShareGroup), watchdog_(watchdog), software_(software), + handle_messages_scheduled_(false), task_factory_(ALLOW_THIS_IN_INITIALIZER_LIST(this)) { DCHECK(gpu_channel_manager); DCHECK(renderer_id); @@ -76,42 +77,16 @@ bool GpuChannel::OnMessageReceived(const IPC::Message& message) { message.type() != GpuChannelMsg_Echo::ID) return OnControlMessageReceived(message); - // If the channel is unscheduled, defer sync and async messages until it is - // rescheduled. Also, even if the channel is scheduled, do not allow newly - // received messages to be handled before previously received deferred ones; - // append them to the deferred queue as well. - if (!IsScheduled() || !deferred_messages_.empty()) { - deferred_messages_.push(new IPC::Message(message)); - return true; + if (message.type() == GpuCommandBufferMsg_GetStateFast::ID) { + // Move GetStateFast commands to the head of the queue, so the renderer + // doesn't have to wait any longer than necessary. + deferred_messages_.push_front(new IPC::Message(message)); + } else { + deferred_messages_.push_back(new IPC::Message(message)); } - // Handle deferred control messages. - if (message.routing_id() == MSG_ROUTING_CONTROL) - return OnControlMessageReceived(message); - - if (!router_.RouteMessage(message)) { - // Respond to sync messages even if router failed to route. - if (message.is_sync()) { - IPC::Message* reply = IPC::SyncMessage::GenerateReply(&message); - reply->set_reply_error(); - Send(reply); - } - return false; - } - - // If the channel becomes unscheduled as a result of handling the message, - // synthesize an IPC message to flush the command buffer that became - // unscheduled. 
- for (StubMap::Iterator<GpuCommandBufferStub> it(&stubs_); - !it.IsAtEnd(); - it.Advance()) { - GpuCommandBufferStub* stub = it.GetCurrentValue(); - if (!stub->IsScheduled()) { - DCHECK(deferred_messages_.empty()); - deferred_messages_.push(new GpuCommandBufferMsg_Rescheduled( - stub->route_id())); - } - } + if (IsScheduled()) + OnScheduled(); return true; } @@ -154,6 +129,8 @@ bool GpuChannel::IsScheduled() { } void GpuChannel::OnScheduled() { + if (handle_messages_scheduled_) + return; // Post a task to handle any deferred messages. The deferred message queue is // not emptied here, which ensures that OnMessageReceived will continue to // defer newly received messages until the ones in the queue have all been @@ -162,7 +139,8 @@ void GpuChannel::OnScheduled() { MessageLoop::current()->PostTask( FROM_HERE, task_factory_.NewRunnableMethod( - &GpuChannel::HandleDeferredMessages)); + &GpuChannel::HandleMessage)); + handle_messages_scheduled_ = true; } void GpuChannel::LoseAllContexts() { @@ -241,18 +219,42 @@ bool GpuChannel::OnControlMessageReceived(const IPC::Message& msg) { return handled; } -void GpuChannel::HandleDeferredMessages() { - // Empty the deferred queue so OnMessageRecieved does not defer on that - // account and to prevent an infinite loop if the scheduler is unscheduled - // as a result of handling already deferred messages. - std::queue<IPC::Message*> deferred_messages_copy; - std::swap(deferred_messages_copy, deferred_messages_); - - while (!deferred_messages_copy.empty()) { - scoped_ptr<IPC::Message> message(deferred_messages_copy.front()); - deferred_messages_copy.pop(); +void GpuChannel::HandleMessage() { + handle_messages_scheduled_ = false; + if (!IsScheduled()) + return; + + if (!deferred_messages_.empty()) { + scoped_ptr<IPC::Message> message(deferred_messages_.front()); + deferred_messages_.pop_front(); + // Handle deferred control messages. 
+ if (message->routing_id() == MSG_ROUTING_CONTROL) + OnControlMessageReceived(*message); + else if (!router_.RouteMessage(*message)) { + // Respond to sync messages even if router failed to route. + if (message->is_sync()) { + IPC::Message* reply = IPC::SyncMessage::GenerateReply(&*message); + reply->set_reply_error(); + Send(reply); + } + } else { + // If the channel becomes unscheduled as a result of handling the message, + // synthesize an IPC message to flush the command buffer that became + // unscheduled. + for (StubMap::Iterator<GpuCommandBufferStub> it(&stubs_); + !it.IsAtEnd(); + it.Advance()) { + GpuCommandBufferStub* stub = it.GetCurrentValue(); + if (!stub->IsScheduled()) { + deferred_messages_.push_front(new GpuCommandBufferMsg_Rescheduled( + stub->route_id())); + } + } + } + } - OnMessageReceived(*message); + if (IsScheduled() && !deferred_messages_.empty()) { + OnScheduled(); } } diff --git a/content/common/gpu/gpu_channel.h b/content/common/gpu/gpu_channel.h index 109dc43..768a801 100644 --- a/content/common/gpu/gpu_channel.h +++ b/content/common/gpu/gpu_channel.h @@ -122,7 +122,7 @@ class GpuChannel : public IPC::Channel::Listener, bool OnControlMessageReceived(const IPC::Message& msg); - void HandleDeferredMessages(); + void HandleMessage(); // Message handlers. void OnInitialize(base::ProcessHandle renderer_process); @@ -143,7 +143,7 @@ class GpuChannel : public IPC::Channel::Listener, scoped_ptr<IPC::SyncChannel> channel_; - std::queue<IPC::Message*> deferred_messages_; + std::deque<IPC::Message*> deferred_messages_; // The id of the renderer who is on the other side of the channel. 
int renderer_id_; @@ -174,6 +174,7 @@ class GpuChannel : public IPC::Channel::Listener, gpu::gles2::DisallowedFeatures disallowed_features_; GpuWatchdog* watchdog_; bool software_; + bool handle_messages_scheduled_; ScopedRunnableMethodFactory<GpuChannel> task_factory_; diff --git a/content/common/gpu/gpu_command_buffer_stub.cc b/content/common/gpu/gpu_command_buffer_stub.cc index 5b4e86b..2fe7604 100644 --- a/content/common/gpu/gpu_command_buffer_stub.cc +++ b/content/common/gpu/gpu_command_buffer_stub.cc @@ -90,7 +90,8 @@ bool GpuCommandBufferStub::OnMessageReceived(const IPC::Message& message) { IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_SetParent, OnSetParent); IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_GetState, OnGetState); - IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_Flush, OnFlush); + IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_GetStateFast, + OnGetStateFast); IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_AsyncFlush, OnAsyncFlush); IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_Rescheduled, OnRescheduled); IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_CreateTransferBuffer, @@ -308,54 +309,21 @@ void GpuCommandBufferStub::OnParseError() { Send(msg); } -void GpuCommandBufferStub::OnFlush(int32 put_offset, - int32 last_known_get, - uint32 flush_count, - IPC::Message* reply_message) { - TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnFlush"); +void GpuCommandBufferStub::OnGetStateFast(IPC::Message* reply_message) { + TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnGetStateFast"); gpu::CommandBuffer::State state = command_buffer_->GetState(); - if (flush_count - last_flush_count_ >= 0x8000000U) { - // We received this message out-of-order. This should not happen but is here - // to catch regressions. Ignore the message. 
- NOTREACHED() << "Received an AsyncFlush message out-of-order"; - GpuCommandBufferMsg_Flush::WriteReplyParams(reply_message, state); - Send(reply_message); - } else { - last_flush_count_ = flush_count; - - // Reply immediately if the client was out of date with the current get - // offset. - bool reply_immediately = state.get_offset != last_known_get; - if (reply_immediately) { - GpuCommandBufferMsg_Flush::WriteReplyParams(reply_message, state); - Send(reply_message); - } - - // Process everything up to the put offset. - state = command_buffer_->FlushSync(put_offset, last_known_get); - - // Lose all contexts if the context was lost. - if (state.error == gpu::error::kLostContext && - gfx::GLContext::LosesAllContextsOnContextLost()) { - channel_->LoseAllContexts(); - } + if (state.error == gpu::error::kLostContext && + gfx::GLContext::LosesAllContextsOnContextLost()) + channel_->LoseAllContexts(); - // Then if the client was up-to-date with the get offset, reply to the - // synchronpous IPC only after processing all commands are processed. This - // prevents the client from "spinning" when it fills up the command buffer. - // Otherwise, since the state has changed since the immediate reply, send - // an asyncronous state update back to the client. 
- if (!reply_immediately) { - GpuCommandBufferMsg_Flush::WriteReplyParams(reply_message, state); - Send(reply_message); - } else { - ReportState(); - } - } + GpuCommandBufferMsg_GetStateFast::WriteReplyParams(reply_message, state); + Send(reply_message); } -void GpuCommandBufferStub::OnAsyncFlush(int32 put_offset, uint32 flush_count) { - TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnAsyncFlush"); +void GpuCommandBufferStub::OnAsyncFlush(int32 put_offset, + uint32 flush_count) { + TRACE_EVENT1("gpu", "GpuCommandBufferStub::OnAsyncFlush", + "put_offset", put_offset); if (flush_count - last_flush_count_ < 0x8000000U) { last_flush_count_ = flush_count; command_buffer_->Flush(put_offset); diff --git a/content/common/gpu/gpu_command_buffer_stub.h b/content/common/gpu/gpu_command_buffer_stub.h index 79cd02b..22175d2 100644 --- a/content/common/gpu/gpu_command_buffer_stub.h +++ b/content/common/gpu/gpu_command_buffer_stub.h @@ -92,10 +92,7 @@ class GpuCommandBufferStub uint32 parent_texture_id, IPC::Message* reply_message); void OnGetState(IPC::Message* reply_message); - void OnFlush(int32 put_offset, - int32 last_known_get, - uint32 flush_count, - IPC::Message* reply_message); + void OnGetStateFast(IPC::Message* reply_message); void OnAsyncFlush(int32 put_offset, uint32 flush_count); void OnEcho(const IPC::Message& message); void OnRescheduled(); diff --git a/content/common/gpu/gpu_messages.h b/content/common/gpu/gpu_messages.h index f19cd37..2d6ad38 100644 --- a/content/common/gpu/gpu_messages.h +++ b/content/common/gpu/gpu_messages.h @@ -317,12 +317,8 @@ IPC_SYNC_MESSAGE_ROUTED2_1(GpuCommandBufferMsg_SetParent, IPC_SYNC_MESSAGE_ROUTED0_1(GpuCommandBufferMsg_GetState, gpu::CommandBuffer::State /* state */) -// Synchronize the put and get offsets of both processes. Caller passes its -// current put offset. Current state (including get offset) is returned. 
-IPC_SYNC_MESSAGE_ROUTED3_1(GpuCommandBufferMsg_Flush, - int32 /* put_offset */, - int32 /* last_known_get */, - uint32 /* flush_count */, +// Get the current state of the command buffer, as fast as possible. +IPC_SYNC_MESSAGE_ROUTED0_1(GpuCommandBufferMsg_GetStateFast, gpu::CommandBuffer::State /* state */) // Asynchronously synchronize the put and get offsets of both processes. diff --git a/content/renderer/gpu/command_buffer_proxy.cc b/content/renderer/gpu/command_buffer_proxy.cc index e1f9a66..4cc5cde 100644 --- a/content/renderer/gpu/command_buffer_proxy.cc +++ b/content/renderer/gpu/command_buffer_proxy.cc @@ -182,6 +182,8 @@ void CommandBufferProxy::Flush(int32 put_offset) { if (last_state_.error != gpu::error::kNoError) return; + TRACE_EVENT1("gpu", "CommandBufferProxy::Flush", "put_offset", put_offset); + Send(new GpuCommandBufferMsg_AsyncFlush(route_id_, put_offset, ++flush_count_)); @@ -189,20 +191,17 @@ void CommandBufferProxy::Flush(int32 put_offset) { gpu::CommandBuffer::State CommandBufferProxy::FlushSync(int32 put_offset, int32 last_known_get) { - TRACE_EVENT0("gpu", "CommandBufferProxy::FlushSync"); + TRACE_EVENT1("gpu", "CommandBufferProxy::FlushSync", "put_offset", + put_offset); + Flush(put_offset); if (last_known_get == last_state_.get_offset) { // Send will flag state with lost context if IPC fails. if (last_state_.error == gpu::error::kNoError) { gpu::CommandBuffer::State state; - if (Send(new GpuCommandBufferMsg_Flush(route_id_, - put_offset, - last_known_get, - ++flush_count_, - &state))) + if (Send(new GpuCommandBufferMsg_GetStateFast(route_id_, + &state))) OnUpdateState(state); } - } else { - Flush(put_offset); } return last_state_; |