author | apatrick@chromium.org <apatrick@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98> | 2011-07-21 21:40:48 +0000 |
---|---|---|
committer | apatrick@chromium.org <apatrick@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98> | 2011-07-21 21:40:48 +0000 |
commit | d0f02c4df3c10b655dc32a326b131131bc7d31d7 (patch) | |
tree | b95d230702e15fc8115f5257bb6fad73eb6cddcb | |
parent | 3a62f9988fa2b22beec1531efc008accb27b4ca1 (diff) | |
Reland 93066 - Execute all GL commands up to the put offset reported by each flush. This means glFlush is a barrier that prevents reordering of GL commands issued on different command buffers. I used it to replace latches for synchronizing the rendering of WebGL canvas and Pepper 3D with the accelerated compositor. The primary advantage is that it is more robust than latches and there is no possibility of deadlock. It should also be possible for WebGL and Pepper 3D to use it, whereas exposing SetLatch and WaitLatch would be dangerous.
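To make the barrier model concrete, here is a minimal, self-contained C++ sketch of the semantics described above. This is not Chromium code; the FlushRecord struct, the command lists, and the buffer ids are invented purely for illustration. Each flush records a put offset for its command buffer, and the service drains flushes in arrival order, executing each buffer up to its recorded offset, so work issued on one buffer before a flush cannot be reordered behind work issued on another buffer afterwards.

```cpp
// Minimal sketch (not Chromium code) of "execute all commands up to the put
// offset reported by each flush": flushes are drained in arrival order, so a
// flush acts as an ordering barrier across different command buffers.
#include <cstdio>
#include <deque>
#include <map>
#include <vector>

struct FlushRecord {
  int buffer_id;
  int put_offset;  // execute this buffer's commands up to this offset
};

int main() {
  // Pretend command streams: each entry is just a command name.
  std::map<int, std::vector<const char*>> commands = {
      {1, {"clear", "drawWebGLCanvas"}},
      {2, {"compositeCanvasTexture", "swap"}}};
  std::map<int, int> get_offset = {{1, 0}, {2, 0}};

  // The client flushes buffer 1 before issuing work on buffer 2, so the
  // canvas draw is guaranteed to execute before the compositor samples it.
  std::deque<FlushRecord> flushes = {{1, 2}, {2, 2}};

  while (!flushes.empty()) {
    FlushRecord f = flushes.front();
    flushes.pop_front();
    for (int& get = get_offset[f.buffer_id]; get < f.put_offset; ++get)
      std::printf("buffer %d: %s\n", f.buffer_id, commands[f.buffer_id][get]);
  }
  return 0;
}
```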
The calls to SetLatch and WaitLatch are still in WebKit but they are no-ops; SetLatch and WaitLatch are completely removed elsewhere. I changed CommandBuffer::FlushSync to Finish to reflect the new semantics. Going forward, I will add a synchronous CommandBuffer::WaitForToken and WaitForAvailableEntries, which should eliminate the need to call Finish unless glFinish is called by the client. The Pepper interface is unchanged because I don't want to break binary compatibility. I also fixed a bug where the last read token in CmdBufferHelper was stale after receiving a ReportState IPC, which was causing a redundant synchronous flush in the client-side SwapBuffers throttling.
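A hedged sketch of the resulting client-side contract, using a stand-in CommandBuffer type rather than the real gpu::CommandBuffer interface: Finish() simply loops FlushSync() until the service's get offset has caught up with the client's put offset, which mirrors the CommandBufferHelper::Finish change in the diff below.

```cpp
#include <cstdint>

struct State {
  int32_t get_offset;
  int error;  // 0 == no error
};

// Stand-in for the service side: in the real code FlushSync() does not return
// until the service has processed all commands up to put_offset.
struct CommandBuffer {
  State FlushSync(int32_t put_offset, int32_t /*last_known_get*/) {
    State state;
    state.get_offset = put_offset;  // pretend everything was consumed
    state.error = 0;
    return state;
  }
};

// Equivalent of CommandBufferHelper::Finish() under the new semantics: keep
// flushing until the reader's get offset catches up with the writer's put.
bool Finish(CommandBuffer& cb, int32_t put, int32_t& get) {
  do {
    State state = cb.FlushSync(put, get);
    if (state.error != 0)
      return false;  // reader has shut down; do not loop forever
    get = state.get_offset;
  } while (get != put);
  return true;
}

int main() {
  CommandBuffer cb;
  int32_t get = 0;
  return Finish(cb, /*put=*/16, get) ? 0 : 1;
}
```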
I removed Yield because it does not make sense with the new semantics; there is no round-robin scheduling. Tested with WebGL on Windows and Mac and checked that 72672 did not regress.
Review URL: http://codereview.chromium.org/7466022
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@93479 0039d316-1c4b-4281-b951-d872f2087c98
51 files changed, 351 insertions, 758 deletions
diff --git a/content/common/gpu/gpu_channel.cc b/content/common/gpu/gpu_channel.cc index e83320d..5c430c4 100644 --- a/content/common/gpu/gpu_channel.cc +++ b/content/common/gpu/gpu_channel.cc @@ -35,7 +35,8 @@ GpuChannel::GpuChannel(GpuChannelManager* gpu_channel_manager, renderer_pid_(base::kNullProcessId), share_group_(new gfx::GLShareGroup), watchdog_(watchdog), - software_(software) { + software_(software), + task_factory_(ALLOW_THIS_IN_INITIALIZER_LIST(this)) { DCHECK(gpu_channel_manager); DCHECK(renderer_id); const CommandLine* command_line = CommandLine::ForCurrentProcess(); @@ -88,9 +89,20 @@ bool GpuChannel::OnMessageReceived(const IPC::Message& message) { << " with type " << message.type(); } + // Control messages are not deferred and can be handled out of order with + // respect to routed ones. if (message.routing_id() == MSG_ROUTING_CONTROL) return OnControlMessageReceived(message); + // If the channel is unscheduled, defer sync and async messages until it is + // rescheduled. Also, even if the channel is scheduled, do not allow newly + // received messages to be handled before previously received deferred ones; + // append them to the deferred queue as well. + if (!IsScheduled() || !deferred_messages_.empty()) { + deferred_messages_.push(new IPC::Message(message)); + return true; + } + if (!router_.RouteMessage(message)) { // Respond to sync messages even if router failed to route. if (message.is_sync()) { @@ -101,6 +113,20 @@ bool GpuChannel::OnMessageReceived(const IPC::Message& message) { return false; } + // If the channel becomes unscheduled as a result of handling the message, + // synthesize an IPC message to flush the command buffer that became + // unscheduled. + for (StubMap::Iterator<GpuCommandBufferStub> it(&stubs_); + !it.IsAtEnd(); + it.Advance()) { + GpuCommandBufferStub* stub = it.GetCurrentValue(); + if (!stub->IsScheduled()) { + DCHECK(deferred_messages_.empty()); + deferred_messages_.push(new GpuCommandBufferMsg_Rescheduled( + stub->route_id())); + } + } + return true; } @@ -129,6 +155,30 @@ bool GpuChannel::Send(IPC::Message* message) { return channel_->Send(message); } +bool GpuChannel::IsScheduled() { + for (StubMap::Iterator<GpuCommandBufferStub> it(&stubs_); + !it.IsAtEnd(); + it.Advance()) { + GpuCommandBufferStub* stub = it.GetCurrentValue(); + if (!stub->IsScheduled()) + return false; + } + + return true; +} + +void GpuChannel::OnScheduled() { + // Post a task to handle any deferred messages. The deferred message queue is + // not emptied here, which ensures that OnMessageReceived will continue to + // defer newly received messages until the ones in the queue have all been + // handled by HandleDeferredMessages. HandleDeferredMessages is invoked as a + // task to prevent reentrancy. + MessageLoop::current()->PostTask( + FROM_HERE, + task_factory_.NewRunnableMethod( + &GpuChannel::HandleDeferredMessages)); +} + void GpuChannel::LoseAllContexts() { gpu_channel_manager_->LoseAllContexts(); } @@ -220,7 +270,7 @@ void GpuChannel::DestroyCommandBufferByViewId(int32 render_view_id) { // that matches the given render_view_id and delete the route. 
for (StubMap::const_iterator iter(&stubs_); !iter.IsAtEnd(); iter.Advance()) { if (iter.GetCurrentValue()->render_view_id() == render_view_id) { - OnDestroyCommandBuffer(iter.GetCurrentKey()); + OnDestroyCommandBuffer(iter.GetCurrentKey(), NULL); return; } } @@ -228,15 +278,17 @@ void GpuChannel::DestroyCommandBufferByViewId(int32 render_view_id) { #endif bool GpuChannel::OnControlMessageReceived(const IPC::Message& msg) { + // Always use IPC_MESSAGE_HANDLER_DELAY_REPLY for synchronous message handlers + // here. This is so the reply can be delayed if the scheduler is unscheduled. bool handled = true; IPC_BEGIN_MESSAGE_MAP(GpuChannel, msg) IPC_MESSAGE_HANDLER(GpuChannelMsg_Initialize, OnInitialize) - IPC_MESSAGE_HANDLER(GpuChannelMsg_CreateOffscreenCommandBuffer, - OnCreateOffscreenCommandBuffer) - IPC_MESSAGE_HANDLER(GpuChannelMsg_DestroyCommandBuffer, - OnDestroyCommandBuffer) - IPC_MESSAGE_HANDLER(GpuChannelMsg_CreateOffscreenSurface, - OnCreateOffscreenSurface) + IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuChannelMsg_CreateOffscreenCommandBuffer, + OnCreateOffscreenCommandBuffer) + IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuChannelMsg_DestroyCommandBuffer, + OnDestroyCommandBuffer) + IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuChannelMsg_CreateOffscreenSurface, + OnCreateOffscreenSurface) IPC_MESSAGE_HANDLER(GpuChannelMsg_DestroySurface, OnDestroySurface) IPC_MESSAGE_HANDLER(GpuChannelMsg_CreateTransportTexture, OnCreateTransportTexture) @@ -246,6 +298,21 @@ bool GpuChannel::OnControlMessageReceived(const IPC::Message& msg) { return handled; } +void GpuChannel::HandleDeferredMessages() { + // Empty the deferred queue so OnMessageRecieved does not defer on that + // account and to prevent an infinite loop if the scheduler is unscheduled + // as a result of handling already deferred messages. 
+ std::queue<IPC::Message*> deferred_messages_copy; + std::swap(deferred_messages_copy, deferred_messages_); + + while (!deferred_messages_copy.empty()) { + scoped_ptr<IPC::Message> message(deferred_messages_copy.front()); + deferred_messages_copy.pop(); + + OnMessageReceived(*message); + } +} + int GpuChannel::GenerateRouteID() { static int last_id = 0; return ++last_id; @@ -263,10 +330,12 @@ void GpuChannel::OnInitialize(base::ProcessHandle renderer_process) { void GpuChannel::OnCreateOffscreenCommandBuffer( const gfx::Size& size, const GPUCreateCommandBufferConfig& init_params, - int32* route_id) { + IPC::Message* reply_message) { + int32 route_id = MSG_ROUTING_NONE; + content::GetContentClient()->SetActiveURL(init_params.active_url); #if defined(ENABLE_GPU) - *route_id = GenerateRouteID(); + route_id = GenerateRouteID(); scoped_ptr<GpuCommandBufferStub> stub(new GpuCommandBufferStub( this, @@ -275,37 +344,39 @@ void GpuChannel::OnCreateOffscreenCommandBuffer( disallowed_extensions_, init_params.allowed_extensions, init_params.attribs, - *route_id, + route_id, 0, 0, watchdog_, software_)); - router_.AddRoute(*route_id, stub.get()); - stubs_.AddWithID(stub.release(), *route_id); + router_.AddRoute(route_id, stub.get()); + stubs_.AddWithID(stub.release(), route_id); TRACE_EVENT1("gpu", "GpuChannel::OnCreateOffscreenCommandBuffer", "route_id", route_id); -#else - *route_id = MSG_ROUTING_NONE; #endif + + GpuChannelMsg_CreateOffscreenCommandBuffer::WriteReplyParams( + reply_message, + route_id); + Send(reply_message); } -void GpuChannel::OnDestroyCommandBuffer(int32 route_id) { +void GpuChannel::OnDestroyCommandBuffer(int32 route_id, + IPC::Message* reply_message) { #if defined(ENABLE_GPU) TRACE_EVENT1("gpu", "GpuChannel::OnDestroyCommandBuffer", "route_id", route_id); if (router_.ResolveRoute(route_id)) { - GpuCommandBufferStub* stub = stubs_.Lookup(route_id); - // In case the renderer is currently blocked waiting for a sync reply from - // the stub, allow the stub to clean up and unblock pending messages here: - if (stub != NULL) - stub->CommandBufferWasDestroyed(); router_.RemoveRoute(route_id); stubs_.Remove(route_id); } #endif + + if (reply_message) + Send(reply_message); } void GpuChannel::OnCreateOffscreenSurface(const gfx::Size& size, - int* route_id) { - *route_id = MSG_ROUTING_NONE; + IPC::Message* reply_message) { + int route_id = MSG_ROUTING_NONE; #if defined(ENABLE_GPU) scoped_refptr<gfx::GLSurface> surface( @@ -313,15 +384,19 @@ void GpuChannel::OnCreateOffscreenSurface(const gfx::Size& size, if (!surface.get()) return; - *route_id = GenerateRouteID(); + route_id = GenerateRouteID(); scoped_ptr<GpuSurfaceStub> stub (new GpuSurfaceStub(this, - *route_id, + route_id, surface.release())); - router_.AddRoute(*route_id, stub.get()); - surfaces_.AddWithID(stub.release(), *route_id); + router_.AddRoute(route_id, stub.get()); + surfaces_.AddWithID(stub.release(), route_id); #endif + + GpuChannelMsg_CreateOffscreenSurface::WriteReplyParams(reply_message, + route_id); + Send(reply_message); } void GpuChannel::OnDestroySurface(int route_id) { diff --git a/content/common/gpu/gpu_channel.h b/content/common/gpu/gpu_channel.h index 2b5ad1f..7defd5b 100644 --- a/content/common/gpu/gpu_channel.h +++ b/content/common/gpu/gpu_channel.h @@ -6,6 +6,7 @@ #define CONTENT_COMMON_GPU_GPU_CHANNEL_H_ #pragma once +#include <queue> #include <set> #include <string> #include <vector> @@ -79,6 +80,15 @@ class GpuChannel : public IPC::Channel::Listener, // IPC::Message::Sender implementation: virtual bool 
Send(IPC::Message* msg); + // Whether this channel is able to handle IPC messages. + bool IsScheduled(); + + // This is called when a command buffer transitions from the unscheduled + // state to the scheduled state, which potentially means the channel + // transitions from the unscheduled to the scheduled state. When this occurs + // deferred IPC messaged are handled. + void OnScheduled(); + void CreateViewCommandBuffer( gfx::PluginWindowHandle window, int32 render_view_id, @@ -132,6 +142,8 @@ class GpuChannel : public IPC::Channel::Listener, bool OnControlMessageReceived(const IPC::Message& msg); + void HandleDeferredMessages(); + int GenerateRouteID(); // Message handlers. @@ -139,11 +151,11 @@ class GpuChannel : public IPC::Channel::Listener, void OnCreateOffscreenCommandBuffer( const gfx::Size& size, const GPUCreateCommandBufferConfig& init_params, - int32* route_id); - void OnDestroyCommandBuffer(int32 route_id); + IPC::Message* reply_message); + void OnDestroyCommandBuffer(int32 route_id, IPC::Message* reply_message); void OnCreateOffscreenSurface(const gfx::Size& size, - int* route_id); + IPC::Message* reply_message); void OnDestroySurface(int route_id); void OnCreateTransportTexture(int32 context_route_id, int32 host_id); @@ -155,6 +167,8 @@ class GpuChannel : public IPC::Channel::Listener, scoped_ptr<IPC::SyncChannel> channel_; + std::queue<IPC::Message*> deferred_messages_; + // The id of the renderer who is on the other side of the channel. int renderer_id_; @@ -190,6 +204,8 @@ class GpuChannel : public IPC::Channel::Listener, GpuWatchdog* watchdog_; bool software_; + ScopedRunnableMethodFactory<GpuChannel> task_factory_; + DISALLOW_COPY_AND_ASSIGN(GpuChannel); }; diff --git a/content/common/gpu/gpu_command_buffer_stub.cc b/content/common/gpu/gpu_command_buffer_stub.cc index 6de811d..3890e81 100644 --- a/content/common/gpu/gpu_command_buffer_stub.cc +++ b/content/common/gpu/gpu_command_buffer_stub.cc @@ -66,16 +66,6 @@ GpuCommandBufferStub::~GpuCommandBufferStub() { } bool GpuCommandBufferStub::OnMessageReceived(const IPC::Message& message) { - // If the scheduler is unscheduled, defer sync and async messages until it is - // rescheduled. Also, even if the scheduler is scheduled, do not allow newly - // received messages to be handled before previously received deferred ones; - // append them to the deferred queue as well. - if ((scheduler_.get() && !scheduler_->IsScheduled()) || - !deferred_messages_.empty()) { - deferred_messages_.push(new IPC::Message(message)); - return true; - } - // Always use IPC_MESSAGE_HANDLER_DELAY_REPLY for synchronous message handlers // here. This is so the reply can be delayed if the scheduler is unscheduled. 
bool handled = true; @@ -87,6 +77,7 @@ bool GpuCommandBufferStub::OnMessageReceived(const IPC::Message& message) { IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_GetState, OnGetState); IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_Flush, OnFlush); IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_AsyncFlush, OnAsyncFlush); + IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_Rescheduled, OnRescheduled); IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_CreateTransferBuffer, OnCreateTransferBuffer); IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_RegisterTransferBuffer, @@ -120,6 +111,10 @@ bool GpuCommandBufferStub::Send(IPC::Message* message) { return channel_->Send(message); } +bool GpuCommandBufferStub::IsScheduled() { + return !scheduler_.get() || scheduler_->IsScheduled(); +} + void GpuCommandBufferStub::OnInitialize( base::SharedMemoryHandle ring_buffer, int32 size, @@ -144,9 +139,9 @@ void GpuCommandBufferStub::OnInitialize( // Initialize the CommandBufferService and GpuScheduler. if (command_buffer_->Initialize(&shared_memory, size)) { - scheduler_.reset(new gpu::GpuScheduler(command_buffer_.get(), - channel_, - NULL)); + scheduler_.reset(gpu::GpuScheduler::Create(command_buffer_.get(), + channel_, + NULL)); if (scheduler_->Initialize( handle_, initial_size_, @@ -162,10 +157,8 @@ void GpuCommandBufferStub::OnInitialize( NewCallback(this, &GpuCommandBufferStub::OnParseError)); scheduler_->SetSwapBuffersCallback( NewCallback(this, &GpuCommandBufferStub::OnSwapBuffers)); - scheduler_->SetLatchCallback(base::Bind( - &GpuChannel::OnLatchCallback, base::Unretained(channel_), route_id_)); scheduler_->SetScheduledCallback( - NewCallback(this, &GpuCommandBufferStub::OnScheduled)); + NewCallback(channel_, &GpuChannel::OnScheduled)); scheduler_->SetTokenCallback(base::Bind( &GpuCommandBufferStub::OnSetToken, base::Unretained(this))); if (watchdog_) @@ -264,22 +257,45 @@ void GpuCommandBufferStub::OnFlush(int32 put_offset, uint32 flush_count, IPC::Message* reply_message) { TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnFlush"); - gpu::CommandBuffer::State state; + gpu::CommandBuffer::State state = command_buffer_->GetState(); if (flush_count - last_flush_count_ >= 0x8000000U) { // We received this message out-of-order. This should not happen but is here // to catch regressions. Ignore the message. NOTREACHED() << "Received an AsyncFlush message out-of-order"; - state = command_buffer_->GetState(); + GpuCommandBufferMsg_Flush::WriteReplyParams(reply_message, state); + Send(reply_message); } else { last_flush_count_ = flush_count; + + // Reply immediately if the client was out of date with the current get + // offset. + bool reply_immediately = state.get_offset != last_known_get; + if (reply_immediately) { + GpuCommandBufferMsg_Flush::WriteReplyParams(reply_message, state); + Send(reply_message); + } + + // Process everything up to the put offset. state = command_buffer_->FlushSync(put_offset, last_known_get); - } - if (state.error == gpu::error::kLostContext && - gfx::GLContext::LosesAllContextsOnContextLost()) - channel_->LoseAllContexts(); - GpuCommandBufferMsg_Flush::WriteReplyParams(reply_message, state); - Send(reply_message); + // Lose all contexts if the context was lost. + if (state.error == gpu::error::kLostContext && + gfx::GLContext::LosesAllContextsOnContextLost()) { + channel_->LoseAllContexts(); + } + + // Then if the client was up-to-date with the get offset, reply to the + // synchronpous IPC only after processing all commands are processed. 
This + // prevents the client from "spinning" when it fills up the command buffer. + // Otherwise, since the state has changed since the immediate reply, send + // an asyncronous state update back to the client. + if (!reply_immediately) { + GpuCommandBufferMsg_Flush::WriteReplyParams(reply_message, state); + Send(reply_message); + } else { + ReportState(); + } + } } void GpuCommandBufferStub::OnAsyncFlush(int32 put_offset, uint32 flush_count) { @@ -292,10 +308,15 @@ void GpuCommandBufferStub::OnAsyncFlush(int32 put_offset, uint32 flush_count) { // to catch regressions. Ignore the message. NOTREACHED() << "Received a Flush message out-of-order"; } - // TODO(piman): Do this everytime the scheduler finishes processing a batch of - // commands. - MessageLoop::current()->PostTask(FROM_HERE, - task_factory_.NewRunnableMethod(&GpuCommandBufferStub::ReportState)); + + ReportState(); +} + +void GpuCommandBufferStub::OnRescheduled() { + gpu::CommandBuffer::State state = command_buffer_->GetLastState(); + command_buffer_->Flush(state.put_offset); + + ReportState(); } void GpuCommandBufferStub::OnCreateTransferBuffer(int32 size, @@ -379,33 +400,6 @@ void GpuCommandBufferStub::OnCommandProcessed() { watchdog_->CheckArmed(); } -void GpuCommandBufferStub::HandleDeferredMessages() { - // Empty the deferred queue so OnMessageRecieved does not defer on that - // account and to prevent an infinite loop if the scheduler is unscheduled - // as a result of handling already deferred messages. - std::queue<IPC::Message*> deferred_messages_copy; - std::swap(deferred_messages_copy, deferred_messages_); - - while (!deferred_messages_copy.empty()) { - scoped_ptr<IPC::Message> message(deferred_messages_copy.front()); - deferred_messages_copy.pop(); - - OnMessageReceived(*message); - } -} - -void GpuCommandBufferStub::OnScheduled() { - // Post a task to handle any deferred messages. The deferred message queue is - // not emptied here, which ensures that OnMessageReceived will continue to - // defer newly received messages until the ones in the queue have all been - // handled by HandleDeferredMessages. HandleDeferredMessages is invoked as a - // task to prevent reentrancy. - MessageLoop::current()->PostTask( - FROM_HERE, - task_factory_.NewRunnableMethod( - &GpuCommandBufferStub::HandleDeferredMessages)); -} - #if defined(OS_MACOSX) void GpuCommandBufferStub::OnSetWindowSize(const gfx::Size& size) { GpuChannelManager* gpu_channel_manager = channel_->gpu_channel_manager(); @@ -493,19 +487,6 @@ void GpuCommandBufferStub::AcceleratedSurfaceBuffersSwapped( } #endif // defined(OS_MACOSX) || defined(TOUCH_UI) -void GpuCommandBufferStub::CommandBufferWasDestroyed() { - TRACE_EVENT0("gpu", "GpuCommandBufferStub::CommandBufferWasDestroyed"); - // In case the renderer is currently blocked waiting for a sync reply from - // the stub, this method allows us to cleanup and unblock pending messages. - if (scheduler_.get()) { - while (!scheduler_->IsScheduled()) - scheduler_->SetScheduled(true); - } - // Handle any deferred messages now that the scheduler is not blocking - // message handling. 
- HandleDeferredMessages(); -} - void GpuCommandBufferStub::AddSetTokenCallback( const base::Callback<void(int32)>& callback) { set_token_callbacks_.push_back(callback); diff --git a/content/common/gpu/gpu_command_buffer_stub.h b/content/common/gpu/gpu_command_buffer_stub.h index 902ec9b..b97234d4 100644 --- a/content/common/gpu/gpu_command_buffer_stub.h +++ b/content/common/gpu/gpu_command_buffer_stub.h @@ -52,6 +52,9 @@ class GpuCommandBufferStub // IPC::Message::Sender implementation: virtual bool Send(IPC::Message* msg); + // Whether this command buffer can currently handle IPC messages. + bool IsScheduled(); + // Get the GLContext associated with this object. gpu::GpuScheduler* scheduler() const { return scheduler_.get(); } @@ -88,10 +91,6 @@ class GpuCommandBufferStub void AcceleratedSurfaceBuffersSwapped(uint64 swap_buffers_count); #endif // defined(OS_MACOSX) || defined(TOUCH_UI) - // Called when the command buffer was destroyed, and the stub should now - // unblock itself and handle pending messages. - void CommandBufferWasDestroyed(); - // Register a callback to be Run() whenever the underlying scheduler receives // a set_token() call. The callback will be Run() with the just-set token as // its only parameter. Multiple callbacks may be registered. @@ -111,6 +110,7 @@ class GpuCommandBufferStub uint32 flush_count, IPC::Message* reply_message); void OnAsyncFlush(int32 put_offset, uint32 flush_count); + void OnRescheduled(); void OnCreateTransferBuffer(int32 size, int32 id_request, IPC::Message* reply_message); @@ -127,8 +127,6 @@ class GpuCommandBufferStub void OnSwapBuffers(); void OnCommandProcessed(); - void HandleDeferredMessages(); - void OnScheduled(); void OnParseError(); #if defined(OS_MACOSX) @@ -166,7 +164,6 @@ class GpuCommandBufferStub scoped_ptr<gpu::CommandBufferService> command_buffer_; scoped_ptr<gpu::GpuScheduler> scheduler_; - std::queue<IPC::Message*> deferred_messages_; std::vector<base::Callback<void(int32)> > set_token_callbacks_; // SetParent may be called before Initialize, in which case we need to keep diff --git a/content/common/gpu/gpu_messages.h b/content/common/gpu/gpu_messages.h index a1c13e2..b4fc5ff 100644 --- a/content/common/gpu/gpu_messages.h +++ b/content/common/gpu/gpu_messages.h @@ -300,7 +300,6 @@ IPC_SYNC_MESSAGE_CONTROL2_1(GpuChannelMsg_CreateOffscreenCommandBuffer, // The CommandBufferProxy sends this to the GpuCommandBufferStub in its // destructor, so that the stub deletes the actual CommandBufferService // object that it's hosting. -// TODO(apatrick): Implement this. IPC_SYNC_MESSAGE_CONTROL1_0(GpuChannelMsg_DestroyCommandBuffer, int32 /* instance_id */) @@ -356,6 +355,12 @@ IPC_MESSAGE_ROUTED2(GpuCommandBufferMsg_AsyncFlush, int32 /* put_offset */, uint32 /* flush_count */) +// Asynchronously process any commands known to the GPU process. This is only +// used in the event that a channel is unscheduled and needs to be flushed +// again to process any commands issued subsequent to unscheduling. The GPU +// process actually sends it (deferred) to itself. +IPC_MESSAGE_ROUTED0(GpuCommandBufferMsg_Rescheduled) + // Return the current state of the command buffer following a request via // an AsyncGetState or AsyncFlush message. (This message is sent from the // GPU process to the renderer process.) 
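The gpu_channel.cc hunk above moves message deferral from the stub to the channel, and the GpuCommandBufferMsg_Rescheduled comment explains why a deferred, self-addressed flush is needed. A simplified stand-in for that defer-and-replay pattern — plain C++ with an invented Channel class and string messages instead of the real IPC types — looks roughly like this:

```cpp
// Simplified stand-in (not the real GpuChannel) for the deferral pattern:
// while unscheduled, incoming messages are queued; on reschedule, the queue
// is swapped into a local copy and replayed through the normal receive path,
// so anything that unschedules the channel again simply gets re-deferred.
#include <cstdio>
#include <memory>
#include <queue>
#include <string>

class Channel {
 public:
  void SetScheduled(bool scheduled) { scheduled_ = scheduled; }

  void OnMessageReceived(const std::string& msg) {
    if (!scheduled_ || !deferred_.empty()) {
      deferred_.push(std::make_unique<std::string>(msg));
      return;
    }
    std::printf("handling: %s\n", msg.c_str());
  }

  // Mirrors GpuChannel::HandleDeferredMessages: empty the member queue first
  // so replayed messages are not re-deferred just because the queue is
  // non-empty, and so a re-unschedule cannot cause an infinite loop.
  void HandleDeferredMessages() {
    std::queue<std::unique_ptr<std::string>> copy;
    std::swap(copy, deferred_);
    while (!copy.empty()) {
      OnMessageReceived(*copy.front());
      copy.pop();
    }
  }

 private:
  bool scheduled_ = true;
  std::queue<std::unique_ptr<std::string>> deferred_;
};

int main() {
  Channel channel;
  channel.SetScheduled(false);
  channel.OnMessageReceived("AsyncFlush");
  channel.OnMessageReceived("Echo");
  channel.SetScheduled(true);
  channel.HandleDeferredMessages();  // replays both messages in order
  return 0;
}
```

Swapping the queue into a local copy before replaying is what keeps a message that unschedules the channel again from looping forever: it just lands on the now-empty member queue and is handled on the next OnScheduled.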
diff --git a/content/renderer/gpu/command_buffer_proxy.cc b/content/renderer/gpu/command_buffer_proxy.cc index 6e7b15e..53b7f67 100644 --- a/content/renderer/gpu/command_buffer_proxy.cc +++ b/content/renderer/gpu/command_buffer_proxy.cc @@ -165,6 +165,10 @@ gpu::CommandBuffer::State CommandBufferProxy::GetState() { return last_state_; } +gpu::CommandBuffer::State CommandBufferProxy::GetLastState() { + return last_state_; +} + void CommandBufferProxy::Flush(int32 put_offset) { if (last_state_.error != gpu::error::kNoError) return; diff --git a/content/renderer/gpu/command_buffer_proxy.h b/content/renderer/gpu/command_buffer_proxy.h index aba15c9..3ae3ff5 100644 --- a/content/renderer/gpu/command_buffer_proxy.h +++ b/content/renderer/gpu/command_buffer_proxy.h @@ -49,6 +49,7 @@ class CommandBufferProxy : public gpu::CommandBuffer, virtual bool Initialize(base::SharedMemory* buffer, int32 size); virtual gpu::Buffer GetRingBuffer(); virtual State GetState(); + virtual State GetLastState(); virtual void Flush(int32 put_offset); virtual State FlushSync(int32 put_offset, int32 last_known_get); virtual void SetGetOffset(int32 get_offset); @@ -97,11 +98,6 @@ class CommandBufferProxy : public gpu::CommandBuffer, virtual void SetWindowSize(const gfx::Size& size); #endif - // Get the last state received from the service without synchronizing. - State GetLastState() { - return last_state_; - } - private: // Send an IPC message over the GPU channel. This is private to fully diff --git a/content/renderer/gpu/webgraphicscontext3d_command_buffer_impl.cc b/content/renderer/gpu/webgraphicscontext3d_command_buffer_impl.cc index 2d825b9..79d7616 100644 --- a/content/renderer/gpu/webgraphicscontext3d_command_buffer_impl.cc +++ b/content/renderer/gpu/webgraphicscontext3d_command_buffer_impl.cc @@ -372,16 +372,11 @@ void WebGraphicsContext3DCommandBufferImpl::getChildToParentLatchCHROMIUM( void WebGraphicsContext3DCommandBufferImpl::waitLatchCHROMIUM( WGC3Duint latch_id) { - TRACE_EVENT1("gpu", "WebGfxCtx3DCmdBfrImpl::WaitLatch", "latch_id", latch_id); - gl_->WaitLatchCHROMIUM(latch_id); } void WebGraphicsContext3DCommandBufferImpl::setLatchCHROMIUM( WGC3Duint latch_id) { - TRACE_EVENT1("gpu", "WebGfxCtx3DCmdBfrImpl::SetLatch", "latch_id", latch_id); - gl_->SetLatchCHROMIUM(latch_id); - // required to ensure set command is sent to GPU process gl_->Flush(); } diff --git a/gpu/command_buffer/build_gles2_cmd_buffer.py b/gpu/command_buffer/build_gles2_cmd_buffer.py index 02cac97..76f3c59 100755 --- a/gpu/command_buffer/build_gles2_cmd_buffer.py +++ b/gpu/command_buffer/build_gles2_cmd_buffer.py @@ -217,8 +217,6 @@ GL_APICALL void GL_APIENTRY glCopyTextureToParentTextureCHROMIUM (GLidBi GL_APICALL void GL_APIENTRY glResizeCHROMIUM (GLuint width, GLuint height); GL_APICALL const GLchar* GL_APIENTRY glGetRequestableExtensionsCHROMIUM (void); GL_APICALL void GL_APIENTRY glRequestExtensionCHROMIUM (const char* extension); -GL_APICALL void GL_APIENTRY glSetLatchCHROMIUM (GLuint latch_id); -GL_APICALL void GL_APIENTRY glWaitLatchCHROMIUM (GLuint latch_id); GL_APICALL void GL_APIENTRY glRateLimitOffscreenContextCHROMIUM (void); GL_APICALL void GL_APIENTRY glSetSurfaceCHROMIUM (GLint surface_id); GL_APICALL void GL_APIENTRY glGetMultipleIntegervCHROMIUM (const GLenum* pnames, GLuint count, GLint* results, GLsizeiptr size); @@ -426,8 +424,6 @@ _CMD_ID_TABLE = { 'ResizeCHROMIUM': 448, 'GetRequestableExtensionsCHROMIUM': 449, 'RequestExtensionCHROMIUM': 450, - 'SetLatchCHROMIUM': 451, - 'WaitLatchCHROMIUM': 452, 
'SetSurfaceCHROMIUM': 453, 'GetMultipleIntegervCHROMIUM': 454, 'GetProgramInfoCHROMIUM': 455, @@ -1737,12 +1733,6 @@ _FUNCTION_INFO = { 'extension': True, 'chromium': True, }, - 'SetLatchCHROMIUM': { - 'type': 'Custom', - }, - 'WaitLatchCHROMIUM': { - 'type': 'Custom', - }, 'RateLimitOffscreenContextCHROMIUM': { 'gen_cmd': False, 'extension': True, diff --git a/gpu/command_buffer/client/cmd_buffer_helper.cc b/gpu/command_buffer/client/cmd_buffer_helper.cc index 354d563..bd44431 100644 --- a/gpu/command_buffer/client/cmd_buffer_helper.cc +++ b/gpu/command_buffer/client/cmd_buffer_helper.cc @@ -21,8 +21,6 @@ CommandBufferHelper::CommandBufferHelper(CommandBuffer* command_buffer) total_entry_count_(0), usable_entry_count_(0), token_(0), - last_token_read_(-1), - get_(0), put_(0), last_put_sent_(0), commands_issued_(0), @@ -47,7 +45,6 @@ bool CommandBufferHelper::Initialize(int32 ring_buffer_size) { total_entry_count_ = num_ring_buffer_entries; usable_entry_count_ = total_entry_count_ - kJumpEntries; put_ = state.put_offset; - SynchronizeState(state); return true; } @@ -57,8 +54,7 @@ CommandBufferHelper::~CommandBufferHelper() { bool CommandBufferHelper::FlushSync() { time(&last_flush_time_); last_put_sent_ = put_; - CommandBuffer::State state = command_buffer_->FlushSync(put_, get_); - SynchronizeState(state); + CommandBuffer::State state = command_buffer_->FlushSync(put_, get_offset()); return state.error == error::kNoError; } @@ -77,7 +73,7 @@ bool CommandBufferHelper::Finish() { // has shutdown. if (!FlushSync()) return false; - } while (put_ != get_); + } while (put_ != get_offset()); return true; } @@ -96,7 +92,7 @@ int32 CommandBufferHelper::InsertToken() { TRACE_EVENT0("gpu", "CommandBufferHelper::InsertToken(wrapped)"); // we wrapped Finish(); - GPU_DCHECK_EQ(token_, last_token_read_); + GPU_DCHECK_EQ(token_, last_token_read()); } return token_; } @@ -109,8 +105,8 @@ void CommandBufferHelper::WaitForToken(int32 token) { if (token < 0) return; if (token > token_) return; // we wrapped - while (last_token_read_ < token) { - if (get_ == put_) { + while (last_token_read() < token) { + if (get_offset() == put_) { GPU_LOG(FATAL) << "Empty command buffer while waiting on a token."; return; } @@ -121,11 +117,6 @@ void CommandBufferHelper::WaitForToken(int32 token) { } } -void CommandBufferHelper::YieldScheduler() { - cmd::YieldScheduler& cmd = GetCmdSpace<cmd::YieldScheduler>(); - cmd.Init(); -} - // Waits for available entries, basically waiting until get >= put + count + 1. // It actually waits for contiguous entries, so it may need to wrap the buffer // around, adding a jump. Thus this function may change the value of put_. The @@ -139,9 +130,9 @@ void CommandBufferHelper::WaitForAvailableEntries(int32 count) { // need to make sure get wraps first, actually that get is 1 or more (since // put will wrap to 0 after we add the jump). GPU_DCHECK_LE(1, put_); - if (get_ > put_ || get_ == 0) { + if (get_offset() > put_ || get_offset() == 0) { TRACE_EVENT0("gpu", "CommandBufferHelper::WaitForAvailableEntries"); - while (get_ > put_ || get_ == 0) { + while (get_offset() > put_ || get_offset() == 0) { // Do not loop forever if the flush fails, meaning the command buffer // reader has shutdown. 
if (!FlushSync()) @@ -185,13 +176,7 @@ CommandBufferEntry* CommandBufferHelper::GetSpace(uint32 entries) { error::Error CommandBufferHelper::GetError() { CommandBuffer::State state = command_buffer_->GetState(); - SynchronizeState(state); return static_cast<error::Error>(state.error); } -void CommandBufferHelper::SynchronizeState(const CommandBuffer::State& state) { - get_ = state.get_offset; - last_token_read_ = state.token; -} - } // namespace gpu diff --git a/gpu/command_buffer/client/cmd_buffer_helper.h b/gpu/command_buffer/client/cmd_buffer_helper.h index c7413ca..a7c17ef 100644 --- a/gpu/command_buffer/client/cmd_buffer_helper.h +++ b/gpu/command_buffer/client/cmd_buffer_helper.h @@ -83,11 +83,6 @@ class CommandBufferHelper { // the value of the token to wait for. void WaitForToken(int32 token); - // Inserts a yield command, signaling the scheduler that this is a good point - // to update the state and schedule other command buffers. This is - // particularly useful after inserting a token that will be waited on. - void YieldScheduler(); - // Called prior to each command being issued. Waits for a certain amount of // space to be available. Returns address of space. CommandBufferEntry* GetSpace(uint32 entries); @@ -121,7 +116,11 @@ class CommandBufferHelper { } int32 last_token_read() const { - return last_token_read_; + return command_buffer_->GetLastState().token; + } + + int32 get_offset() const { + return command_buffer_->GetLastState().get_offset; } error::Error GetError(); @@ -221,20 +220,16 @@ class CommandBufferHelper { // Returns the number of available entries (they may not be contiguous). int32 AvailableEntries() { - return (get_ - put_ - 1 + usable_entry_count_) % usable_entry_count_; + return (get_offset() - put_ - 1 + usable_entry_count_) % + usable_entry_count_; } - // Synchronize with current service state. - void SynchronizeState(const CommandBuffer::State& state); - CommandBuffer* command_buffer_; Buffer ring_buffer_; CommandBufferEntry *entries_; int32 total_entry_count_; // the total number of entries int32 usable_entry_count_; // the usable number (ie, minus space for jump) int32 token_; - int32 last_token_read_; - int32 get_; int32 put_; int32 last_put_sent_; int commands_issued_; diff --git a/gpu/command_buffer/client/cmd_buffer_helper_test.cc b/gpu/command_buffer/client/cmd_buffer_helper_test.cc index 56eaa0a..01f3760 100644 --- a/gpu/command_buffer/client/cmd_buffer_helper_test.cc +++ b/gpu/command_buffer/client/cmd_buffer_helper_test.cc @@ -77,8 +77,8 @@ class CommandBufferHelperTest : public testing::Test { .WillRepeatedly( Invoke(do_jump_command_.get(), &DoJumpCommand::DoCommand)); - gpu_scheduler_.reset(new GpuScheduler( - command_buffer_.get(), NULL, parser_, 1)); + gpu_scheduler_.reset(GpuScheduler::CreateForTests( + command_buffer_.get(), NULL, parser_)); command_buffer_->SetPutOffsetChangeCallback(NewCallback( gpu_scheduler_.get(), &GpuScheduler::PutChanged)); @@ -185,10 +185,6 @@ TEST_F(CommandBufferHelperTest, TestCommandProcessing) { args2[1].value_float = 6.f; AddCommandWithExpect(error::kNoError, kUnusedCommandId, 2, args2); - helper_->Flush(); - // Check that the engine has work to do now. - EXPECT_FALSE(parser_->IsEmpty()); - // Wait until it's done. helper_->Finish(); // Check that the engine has no more work to do. 
diff --git a/gpu/command_buffer/client/fenced_allocator_test.cc b/gpu/command_buffer/client/fenced_allocator_test.cc index 3bf9bd8..883d752 100644 --- a/gpu/command_buffer/client/fenced_allocator_test.cc +++ b/gpu/command_buffer/client/fenced_allocator_test.cc @@ -51,8 +51,8 @@ class BaseFencedAllocatorTest : public testing::Test { 0, api_mock_.get()); - gpu_scheduler_.reset(new GpuScheduler( - command_buffer_.get(), NULL, parser_, INT_MAX)); + gpu_scheduler_.reset(GpuScheduler::CreateForTests( + command_buffer_.get(), NULL, parser_)); command_buffer_->SetPutOffsetChangeCallback(NewCallback( gpu_scheduler_.get(), &GpuScheduler::PutChanged)); diff --git a/gpu/command_buffer/client/gles2_c_lib_autogen.h b/gpu/command_buffer/client/gles2_c_lib_autogen.h index 2306501..3585cab 100644 --- a/gpu/command_buffer/client/gles2_c_lib_autogen.h +++ b/gpu/command_buffer/client/gles2_c_lib_autogen.h @@ -568,12 +568,6 @@ const GLchar* GLES2GetRequestableExtensionsCHROMIUM() { void GLES2RequestExtensionCHROMIUM(const char* extension) { gles2::GetGLContext()->RequestExtensionCHROMIUM(extension); } -void GLES2SetLatchCHROMIUM(GLuint latch_id) { - gles2::GetGLContext()->SetLatchCHROMIUM(latch_id); -} -void GLES2WaitLatchCHROMIUM(GLuint latch_id) { - gles2::GetGLContext()->WaitLatchCHROMIUM(latch_id); -} void GLES2RateLimitOffscreenContextCHROMIUM() { gles2::GetGLContext()->RateLimitOffscreenContextCHROMIUM(); } diff --git a/gpu/command_buffer/client/gles2_cmd_helper_autogen.h b/gpu/command_buffer/client/gles2_cmd_helper_autogen.h index 8078354..c28e989 100644 --- a/gpu/command_buffer/client/gles2_cmd_helper_autogen.h +++ b/gpu/command_buffer/client/gles2_cmd_helper_autogen.h @@ -1219,16 +1219,6 @@ c.Init(bucket_id); } - void SetLatchCHROMIUM(GLuint latch_id) { - gles2::SetLatchCHROMIUM& c = GetCmdSpace<gles2::SetLatchCHROMIUM>(); - c.Init(latch_id); - } - - void WaitLatchCHROMIUM(GLuint latch_id) { - gles2::WaitLatchCHROMIUM& c = GetCmdSpace<gles2::WaitLatchCHROMIUM>(); - c.Init(latch_id); - } - void SetSurfaceCHROMIUM(GLint surface_id) { gles2::SetSurfaceCHROMIUM& c = GetCmdSpace<gles2::SetSurfaceCHROMIUM>(); c.Init(surface_id); diff --git a/gpu/command_buffer/client/gles2_demo.cc b/gpu/command_buffer/client/gles2_demo.cc index f981b1b..7dde7f6 100644 --- a/gpu/command_buffer/client/gles2_demo.cc +++ b/gpu/command_buffer/client/gles2_demo.cc @@ -56,9 +56,9 @@ bool GLES2Demo::Setup(void* hwnd, int32 size) { if (!command_buffer->Initialize(size)) return NULL; - GpuScheduler* gpu_scheduler = new GpuScheduler(command_buffer.get(), - NULL, - NULL); + GpuScheduler* gpu_scheduler = GpuScheduler::Create(command_buffer.get(), + NULL, + NULL); if (!gpu_scheduler->Initialize(reinterpret_cast<HWND>(hwnd), gfx::Size(), false, diff --git a/gpu/command_buffer/client/gles2_implementation.cc b/gpu/command_buffer/client/gles2_implementation.cc index 1a7eeaa..52abdf1 100644 --- a/gpu/command_buffer/client/gles2_implementation.cc +++ b/gpu/command_buffer/client/gles2_implementation.cc @@ -832,7 +832,6 @@ void GLES2Implementation::SwapBuffers() { // the scheduler yields between the InsertToken and the SwapBuffers. swap_buffers_tokens_.push(helper_->InsertToken()); helper_->SwapBuffers(); - helper_->YieldScheduler(); helper_->CommandBufferHelper::Flush(); // Wait if we added too many swap buffers. 
if (swap_buffers_tokens_.size() > kMaxSwapBuffers) { diff --git a/gpu/command_buffer/client/gles2_implementation_autogen.h b/gpu/command_buffer/client/gles2_implementation_autogen.h index 270d303..397d975 100644 --- a/gpu/command_buffer/client/gles2_implementation_autogen.h +++ b/gpu/command_buffer/client/gles2_implementation_autogen.h @@ -1272,16 +1272,6 @@ const GLchar* GetRequestableExtensionsCHROMIUM(); void RequestExtensionCHROMIUM(const char* extension); -void SetLatchCHROMIUM(GLuint latch_id) { - GPU_CLIENT_LOG("[" << this << "] glSetLatchCHROMIUM(" << latch_id << ")"); - helper_->SetLatchCHROMIUM(latch_id); -} - -void WaitLatchCHROMIUM(GLuint latch_id) { - GPU_CLIENT_LOG("[" << this << "] glWaitLatchCHROMIUM(" << latch_id << ")"); - helper_->WaitLatchCHROMIUM(latch_id); -} - void RateLimitOffscreenContextCHROMIUM(); void SetSurfaceCHROMIUM(GLint surface_id) { diff --git a/gpu/command_buffer/client/gles2_implementation_unittest.cc b/gpu/command_buffer/client/gles2_implementation_unittest.cc index eb003a5..a37f4b2 100644 --- a/gpu/command_buffer/client/gles2_implementation_unittest.cc +++ b/gpu/command_buffer/client/gles2_implementation_unittest.cc @@ -46,6 +46,10 @@ class GLES2MockCommandBufferHelper : public CommandBuffer { return state_; } + virtual State GetLastState() { + return state_; + } + virtual void Flush(int32 put_offset) { state_.put_offset = put_offset; } @@ -258,7 +262,7 @@ class GLES2ImplementationTest : public testing::Test { false)); EXPECT_CALL(*command_buffer_, OnFlush(_)).Times(1).RetiresOnSaturation(); - helper_->CommandBufferHelper::FlushSync(); + helper_->CommandBufferHelper::Finish(); Buffer ring_buffer = command_buffer_->GetRingBuffer(); commands_ = static_cast<CommandBufferEntry*>(ring_buffer.ptr) + command_buffer_->GetState().put_offset; diff --git a/gpu/command_buffer/client/mapped_memory_unittest.cc b/gpu/command_buffer/client/mapped_memory_unittest.cc index 067c8e6..735ac23 100644 --- a/gpu/command_buffer/client/mapped_memory_unittest.cc +++ b/gpu/command_buffer/client/mapped_memory_unittest.cc @@ -49,8 +49,8 @@ class MappedMemoryTestBase : public testing::Test { 0, api_mock_.get()); - gpu_scheduler_.reset(new GpuScheduler( - command_buffer_.get(), NULL, parser_, INT_MAX)); + gpu_scheduler_.reset(GpuScheduler::CreateForTests( + command_buffer_.get(), NULL, parser_)); command_buffer_->SetPutOffsetChangeCallback(NewCallback( gpu_scheduler_.get(), &GpuScheduler::PutChanged)); diff --git a/gpu/command_buffer/client/ring_buffer_test.cc b/gpu/command_buffer/client/ring_buffer_test.cc index 01bc3e0..a816393 100644 --- a/gpu/command_buffer/client/ring_buffer_test.cc +++ b/gpu/command_buffer/client/ring_buffer_test.cc @@ -71,8 +71,8 @@ class BaseRingBufferTest : public testing::Test { 0, api_mock_.get()); - gpu_scheduler_.reset(new GpuScheduler( - command_buffer_.get(), NULL, parser_, INT_MAX)); + gpu_scheduler_.reset(GpuScheduler::CreateForTests( + command_buffer_.get(), NULL, parser_)); command_buffer_->SetPutOffsetChangeCallback(NewCallback( gpu_scheduler_.get(), &GpuScheduler::PutChanged)); diff --git a/gpu/command_buffer/common/cmd_buffer_common.cc b/gpu/command_buffer/common/cmd_buffer_common.cc index a9113b2..9ddb1f3 100644 --- a/gpu/command_buffer/common/cmd_buffer_common.cc +++ b/gpu/command_buffer/common/cmd_buffer_common.cc @@ -31,6 +31,17 @@ const char* GetCommandName(CommandId command_id) { } // namespace cmd +// TODO(apatrick): this method body is here instead of command_buffer.cc +// because NaCl currently compiles in this file but not the other. 
+// Remove this method body and the includes of command_buffer.h and +// logging.h above once NaCl defines SetContextLostReason() in its +// CommandBuffer subclass and has been rolled forward. See +// http://crbug.com/89670 . +gpu::CommandBuffer::State CommandBuffer::GetLastState() { + GPU_NOTREACHED(); + return gpu::CommandBuffer::State(); +} + // TODO(kbr): this method body is here instead of command_buffer.cc // because NaCl currently compiles in this file but not the other. // Remove this method body and the includes of command_buffer.h and diff --git a/gpu/command_buffer/common/cmd_buffer_common.h b/gpu/command_buffer/common/cmd_buffer_common.h index 0f050e4..eed4724 100644 --- a/gpu/command_buffer/common/cmd_buffer_common.h +++ b/gpu/command_buffer/common/cmd_buffer_common.h @@ -158,7 +158,6 @@ namespace cmd { OP(SetBucketDataImmediate) /* 9 */ \ OP(GetBucketSize) /* 10 */ \ OP(GetBucketData) /* 11 */ \ - OP(YieldScheduler) /* 12 */ \ // Common commands. enum CommandId { @@ -643,32 +642,6 @@ COMPILE_ASSERT(offsetof(GetBucketData, shared_memory_id) == 16, COMPILE_ASSERT(offsetof(GetBucketData, shared_memory_offset) == 20, Offsetof_GetBucketData_shared_memory_offset_not_20); -// A Yield command. Hints the scheduler that this is a good point to update the -// state and schedule other command buffers. -struct YieldScheduler { - typedef YieldScheduler ValueType; - static const CommandId kCmdId = kYieldScheduler; - static const cmd::ArgFlags kArgFlags = cmd::kFixed; - - void SetHeader() { - header.SetCmd<ValueType>(); - } - - void Init() { - SetHeader(); - } - static void* Set(void* cmd) { - static_cast<ValueType*>(cmd)->Init(); - return NextCmdAddress<ValueType>(cmd); - } - - CommandHeader header; -}; - -COMPILE_ASSERT(sizeof(YieldScheduler) == 4, Sizeof_YieldScheduler_is_not_4); -COMPILE_ASSERT(offsetof(YieldScheduler, header) == 0, - Offsetof_YieldScheduler_header_not_0); - } // namespace cmd #pragma pack(pop) diff --git a/gpu/command_buffer/common/command_buffer.h b/gpu/command_buffer/common/command_buffer.h index 539098b..2eff201 100644 --- a/gpu/command_buffer/common/command_buffer.h +++ b/gpu/command_buffer/common/command_buffer.h @@ -78,17 +78,18 @@ class CommandBuffer { // Returns the current status. virtual State GetState() = 0; + // Returns the last state without synchronizing with the service. + virtual State GetLastState(); + // The writer calls this to update its put offset. This ensures the reader - // sees the latest added commands, and will eventually process them. + // sees the latest added commands, and will eventually process them. On the + // service side, commands are processed up to the given put_offset before + // subsequent Flushes on the same GpuChannel. virtual void Flush(int32 put_offset) = 0; // The writer calls this to update its put offset. This function returns the - // reader's most recent get offset. Does not return until after the put offset - // change callback has been invoked. Returns -1 if the put offset is invalid. - // If last_known_get is different from the reader's current get pointer, this - // function will return immediately, otherwise it guarantees that the reader - // has processed some commands before returning (assuming the command buffer - // isn't empty and there is no error). + // reader's most recent get offset. Does not return until all pending commands + // have been executed. virtual State FlushSync(int32 put_offset, int32 last_known_get) = 0; // Sets the current get offset. This can be called from any thread. 
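The command_buffer.h hunk above adds GetLastState() as a way to read the most recently observed state without synchronizing with the service. A hedged illustration with a stand-in Proxy class (not the real CommandBufferProxy, whose cached last_state_ is updated from IPC replies): GetState() refreshes the cache, GetLastState() just returns it.

```cpp
#include <cstdint>
#include <cstdio>

struct State {
  int32_t get_offset;
  int32_t token;
};

class Proxy {
 public:
  // Expensive path: synchronize with the service (an IPC round trip in the
  // real CommandBufferProxy; stubbed out here) and update the cache.
  State GetState() {
    last_state_ = QueryServiceState();
    return last_state_;
  }

  // Cheap path: no synchronization, just the last state we heard about.
  State GetLastState() const { return last_state_; }

 private:
  static State QueryServiceState() {
    State s;
    s.get_offset = 42;  // pretend reply from the GPU process
    s.token = 7;
    return s;
  }

  State last_state_ = {0, -1};
};

int main() {
  Proxy proxy;
  std::printf("cached token before sync: %d\n",
              static_cast<int>(proxy.GetLastState().token));
  proxy.GetState();  // forces a (pretend) synchronization
  std::printf("cached token after sync:  %d\n",
              static_cast<int>(proxy.GetLastState().token));
  return 0;
}
```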
diff --git a/gpu/command_buffer/common/command_buffer_mock.h b/gpu/command_buffer/common/command_buffer_mock.h index 3243d17..321c40d 100644 --- a/gpu/command_buffer/common/command_buffer_mock.h +++ b/gpu/command_buffer/common/command_buffer_mock.h @@ -25,6 +25,7 @@ class MockCommandBuffer : public CommandBuffer { MOCK_METHOD2(Initialize, bool(base::SharedMemory* buffer, int32 size)); MOCK_METHOD0(GetRingBuffer, Buffer()); MOCK_METHOD0(GetState, State()); + MOCK_METHOD0(GetLastState, State()); MOCK_METHOD1(Flush, void(int32 put_offset)); MOCK_METHOD2(FlushSync, State(int32 put_offset, int32 last_known_get)); MOCK_METHOD1(SetGetOffset, void(int32 get_offset)); diff --git a/gpu/command_buffer/common/constants.h b/gpu/command_buffer/common/constants.h index c204e87..1b14636 100644 --- a/gpu/command_buffer/common/constants.h +++ b/gpu/command_buffer/common/constants.h @@ -21,28 +21,12 @@ namespace error { kUnknownCommand, kInvalidArguments, kLostContext, - kGenericError, - - // This is not an error. It is returned by WaitLatch when it is blocked. - // When blocked, the context will not reschedule itself until another - // context executes a SetLatch command. - kWaiting, - - // This is not an error either. It just hints the scheduler that it can exit - // its loop, update state, and schedule other command buffers. - kYield + kGenericError }; // Return true if the given error code is an actual error. inline bool IsError(Error error) { - switch (error) { - case kNoError: - case kWaiting: - case kYield: - return false; - default: - return true; - } + return error != kNoError; } // Provides finer grained information about why the context was lost. diff --git a/gpu/command_buffer/common/gles2_cmd_format_autogen.h b/gpu/command_buffer/common/gles2_cmd_format_autogen.h index 3b76346..b9d6c06 100644 --- a/gpu/command_buffer/common/gles2_cmd_format_autogen.h +++ b/gpu/command_buffer/common/gles2_cmd_format_autogen.h @@ -8913,74 +8913,6 @@ COMPILE_ASSERT(offsetof(RequestExtensionCHROMIUM, header) == 0, COMPILE_ASSERT(offsetof(RequestExtensionCHROMIUM, bucket_id) == 4, OffsetOf_RequestExtensionCHROMIUM_bucket_id_not_4); -struct SetLatchCHROMIUM { - typedef SetLatchCHROMIUM ValueType; - static const CommandId kCmdId = kSetLatchCHROMIUM; - static const cmd::ArgFlags kArgFlags = cmd::kFixed; - - static uint32 ComputeSize() { - return static_cast<uint32>(sizeof(ValueType)); // NOLINT - } - - void SetHeader() { - header.SetCmd<ValueType>(); - } - - void Init(GLuint _latch_id) { - SetHeader(); - latch_id = _latch_id; - } - - void* Set(void* cmd, GLuint _latch_id) { - static_cast<ValueType*>(cmd)->Init(_latch_id); - return NextCmdAddress<ValueType>(cmd); - } - - gpu::CommandHeader header; - uint32 latch_id; -}; - -COMPILE_ASSERT(sizeof(SetLatchCHROMIUM) == 8, - Sizeof_SetLatchCHROMIUM_is_not_8); -COMPILE_ASSERT(offsetof(SetLatchCHROMIUM, header) == 0, - OffsetOf_SetLatchCHROMIUM_header_not_0); -COMPILE_ASSERT(offsetof(SetLatchCHROMIUM, latch_id) == 4, - OffsetOf_SetLatchCHROMIUM_latch_id_not_4); - -struct WaitLatchCHROMIUM { - typedef WaitLatchCHROMIUM ValueType; - static const CommandId kCmdId = kWaitLatchCHROMIUM; - static const cmd::ArgFlags kArgFlags = cmd::kFixed; - - static uint32 ComputeSize() { - return static_cast<uint32>(sizeof(ValueType)); // NOLINT - } - - void SetHeader() { - header.SetCmd<ValueType>(); - } - - void Init(GLuint _latch_id) { - SetHeader(); - latch_id = _latch_id; - } - - void* Set(void* cmd, GLuint _latch_id) { - static_cast<ValueType*>(cmd)->Init(_latch_id); - return 
NextCmdAddress<ValueType>(cmd); - } - - gpu::CommandHeader header; - uint32 latch_id; -}; - -COMPILE_ASSERT(sizeof(WaitLatchCHROMIUM) == 8, - Sizeof_WaitLatchCHROMIUM_is_not_8); -COMPILE_ASSERT(offsetof(WaitLatchCHROMIUM, header) == 0, - OffsetOf_WaitLatchCHROMIUM_header_not_0); -COMPILE_ASSERT(offsetof(WaitLatchCHROMIUM, latch_id) == 4, - OffsetOf_WaitLatchCHROMIUM_latch_id_not_4); - struct SetSurfaceCHROMIUM { typedef SetSurfaceCHROMIUM ValueType; static const CommandId kCmdId = kSetSurfaceCHROMIUM; diff --git a/gpu/command_buffer/common/gles2_cmd_format_test_autogen.h b/gpu/command_buffer/common/gles2_cmd_format_test_autogen.h index 40af555..61513f5 100644 --- a/gpu/command_buffer/common/gles2_cmd_format_test_autogen.h +++ b/gpu/command_buffer/common/gles2_cmd_format_test_autogen.h @@ -3510,32 +3510,6 @@ TEST(GLES2FormatTest, RequestExtensionCHROMIUM) { EXPECT_EQ(static_cast<uint32>(11), cmd.bucket_id); } -TEST(GLES2FormatTest, SetLatchCHROMIUM) { - SetLatchCHROMIUM cmd = { { 0 } }; - void* next_cmd = cmd.Set( - &cmd, - static_cast<GLuint>(11)); - EXPECT_EQ(static_cast<uint32>(SetLatchCHROMIUM::kCmdId), - cmd.header.command); - EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u); - EXPECT_EQ(static_cast<char*>(next_cmd), - reinterpret_cast<char*>(&cmd) + sizeof(cmd)); - EXPECT_EQ(static_cast<GLuint>(11), cmd.latch_id); -} - -TEST(GLES2FormatTest, WaitLatchCHROMIUM) { - WaitLatchCHROMIUM cmd = { { 0 } }; - void* next_cmd = cmd.Set( - &cmd, - static_cast<GLuint>(11)); - EXPECT_EQ(static_cast<uint32>(WaitLatchCHROMIUM::kCmdId), - cmd.header.command); - EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u); - EXPECT_EQ(static_cast<char*>(next_cmd), - reinterpret_cast<char*>(&cmd) + sizeof(cmd)); - EXPECT_EQ(static_cast<GLuint>(11), cmd.latch_id); -} - TEST(GLES2FormatTest, SetSurfaceCHROMIUM) { SetSurfaceCHROMIUM cmd = { { 0 } }; void* next_cmd = cmd.Set( diff --git a/gpu/command_buffer/common/gles2_cmd_ids_autogen.h b/gpu/command_buffer/common/gles2_cmd_ids_autogen.h index 25bf081..e164a51 100644 --- a/gpu/command_buffer/common/gles2_cmd_ids_autogen.h +++ b/gpu/command_buffer/common/gles2_cmd_ids_autogen.h @@ -205,8 +205,6 @@ OP(ResizeCHROMIUM) /* 448 */ \ OP(GetRequestableExtensionsCHROMIUM) /* 449 */ \ OP(RequestExtensionCHROMIUM) /* 450 */ \ - OP(SetLatchCHROMIUM) /* 451 */ \ - OP(WaitLatchCHROMIUM) /* 452 */ \ OP(SetSurfaceCHROMIUM) /* 453 */ \ OP(GetMultipleIntegervCHROMIUM) /* 454 */ \ OP(GetProgramInfoCHROMIUM) /* 455 */ \ diff --git a/gpu/command_buffer/common/gles2_cmd_utils_implementation_autogen.h b/gpu/command_buffer/common/gles2_cmd_utils_implementation_autogen.h index 1988472..d83c3c1 100644 --- a/gpu/command_buffer/common/gles2_cmd_utils_implementation_autogen.h +++ b/gpu/command_buffer/common/gles2_cmd_utils_implementation_autogen.h @@ -179,7 +179,7 @@ static GLES2Util::EnumToString enum_to_string_table[] = { { 0x00000400, "GL_STENCIL_BUFFER_BIT", }, { 0x800A, "GL_FUNC_SUBTRACT", }, { 0x8E2C, "GL_DEPTH_COMPONENT16_NONLINEAR_NV", }, - { 0x889F, "GL_VERTEX_ATTRIB_ARRAY_BUFFER_BINDING", }, + { 0x8508, "GL_DECR_WRAP", }, { 0x8006, "GL_FUNC_ADD", }, { 0x8007, "GL_MIN_EXT", }, { 0x8004, "GL_ONE_MINUS_CONSTANT_ALPHA", }, @@ -401,7 +401,7 @@ static GLES2Util::EnumToString enum_to_string_table[] = { { 0x80CA, "GL_BLEND_DST_ALPHA", }, { 0x8CD6, "GL_FRAMEBUFFER_INCOMPLETE_ATTACHMENT", }, { 0x8872, "GL_MAX_TEXTURE_IMAGE_UNITS", }, - { 0x8508, "GL_DECR_WRAP", }, + { 0x889F, "GL_VERTEX_ATTRIB_ARRAY_BUFFER_BINDING", }, { 0x8507, "GL_INCR_WRAP", }, { 0x8895, "GL_ELEMENT_ARRAY_BUFFER_BINDING", }, { 0x8894, 
"GL_ARRAY_BUFFER_BINDING", }, diff --git a/gpu/command_buffer/service/cmd_parser.cc b/gpu/command_buffer/service/cmd_parser.cc index 9ed3fca..fba06e6 100644 --- a/gpu/command_buffer/service/cmd_parser.cc +++ b/gpu/command_buffer/service/cmd_parser.cc @@ -64,7 +64,7 @@ error::Error CommandParser::ProcessCommand() { } // If get was not set somewhere else advance it. - if (result != error::kWaiting && get == get_) + if (get == get_) get_ = (get + header.size) % entry_count_; return result; } diff --git a/gpu/command_buffer/service/cmd_parser_test.cc b/gpu/command_buffer/service/cmd_parser_test.cc index 315a475..857ca8e 100644 --- a/gpu/command_buffer/service/cmd_parser_test.cc +++ b/gpu/command_buffer/service/cmd_parser_test.cc @@ -288,28 +288,4 @@ TEST_F(CommandParserTest, TestError) { Mock::VerifyAndClearExpectations(api_mock()); } -TEST_F(CommandParserTest, TestWaiting) { - const unsigned int kNumEntries = 5; - scoped_ptr<CommandParser> parser(MakeParser(kNumEntries)); - CommandBufferOffset put = parser->put(); - CommandHeader header; - - // Generate a command with size 1. - header.size = 1; - header.command = 3; - buffer()[put++].value_header = header; - - parser->set_put(put); - // A command that returns kWaiting should not advance the get pointer. - AddDoCommandExpect(error::kWaiting, 3, 0, NULL); - EXPECT_EQ(error::kWaiting, parser->ProcessAllCommands()); - EXPECT_EQ(0, parser->get()); - Mock::VerifyAndClearExpectations(api_mock()); - // Not waiting should advance the get pointer. - AddDoCommandExpect(error::kNoError, 3, 0, NULL); - EXPECT_EQ(error::kNoError, parser->ProcessAllCommands()); - EXPECT_EQ(put, parser->get()); - Mock::VerifyAndClearExpectations(api_mock()); -} - } // namespace gpu diff --git a/gpu/command_buffer/service/command_buffer_service.cc b/gpu/command_buffer/service/command_buffer_service.cc index 064341d..26ccbee 100644 --- a/gpu/command_buffer/service/command_buffer_service.cc +++ b/gpu/command_buffer/service/command_buffer_service.cc @@ -104,6 +104,10 @@ CommandBufferService::State CommandBufferService::GetState() { return state; } +CommandBufferService::State CommandBufferService::GetLastState() { + return GetState(); +} + CommandBufferService::State CommandBufferService::FlushSync( int32 put_offset, int32 last_known_get) { if (put_offset < 0 || put_offset > num_entries_) { @@ -114,7 +118,7 @@ CommandBufferService::State CommandBufferService::FlushSync( put_offset_ = put_offset; if (put_offset_change_callback_.get()) { - put_offset_change_callback_->Run(last_known_get == get_offset_); + put_offset_change_callback_->Run(); } return GetState(); @@ -129,7 +133,7 @@ void CommandBufferService::Flush(int32 put_offset) { put_offset_ = put_offset; if (put_offset_change_callback_.get()) { - put_offset_change_callback_->Run(false); + put_offset_change_callback_->Run(); } } @@ -261,7 +265,7 @@ void CommandBufferService::SetContextLostReason( } void CommandBufferService::SetPutOffsetChangeCallback( - Callback1<bool>::Type* callback) { + Callback0::Type* callback) { put_offset_change_callback_.reset(callback); } diff --git a/gpu/command_buffer/service/command_buffer_service.h b/gpu/command_buffer/service/command_buffer_service.h index 9c52531..c388e9f 100644 --- a/gpu/command_buffer/service/command_buffer_service.h +++ b/gpu/command_buffer/service/command_buffer_service.h @@ -29,6 +29,7 @@ class CommandBufferService : public CommandBuffer { virtual bool Initialize(base::SharedMemory* buffer, int32 size); virtual Buffer GetRingBuffer(); virtual State GetState(); + virtual 
State GetLastState(); virtual void Flush(int32 put_offset); virtual State FlushSync(int32 put_offset, int32 last_known_get); virtual void SetGetOffset(int32 get_offset); @@ -50,7 +51,7 @@ class CommandBufferService : public CommandBuffer { // writer a means of waiting for the reader to make some progress before // attempting to write more to the command buffer. Takes ownership of // callback. - virtual void SetPutOffsetChangeCallback(Callback1<bool>::Type* callback); + virtual void SetPutOffsetChangeCallback(Callback0::Type* callback); virtual void SetParseErrorCallback(Callback0::Type* callback); private: @@ -58,7 +59,7 @@ class CommandBufferService : public CommandBuffer { int32 num_entries_; int32 get_offset_; int32 put_offset_; - scoped_ptr<Callback1<bool>::Type> put_offset_change_callback_; + scoped_ptr<Callback0::Type> put_offset_change_callback_; scoped_ptr<Callback0::Type> parse_error_callback_; std::vector<Buffer> registered_objects_; std::set<int32> unused_registered_object_elements_; diff --git a/gpu/command_buffer/service/common_decoder.cc b/gpu/command_buffer/service/common_decoder.cc index 35eaf66..7b28603 100644 --- a/gpu/command_buffer/service/common_decoder.cc +++ b/gpu/command_buffer/service/common_decoder.cc @@ -330,10 +330,4 @@ error::Error CommonDecoder::HandleGetBucketData( return error::kNoError; } -error::Error CommonDecoder::HandleYieldScheduler( - uint32 immediate_data_size, - const cmd::YieldScheduler& args) { - return error::kYield; -} - } // namespace gpu diff --git a/gpu/command_buffer/service/common_decoder_unittest.cc b/gpu/command_buffer/service/common_decoder_unittest.cc index 9b53a56..8f88398 100644 --- a/gpu/command_buffer/service/common_decoder_unittest.cc +++ b/gpu/command_buffer/service/common_decoder_unittest.cc @@ -556,11 +556,5 @@ TEST_F(CommonDecoderTest, GetBucketData) { EXPECT_NE(error::kNoError, ExecuteCmd(cmd)); } -TEST_F(CommonDecoderTest, YieldScheduler) { - cmd::YieldScheduler cmd; - cmd.Init(); - EXPECT_EQ(error::kYield, ExecuteCmd(cmd)); -} - } // namespace gpu diff --git a/gpu/command_buffer/service/gles2_cmd_decoder.cc b/gpu/command_buffer/service/gles2_cmd_decoder.cc index e989af6..d5bd745 100644 --- a/gpu/command_buffer/service/gles2_cmd_decoder.cc +++ b/gpu/command_buffer/service/gles2_cmd_decoder.cc @@ -478,7 +478,6 @@ class GLES2DecoderImpl : public base::SupportsWeakPtr<GLES2DecoderImpl>, virtual void SetResizeCallback(Callback1<gfx::Size>::Type* callback); virtual void SetSwapBuffersCallback(Callback0::Type* callback); - virtual void SetLatchCallback(const base::Callback<void(bool)>& callback);; virtual bool GetServiceTextureId(uint32 client_texture_id, uint32* service_texture_id); @@ -1271,7 +1270,6 @@ class GLES2DecoderImpl : public base::SupportsWeakPtr<GLES2DecoderImpl>, scoped_ptr<Callback1<gfx::Size>::Type> resize_callback_; scoped_ptr<Callback0::Type> swap_buffers_callback_; - base::Callback<void(bool)> latch_callback_; // The format of the back buffer_ GLenum back_buffer_color_format_; @@ -2356,11 +2354,6 @@ void GLES2DecoderImpl::SetSwapBuffersCallback(Callback0::Type* callback) { swap_buffers_callback_.reset(callback); } -void GLES2DecoderImpl::SetLatchCallback( - const base::Callback<void(bool)>& callback) { - latch_callback_ = callback; -} - bool GLES2DecoderImpl::GetServiceTextureId(uint32 client_texture_id, uint32* service_texture_id) { TextureManager::TextureInfo* texture = @@ -6527,62 +6520,6 @@ error::Error GLES2DecoderImpl::HandleSwapBuffers( return error::kNoError; } -error::Error 
GLES2DecoderImpl::HandleSetLatchCHROMIUM( - uint32 immediate_data_size, const gles2::SetLatchCHROMIUM& c) { - TRACE_EVENT1("gpu", "SetLatch", "latch_id", c.latch_id); - // Ensure the side effects of previous commands are visible to other contexts. - // There is no need to do this for ANGLE because it uses a - // single D3D device for all contexts. - if (!IsAngle()) - glFlush(); - - int32 shm_id = gpu::kLatchSharedMemoryId; - uint32 latch_id = c.latch_id; - uint32 shm_offset = 0; - base::subtle::Atomic32* latch; - if (!SafeMultiplyUint32(latch_id, sizeof(*latch), &shm_offset)) { - return error::kOutOfBounds; - } - latch = GetSharedMemoryAs<base::subtle::Atomic32*>( - shm_id, shm_offset, sizeof(*latch)); - if (!latch) { - return error::kOutOfBounds; - } - base::subtle::Atomic32 old = - base::subtle::NoBarrier_CompareAndSwap(latch, 0, 1); - DCHECK(old == 0); - if (!latch_callback_.is_null()) - latch_callback_.Run(true); - return error::kNoError; -} - -error::Error GLES2DecoderImpl::HandleWaitLatchCHROMIUM( - uint32 immediate_data_size, const gles2::WaitLatchCHROMIUM& c) { - TRACE_EVENT1("gpu", "WaitLatch", "latch_id", c.latch_id); - int32 shm_id = gpu::kLatchSharedMemoryId; - uint32 latch_id = c.latch_id; - uint32 shm_offset = 0; - base::subtle::Atomic32* latch; - if (!SafeMultiplyUint32(latch_id, sizeof(*latch), &shm_offset)) { - return error::kOutOfBounds; - } - latch = GetSharedMemoryAs<base::subtle::Atomic32*>( - shm_id, shm_offset, sizeof(*latch)); - if (!latch) { - return error::kOutOfBounds; - } - - base::subtle::Atomic32 old = - base::subtle::NoBarrier_CompareAndSwap(latch, 1, 0); - if (old == 0) { - if (!latch_callback_.is_null()) - latch_callback_.Run(false); - return error::kWaiting; - } else { - return error::kNoError; - } -} - error::Error GLES2DecoderImpl::HandleCommandBufferEnableCHROMIUM( uint32 immediate_data_size, const gles2::CommandBufferEnableCHROMIUM& c) { Bucket* bucket = GetBucket(c.bucket_id); diff --git a/gpu/command_buffer/service/gles2_cmd_decoder.h b/gpu/command_buffer/service/gles2_cmd_decoder.h index abd2b85..23c5e3a 100644 --- a/gpu/command_buffer/service/gles2_cmd_decoder.h +++ b/gpu/command_buffer/service/gles2_cmd_decoder.h @@ -110,11 +110,6 @@ class GLES2Decoder : public CommonDecoder { // Sets a callback which is called when a SwapBuffers command is processed. virtual void SetSwapBuffersCallback(Callback0::Type* callback) = 0; - // Sets a callback which is called after a Set/WaitLatch command is processed. - // The bool parameter will be true for SetLatch, and false for a WaitLatch - // that is blocked. An unblocked WaitLatch will not trigger a callback. - virtual void SetLatchCallback(const base::Callback<void(bool)>& callback) = 0; - // Get the service texture ID corresponding to a client texture ID. // If no such record is found then return false. 
virtual bool GetServiceTextureId(uint32 client_texture_id, diff --git a/gpu/command_buffer/service/gles2_cmd_decoder_unittest.cc b/gpu/command_buffer/service/gles2_cmd_decoder_unittest.cc index eadfbcd..b5997f5 100644 --- a/gpu/command_buffer/service/gles2_cmd_decoder_unittest.cc +++ b/gpu/command_buffer/service/gles2_cmd_decoder_unittest.cc @@ -2893,65 +2893,6 @@ TEST_F(GLES2DecoderWithShaderTest, VertexAttribPointer) { } } -TEST_F(GLES2DecoderTest, SetLatch) { - bool isAngle = false; -#if defined(OS_WIN) - isAngle = (gfx::GetGLImplementation() == gfx::kGLImplementationEGLGLES2); -#endif - if (!isAngle) { - EXPECT_CALL(*gl_, Flush()).Times(3); - } - const uint32 kLatchId = 1; - base::subtle::Atomic32* latches = static_cast<base::subtle::Atomic32*>( - shared_memory_base_); - const uint32 kInvalidLatchId = kSharedBufferSize / sizeof(*latches); - const uint32 kLastValidLatchId = kInvalidLatchId - 1; - latches[kLatchId] = 0; - latches[kLastValidLatchId] = 0; - SetLatchCHROMIUM cmd; - // Check out of range latch id. - cmd.Init(kInvalidLatchId); - EXPECT_EQ(error::kOutOfBounds, ExecuteCmd(cmd)); - cmd.Init(kLatchId); - // Check valid latch. - EXPECT_EQ(0, latches[kLatchId]); - EXPECT_EQ(error::kNoError, ExecuteCmd(cmd)); - EXPECT_EQ(1, latches[kLatchId]); - // Check last valid latch. - EXPECT_EQ(0, latches[kLastValidLatchId]); - cmd.Init(kLastValidLatchId); - EXPECT_EQ(error::kNoError, ExecuteCmd(cmd)); - EXPECT_EQ(1, latches[kLastValidLatchId]); -} - -TEST_F(GLES2DecoderTest, WaitLatch) { - const uint32 kLatchId = 1; - base::subtle::Atomic32* latches = static_cast<base::subtle::Atomic32*>( - shared_memory_base_); - const uint32 kInvalidLatchId = kSharedBufferSize / sizeof(*latches); - const uint32 kLastValidLatchId = kInvalidLatchId - 1; - latches[kLatchId] = 0; - latches[kLastValidLatchId] = 0; - WaitLatchCHROMIUM cmd; - // Check out of range latch id. - cmd.Init(kInvalidLatchId); - EXPECT_EQ(error::kOutOfBounds, ExecuteCmd(cmd)); - // Check valid latch. - cmd.Init(kLatchId); - EXPECT_EQ(0, latches[kLatchId]); - EXPECT_EQ(error::kWaiting, ExecuteCmd(cmd)); - latches[kLatchId] = 1; - EXPECT_EQ(error::kNoError, ExecuteCmd(cmd)); - EXPECT_EQ(0, latches[kLatchId]); - // Check last valid latch. 
- cmd.Init(kLastValidLatchId); - EXPECT_EQ(0, latches[kLastValidLatchId]); - EXPECT_EQ(error::kWaiting, ExecuteCmd(cmd)); - latches[kLastValidLatchId] = 1; - EXPECT_EQ(error::kNoError, ExecuteCmd(cmd)); - EXPECT_EQ(0, latches[kLastValidLatchId]); -} - TEST_F(GLES2DecoderTest, SetSurfaceCHROMIUMChangesSurfaceForExistentSurface) { const int kSurfaceId = 1; scoped_refptr<gfx::GLSurfaceStub> surface(new gfx::GLSurfaceStub); diff --git a/gpu/command_buffer/service/gles2_cmd_decoder_unittest_2_autogen.h b/gpu/command_buffer/service/gles2_cmd_decoder_unittest_2_autogen.h index c5f5594..05f80a3 100644 --- a/gpu/command_buffer/service/gles2_cmd_decoder_unittest_2_autogen.h +++ b/gpu/command_buffer/service/gles2_cmd_decoder_unittest_2_autogen.h @@ -1712,7 +1712,6 @@ TEST_F(GLES2DecoderTest2, ViewportInvalidArgs3_0) { // TODO(gman): RequestExtensionCHROMIUM -// TODO(gman): SetLatchCHROMIUM - +// TODO(gman): SetSurfaceCHROMIUM #endif // GPU_COMMAND_BUFFER_SERVICE_GLES2_CMD_DECODER_UNITTEST_2_AUTOGEN_H_ diff --git a/gpu/command_buffer/service/gles2_cmd_decoder_unittest_3_autogen.h b/gpu/command_buffer/service/gles2_cmd_decoder_unittest_3_autogen.h index 54bfdf6..cab6b33 100644 --- a/gpu/command_buffer/service/gles2_cmd_decoder_unittest_3_autogen.h +++ b/gpu/command_buffer/service/gles2_cmd_decoder_unittest_3_autogen.h @@ -10,9 +10,6 @@ #ifndef GPU_COMMAND_BUFFER_SERVICE_GLES2_CMD_DECODER_UNITTEST_3_AUTOGEN_H_ #define GPU_COMMAND_BUFFER_SERVICE_GLES2_CMD_DECODER_UNITTEST_3_AUTOGEN_H_ -// TODO(gman): WaitLatchCHROMIUM - -// TODO(gman): SetSurfaceCHROMIUM // TODO(gman): GetMultipleIntegervCHROMIUM // TODO(gman): GetProgramInfoCHROMIUM diff --git a/gpu/command_buffer/service/gpu_scheduler.cc b/gpu/command_buffer/service/gpu_scheduler.cc index 9365118..fbdb16b 100644 --- a/gpu/command_buffer/service/gpu_scheduler.cc +++ b/gpu/command_buffer/service/gpu_scheduler.cc @@ -19,41 +19,37 @@ using ::base::SharedMemory; namespace gpu { -GpuScheduler::GpuScheduler(CommandBuffer* command_buffer, - SurfaceManager* surface_manager, - gles2::ContextGroup* group) - : command_buffer_(command_buffer), - commands_per_update_(100), - unscheduled_count_(0), -#if defined(OS_MACOSX) || defined(TOUCH_UI) - swap_buffers_count_(0), - acknowledged_swap_buffers_count_(0), -#endif - method_factory_(ALLOW_THIS_IN_INITIALIZER_LIST(this)) { +GpuScheduler* GpuScheduler::Create(CommandBuffer* command_buffer, + SurfaceManager* surface_manager, + gles2::ContextGroup* group) { DCHECK(command_buffer); - decoder_.reset(gles2::GLES2Decoder::Create(surface_manager, group)); - decoder_->set_engine(this); + + gles2::GLES2Decoder* decoder = + gles2::GLES2Decoder::Create(surface_manager, group); + + GpuScheduler* scheduler = new GpuScheduler(command_buffer, + decoder, + NULL); + + decoder->set_engine(scheduler); + if (CommandLine::ForCurrentProcess()->HasSwitch( switches::kEnableGPUServiceLogging)) { - decoder_->set_debug(true); + decoder->set_debug(true); } + + return scheduler; } -GpuScheduler::GpuScheduler(CommandBuffer* command_buffer, - gles2::GLES2Decoder* decoder, - CommandParser* parser, - int commands_per_update) - : command_buffer_(command_buffer), - commands_per_update_(commands_per_update), - unscheduled_count_(0), -#if defined(OS_MACOSX) || defined(TOUCH_UI) - swap_buffers_count_(0), - acknowledged_swap_buffers_count_(0), -#endif - method_factory_(ALLOW_THIS_IN_INITIALIZER_LIST(this)) { +GpuScheduler* GpuScheduler::CreateForTests(CommandBuffer* command_buffer, + gles2::GLES2Decoder* decoder, + CommandParser* parser) { 
DCHECK(command_buffer); - decoder_.reset(decoder); - parser_.reset(parser); + GpuScheduler* scheduler = new GpuScheduler(command_buffer, + decoder, + parser); + + return scheduler; } GpuScheduler::~GpuScheduler() { @@ -82,11 +78,6 @@ bool GpuScheduler::InitializeCommon( } #endif - // Do not limit to a certain number of commands before scheduling another - // update when rendering onscreen. - if (!surface->IsOffscreen()) - commands_per_update_ = INT_MAX; - // Map the ring buffer and create the parser. Buffer ring_buffer = command_buffer_->GetRingBuffer(); if (ring_buffer.ptr) { @@ -144,29 +135,16 @@ const unsigned int kMaxOutstandingSwapBuffersCallsPerOnscreenContext = 1; } #endif -void GpuScheduler::PutChanged(bool sync) { +void GpuScheduler::PutChanged() { TRACE_EVENT1("gpu", "GpuScheduler:PutChanged", "this", this); - CommandBuffer::State state = command_buffer_->GetState(); - parser_->set_put(state.put_offset); - if (sync) - ProcessCommands(); - else - ScheduleProcessCommands(); -} + DCHECK(IsScheduled()); -void GpuScheduler::ProcessCommands() { - TRACE_EVENT1("gpu", "GpuScheduler:ProcessCommands", "this", this); CommandBuffer::State state = command_buffer_->GetState(); + parser_->set_put(state.put_offset); if (state.error != error::kNoError) return; - if (unscheduled_count_ > 0) { - TRACE_EVENT1("gpu", "EarlyOut_Unscheduled", - "unscheduled_count_", unscheduled_count_); - return; - } - if (decoder_.get()) { if (!decoder_->MakeCurrent()) { LOG(ERROR) << "Context lost because MakeCurrent failed."; @@ -184,60 +162,30 @@ void GpuScheduler::ProcessCommands() { #if defined(OS_MACOSX) || defined(TOUCH_UI) // Don't swamp the browser process with SwapBuffers calls it can't handle. - if (do_rate_limiting && - swap_buffers_count_ - acknowledged_swap_buffers_count_ >= - kMaxOutstandingSwapBuffersCallsPerOnscreenContext) { - TRACE_EVENT0("gpu", "EarlyOut_OSX_Throttle"); - // Stop doing work on this command buffer. In the GPU process, - // receipt of the GpuMsg_AcceleratedSurfaceBuffersSwappedACK - // message causes ProcessCommands to be scheduled again. - return; - } + DCHECK(!do_rate_limiting || + swap_buffers_count_ - acknowledged_swap_buffers_count_ == 0); #endif - base::TimeTicks start_time = base::TimeTicks::Now(); - base::TimeDelta elapsed; - bool is_break = false; error::Error error = error::kNoError; - do { - int commands_processed = 0; - while (commands_processed < commands_per_update_ && - !parser_->IsEmpty()) { - error = parser_->ProcessCommand(); - - // TODO(piman): various classes duplicate various pieces of state, leading - // to needlessly complex update logic. It should be possible to simply - // share the state across all of them. - command_buffer_->SetGetOffset(static_cast<int32>(parser_->get())); - - if (error == error::kWaiting || error == error::kYield) { - is_break = true; - break; - } else if (error::IsError(error)) { - command_buffer_->SetContextLostReason(decoder_->GetContextLostReason()); - command_buffer_->SetParseError(error); - return; - } - - if (unscheduled_count_ > 0) { - is_break = true; - break; - } - - ++commands_processed; - if (command_processed_callback_.get()) { - command_processed_callback_->Run(); - } + while (!parser_->IsEmpty()) { + error = parser_->ProcessCommand(); + + // TODO(piman): various classes duplicate various pieces of state, leading + // to needlessly complex update logic. It should be possible to simply + // share the state across all of them. 
+ command_buffer_->SetGetOffset(static_cast<int32>(parser_->get())); + + if (error::IsError(error)) { + command_buffer_->SetContextLostReason(decoder_->GetContextLostReason()); + command_buffer_->SetParseError(error); + return; } - elapsed = base::TimeTicks::Now() - start_time; - } while(!is_break && - !parser_->IsEmpty() && - elapsed.InMicroseconds() < kMinimumSchedulerQuantumMicros); - - if (unscheduled_count_ == 0 && - error != error::kWaiting && - !parser_->IsEmpty()) { - ScheduleProcessCommands(); + + if (command_processed_callback_.get()) + command_processed_callback_->Run(); + + if (unscheduled_count_ > 0) + return; } } @@ -249,12 +197,8 @@ void GpuScheduler::SetScheduled(bool scheduled) { --unscheduled_count_; DCHECK_GE(unscheduled_count_, 0); - if (unscheduled_count_ == 0) { - if (scheduled_callback_.get()) - scheduled_callback_->Run(); - - ScheduleProcessCommands(); - } + if (unscheduled_count_ == 0 && scheduled_callback_.get()) + scheduled_callback_->Run(); } else { ++unscheduled_count_; } @@ -320,10 +264,18 @@ void GpuScheduler::SetTokenCallback( set_token_callback_ = callback; } -void GpuScheduler::ScheduleProcessCommands() { - MessageLoop::current()->PostTask( - FROM_HERE, - method_factory_.NewRunnableMethod(&GpuScheduler::ProcessCommands)); +GpuScheduler::GpuScheduler(CommandBuffer* command_buffer, + gles2::GLES2Decoder* decoder, + CommandParser* parser) + : command_buffer_(command_buffer), + decoder_(decoder), + parser_(parser), + unscheduled_count_(0), +#if defined(OS_MACOSX) || defined(TOUCH_UI) + swap_buffers_count_(0), + acknowledged_swap_buffers_count_(0), +#endif + method_factory_(ALLOW_THIS_IN_INITIALIZER_LIST(this)) { } void GpuScheduler::WillResize(gfx::Size size) { diff --git a/gpu/command_buffer/service/gpu_scheduler.h b/gpu/command_buffer/service/gpu_scheduler.h index d34e67f..4ebbab9 100644 --- a/gpu/command_buffer/service/gpu_scheduler.h +++ b/gpu/command_buffer/service/gpu_scheduler.h @@ -43,20 +43,15 @@ class ContextGroup; // posts tasks to the current message loop to do additional work. class GpuScheduler : public CommandBufferEngine { public: - // Scheduler quantum: makes ProcessCommands continue until the specified time - // has passed, or the command buffer yields or runs out of commands. - static const int kMinimumSchedulerQuantumMicros = 2000; - // If a group is not passed in one will be created. - GpuScheduler(CommandBuffer* command_buffer, - SurfaceManager* surface_manager, - gles2::ContextGroup* group); + static GpuScheduler* Create(CommandBuffer* command_buffer, + SurfaceManager* surface_manager, + gles2::ContextGroup* group); // This constructor is for unit tests. - GpuScheduler(CommandBuffer* command_buffer, - gles2::GLES2Decoder* decoder, - CommandParser* parser, - int commands_per_update); + static GpuScheduler* CreateForTests(CommandBuffer* command_buffer, + gles2::GLES2Decoder* decoder, + CommandParser* parser); virtual ~GpuScheduler(); @@ -74,7 +69,7 @@ class GpuScheduler : public CommandBufferEngine { bool SetParent(GpuScheduler* parent_scheduler, uint32 parent_texture_id); - void PutChanged(bool sync); + void PutChanged(); // Sets whether commands should be processed by this scheduler. Setting to // false unschedules. Setting to true reschedules. Whether or not the @@ -152,13 +147,6 @@ class GpuScheduler : public CommandBufferEngine { void SetCommandProcessedCallback(Callback0::Type* callback); - // Sets a callback which is called after a Set/WaitLatch command is processed. 
- // The bool parameter will be true for SetLatch, and false for a WaitLatch - // that is blocked. An unblocked WaitLatch will not trigger a callback. - void SetLatchCallback(const base::Callback<void(bool)>& callback) { - decoder_->SetLatchCallback(callback); - } - // Sets a callback which is called when set_token() is called, and passes the // just-set token to the callback. DCHECKs that no callback has previously // been registered for this notification. @@ -179,8 +167,10 @@ class GpuScheduler : public CommandBufferEngine { private: - // Helper which causes a call to ProcessCommands to be scheduled later. - void ScheduleProcessCommands(); + // If a group is not passed in one will be created. + GpuScheduler(CommandBuffer* command_buffer, + gles2::GLES2Decoder* decoder, + CommandParser* parser); // Called via a callback just before we are supposed to call the // user's resize callback. @@ -189,15 +179,12 @@ class GpuScheduler : public CommandBufferEngine { // Called via a callback just before we are supposed to call the // user's swap buffers callback. void WillSwapBuffers(); - void ProcessCommands(); // The GpuScheduler holds a weak reference to the CommandBuffer. The // CommandBuffer owns the GpuScheduler and holds a strong reference to it // through the ProcessCommands callback. CommandBuffer* command_buffer_; - int commands_per_update_; - scoped_ptr<gles2::GLES2Decoder> decoder_; scoped_ptr<CommandParser> parser_; diff --git a/gpu/command_buffer/service/gpu_scheduler_unittest.cc b/gpu/command_buffer/service/gpu_scheduler_unittest.cc index 3d21f90..4fb54b4 100644 --- a/gpu/command_buffer/service/gpu_scheduler_unittest.cc +++ b/gpu/command_buffer/service/gpu_scheduler_unittest.cc @@ -44,7 +44,7 @@ class GpuSchedulerTest : public testing::Test { ON_CALL(*command_buffer_.get(), GetState()) .WillByDefault(Return(default_state)); - async_api_.reset(new StrictMock<SpecializedDoCommandAsyncAPIMock>); + async_api_.reset(new StrictMock<AsyncAPIMock>); decoder_ = new gles2::MockGLES2Decoder(); @@ -55,10 +55,9 @@ class GpuSchedulerTest : public testing::Test { 0, async_api_.get()); - scheduler_.reset(new GpuScheduler(command_buffer_.get(), - decoder_, - parser_, - 2)); + scheduler_.reset(gpu::GpuScheduler::CreateForTests(command_buffer_.get(), + decoder_, + parser_)); EXPECT_CALL(*decoder_, Destroy()) .Times(1) @@ -97,7 +96,7 @@ TEST_F(GpuSchedulerTest, SchedulerDoesNothingIfRingBufferIsEmpty) { EXPECT_CALL(*command_buffer_, SetParseError(_)) .Times(0); - scheduler_->PutChanged(true); + scheduler_->PutChanged(); } TEST_F(GpuSchedulerTest, ProcessesOneCommand) { @@ -119,7 +118,7 @@ TEST_F(GpuSchedulerTest, ProcessesOneCommand) { EXPECT_CALL(*command_buffer_, SetParseError(_)) .Times(0); - scheduler_->PutChanged(true); + scheduler_->PutChanged(); } TEST_F(GpuSchedulerTest, ProcessesTwoCommands) { @@ -144,7 +143,7 @@ TEST_F(GpuSchedulerTest, ProcessesTwoCommands) { .WillOnce(Return(error::kNoError)); EXPECT_CALL(*command_buffer_, SetGetOffset(3)); - scheduler_->PutChanged(true); + scheduler_->PutChanged(); } TEST_F(GpuSchedulerTest, SchedulerSetsTheGLContext) { @@ -157,48 +156,7 @@ TEST_F(GpuSchedulerTest, SchedulerSetsTheGLContext) { EXPECT_CALL(*command_buffer_, GetState()) .WillRepeatedly(Return(state)); - scheduler_->PutChanged(true); -} - -TEST_F(GpuSchedulerTest, PostsTaskToFinishRemainingCommands) { - unsigned int pauseCmd = SpecializedDoCommandAsyncAPIMock::kTestQuantumCommand; - CommandHeader* header = reinterpret_cast<CommandHeader*>(&buffer_[0]); - header[0].command = 7; - header[0].size 
= 2; - buffer_[1] = 123; - header[2].command = pauseCmd; - header[2].size = 1; - header[3].command = 9; - header[3].size = 1; - - CommandBuffer::State state; - - state.put_offset = 4; - EXPECT_CALL(*command_buffer_, GetState()) - .WillRepeatedly(Return(state)); - - EXPECT_CALL(*async_api_, DoCommand(7, 1, &buffer_[0])) - .WillOnce(Return(error::kNoError)); - EXPECT_CALL(*command_buffer_, SetGetOffset(2)); - - EXPECT_CALL(*async_api_, DoCommand(pauseCmd, 0, &buffer_[2])) - .WillOnce(Return(error::kNoError)); - EXPECT_CALL(*command_buffer_, SetGetOffset(3)); - - scheduler_->PutChanged(true); - - // ProcessCommands is called a second time when the pending task is run. - - state.put_offset = 4; - EXPECT_CALL(*command_buffer_, GetState()) - .WillRepeatedly(Return(state)); - - EXPECT_CALL(*async_api_, DoCommand(9, 0, &buffer_[3])) - .WillOnce(Return(error::kNoError)); - - EXPECT_CALL(*command_buffer_, SetGetOffset(4)); - - MessageLoop::current()->RunAllPending(); + scheduler_->PutChanged(); } TEST_F(GpuSchedulerTest, SetsErrorCodeOnCommandBuffer) { @@ -222,7 +180,7 @@ TEST_F(GpuSchedulerTest, SetsErrorCodeOnCommandBuffer) { EXPECT_CALL(*command_buffer_, SetParseError(error::kUnknownCommand)); - scheduler_->PutChanged(true); + scheduler_->PutChanged(); } TEST_F(GpuSchedulerTest, ProcessCommandsDoesNothingAfterError) { @@ -232,7 +190,7 @@ TEST_F(GpuSchedulerTest, ProcessCommandsDoesNothingAfterError) { EXPECT_CALL(*command_buffer_, GetState()) .WillRepeatedly(Return(state)); - scheduler_->PutChanged(true); + scheduler_->PutChanged(); } TEST_F(GpuSchedulerTest, CanGetAddressOfSharedMemory) { diff --git a/gpu/command_buffer/service/mocks.cc b/gpu/command_buffer/service/mocks.cc index 70898b3..46a8977 100644 --- a/gpu/command_buffer/service/mocks.cc +++ b/gpu/command_buffer/service/mocks.cc @@ -27,25 +27,6 @@ void AsyncAPIMock::SetToken(unsigned int command, engine_->set_token(args->token); } -SpecializedDoCommandAsyncAPIMock::SpecializedDoCommandAsyncAPIMock() {} - -SpecializedDoCommandAsyncAPIMock::~SpecializedDoCommandAsyncAPIMock() {} - -error::Error SpecializedDoCommandAsyncAPIMock::DoCommand( - unsigned int command, - unsigned int arg_count, - const void* cmd_data) { - if (command == kTestQuantumCommand) { - // Surpass the GpuScheduler scheduling quantum. - base::TimeTicks start_time = base::TimeTicks::Now(); - while ((base::TimeTicks::Now() - start_time).InMicroseconds() < - GpuScheduler::kMinimumSchedulerQuantumMicros) { - base::PlatformThread::Sleep(1); - } - } - return AsyncAPIMock::DoCommand(command, arg_count, cmd_data); -} - namespace gles2 { MockShaderTranslator::MockShaderTranslator() {} diff --git a/gpu/command_buffer/service/mocks.h b/gpu/command_buffer/service/mocks.h index f526c01..0d341bd 100644 --- a/gpu/command_buffer/service/mocks.h +++ b/gpu/command_buffer/service/mocks.h @@ -69,20 +69,6 @@ class AsyncAPIMock : public AsyncAPIInterface { CommandBufferEngine *engine_; }; -// Allows specialized behavior per command in DoCommand. -class SpecializedDoCommandAsyncAPIMock : public AsyncAPIMock { - public: - // Cause DoCommand to sleep more than the GpuScheduler time quantum. 
- static const unsigned int kTestQuantumCommand = 333; - - SpecializedDoCommandAsyncAPIMock(); - virtual ~SpecializedDoCommandAsyncAPIMock(); - - virtual error::Error DoCommand(unsigned int command, - unsigned int arg_count, - const void* cmd_data); -}; - namespace gles2 { class MockShaderTranslator : public ShaderTranslatorInterface { diff --git a/gpu/demos/framework/window.cc b/gpu/demos/framework/window.cc index f609a55..87da9a0 100644 --- a/gpu/demos/framework/window.cc +++ b/gpu/demos/framework/window.cc @@ -60,7 +60,9 @@ bool Window::CreateRenderContext(gfx::PluginWindowHandle hwnd) { } GpuScheduler* gpu_scheduler( - new GpuScheduler(command_buffer.get(), NULL, NULL)); + GpuScheduler::Create(command_buffer.get(), + NULL, + NULL)); if (!gpu_scheduler->Initialize(hwnd, gfx::Size(), false, gpu::gles2::DisallowedExtensions(), NULL, std::vector<int32>(), diff --git a/gpu/gles2_conform_support/egl/display.cc b/gpu/gles2_conform_support/egl/display.cc index 63604b9..6485e33 100644 --- a/gpu/gles2_conform_support/egl/display.cc +++ b/gpu/gles2_conform_support/egl/display.cc @@ -109,7 +109,9 @@ EGLSurface Display::CreateWindowSurface(EGLConfig config, using gpu::GpuScheduler; std::vector<int32> attribs; scoped_ptr<GpuScheduler> gpu_scheduler( - new GpuScheduler(command_buffer_.get(), NULL, NULL)); + GpuScheduler::Create(command_buffer_.get(), + NULL, + NULL)); if (!gpu_scheduler->Initialize( win, gfx::Size(), false, gpu::gles2::DisallowedExtensions(), NULL, attribs, NULL)) diff --git a/ppapi/proxy/ppb_context_3d_proxy.cc b/ppapi/proxy/ppb_context_3d_proxy.cc index cc1c75a..9bf9098 100644 --- a/ppapi/proxy/ppb_context_3d_proxy.cc +++ b/ppapi/proxy/ppb_context_3d_proxy.cc @@ -87,6 +87,7 @@ class PepperCommandBuffer : public gpu::CommandBuffer { virtual bool Initialize(base::SharedMemory* buffer, int32 size); virtual gpu::Buffer GetRingBuffer(); virtual State GetState(); + virtual State GetLastState(); virtual void Flush(int32 put_offset); virtual State FlushSync(int32 put_offset, int32 last_known_get); virtual void SetGetOffset(int32 get_offset); @@ -186,6 +187,10 @@ gpu::CommandBuffer::State PepperCommandBuffer::GetState() { return last_state_; } +gpu::CommandBuffer::State PepperCommandBuffer::GetLastState() { + return last_state_; +} + void PepperCommandBuffer::Flush(int32 put_offset) { if (last_state_.error != gpu::error::kNoError) return; diff --git a/webkit/gpu/webgraphicscontext3d_in_process_command_buffer_impl.cc b/webkit/gpu/webgraphicscontext3d_in_process_command_buffer_impl.cc index a45e78c..296a531 100644 --- a/webkit/gpu/webgraphicscontext3d_in_process_command_buffer_impl.cc +++ b/webkit/gpu/webgraphicscontext3d_in_process_command_buffer_impl.cc @@ -78,7 +78,7 @@ class GLInProcessContext : public base::SupportsWeakPtr<GLInProcessContext> { ~GLInProcessContext(); - void PumpCommands(bool sync); + void PumpCommands(); // Create a GLInProcessContext that renders directly to a view. 
The view and // the associated window must not be destroyed until the returned @@ -384,10 +384,10 @@ void GLInProcessContext::ResizeOffscreen(const gfx::Size& size) { } } -void GLInProcessContext::PumpCommands(bool /* sync */) { +void GLInProcessContext::PumpCommands() { ::gpu::CommandBuffer::State state; do { - gpu_scheduler_->PutChanged(true); + gpu_scheduler_->PutChanged(); MessageLoop::current()->RunAllPending(); state = command_buffer_->GetState(); } while (state.get_offset != state.put_offset); @@ -577,7 +577,9 @@ bool GLInProcessContext::Initialize(bool onscreen, if (!command_buffer_->Initialize(kCommandBufferSize)) return false; - gpu_scheduler_ = new GpuScheduler(command_buffer_.get(), NULL, NULL); + gpu_scheduler_ = GpuScheduler::Create(command_buffer_.get(), + NULL, + NULL); if (onscreen) { if (render_surface == gfx::kNullPluginWindow) { @@ -1073,18 +1075,11 @@ void WebGraphicsContext3DInProcessCommandBufferImpl:: void WebGraphicsContext3DInProcessCommandBufferImpl::waitLatchCHROMIUM( WGC3Duint latch_id) { - // TODO(gmam): See if we can comment this in. - // ClearContext(); - gl_->WaitLatchCHROMIUM(latch_id); } void WebGraphicsContext3DInProcessCommandBufferImpl::setLatchCHROMIUM( WGC3Duint latch_id) { - // TODO(gmam): See if we can comment this in. - // ClearContext(); - gl_->SetLatchCHROMIUM(latch_id); - // required to ensure set command is sent to GPU process gl_->Flush(); } diff --git a/webkit/gpu/webgraphicscontext3d_in_process_impl.cc b/webkit/gpu/webgraphicscontext3d_in_process_impl.cc index cc6f69a..0b5f75d 100644 --- a/webkit/gpu/webgraphicscontext3d_in_process_impl.cc +++ b/webkit/gpu/webgraphicscontext3d_in_process_impl.cc @@ -707,6 +707,7 @@ void WebGraphicsContext3DInProcessImpl::waitLatchCHROMIUM( void WebGraphicsContext3DInProcessImpl::setLatchCHROMIUM( WGC3Duint latch_id) { + glFlush(); } WebString WebGraphicsContext3DInProcessImpl:: |
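A minimal usage sketch (not part of the patch) tying together the API changes visible in the hunks above: GpuScheduler instances are now obtained from the static Create/CreateForTests factories rather than the removed public constructors, PutChanged() no longer takes a sync flag because every flush processes commands up to the reported put offset, and the put-offset callback on CommandBufferService is a plain Callback0 instead of Callback1&lt;bool&gt;. The NewCallback helper and the surrounding setup are assumptions based on the base library of this era; surface initialization and error handling are elided.

    // Sketch only; command_buffer is assumed to be an already-initialized
    // gpu::CommandBufferService owned by the caller.
    scoped_ptr<gpu::GpuScheduler> scheduler(
        gpu::GpuScheduler::Create(command_buffer.get(), NULL, NULL));

    // The callback no longer carries a bool: every put-offset change is
    // handled the same way, by draining commands up to the put offset.
    // NewCallback here is the old-style base callback factory (assumption).
    command_buffer->SetPutOffsetChangeCallback(
        NewCallback(scheduler.get(), &gpu::GpuScheduler::PutChanged));

    // Clients that only need the last known state use the new GetLastState();
    // for the in-process CommandBufferService it simply forwards to GetState().
    gpu::CommandBuffer::State state = command_buffer->GetLastState();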