author    | vmiura <vmiura@chromium.org>         | 2015-02-06 08:42:51 -0800
committer | Commit bot <commit-bot@chromium.org> | 2015-02-06 16:43:14 +0000
commit    | b700b43d82aee159ade4db622dea6bdd920a7d7b
tree      | c0e213b8e4a55b95fe39d8160d16609764c8b0dc /content/common/gpu
parent    | b493f2c03eef1e147ec597c6ea9672012643a698
Add OrderingBarrierCHROMIUM API.
Implements a GPU-channel-level command buffer barrier that ensures
ordering between command buffers on the same channel without immediately
notifying the GPU service. Multiple ordering barriers can be combined
into a single IPC to the GPU service, so this API can be used in place
of ShallowFlushCHROMIUM to reduce IPC count and GPU service overhead.
BUG=454500
Review URL: https://codereview.chromium.org/896723008
Cr-Commit-Position: refs/heads/master@{#315048}
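
The flush-batching behaviour described in the commit message is easiest to see in isolation. Below is a minimal standalone sketch (not Chromium code: FlushBatcher, PendingFlush, and the log line standing in for the GpuCommandBufferMsg_AsyncFlush IPC are illustrative names) of the logic that GpuChannelHost::OrderingBarrier and InternalFlush implement in this patch: pending barriers are coalesced while they stay on one route, and a flush IPC is emitted only when the route changes or an explicit flush is requested.

// Standalone sketch (not Chromium code); models the batching logic added to
// GpuChannelHost in this patch. All names here are illustrative.
#include <cstdint>
#include <iostream>

struct PendingFlush {
  bool flush_pending = false;
  int route_id = -1;  // stands in for MSG_ROUTING_NONE
  int32_t put_offset = 0;
  unsigned int flush_count = 0;
};

class FlushBatcher {
 public:
  // Mirrors GpuChannelHost::OrderingBarrier: any pending barrier on a
  // different route is flushed first; the new barrier is only recorded, and
  // a flush is emitted immediately only when do_flush is true.
  void OrderingBarrier(int route_id, int32_t put_offset,
                       unsigned int flush_count, bool put_offset_changed,
                       bool do_flush) {
    if (pending_.flush_pending && pending_.route_id != route_id)
      InternalFlush();
    if (put_offset_changed) {
      pending_.flush_pending = true;
      pending_.route_id = route_id;
      pending_.put_offset = put_offset;
      pending_.flush_count = flush_count;
      if (do_flush)
        InternalFlush();
    }
  }

 private:
  // In Chromium this sends GpuCommandBufferMsg_AsyncFlush; here we just log.
  void InternalFlush() {
    std::cout << "AsyncFlush route=" << pending_.route_id
              << " put_offset=" << pending_.put_offset
              << " flush_count=" << pending_.flush_count << "\n";
    pending_.flush_pending = false;
  }

  PendingFlush pending_;
};

int main() {
  FlushBatcher batcher;
  batcher.OrderingBarrier(1, 100, 1, true, false);  // deferred
  batcher.OrderingBarrier(1, 200, 2, true, false);  // coalesced, still deferred
  batcher.OrderingBarrier(2, 50, 1, true, false);   // route change: flushes route 1
  batcher.OrderingBarrier(2, 80, 2, true, true);    // do_flush: flushes route 2
  return 0;
}

In this example, four ordering barriers across two routes produce only two AsyncFlush sends, which is the IPC reduction the commit message describes.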
Diffstat (limited to 'content/common/gpu')
-rw-r--r-- | content/common/gpu/client/command_buffer_proxy_impl.cc | 40
-rw-r--r-- | content/common/gpu/client/command_buffer_proxy_impl.h  |  2
-rw-r--r-- | content/common/gpu/client/gpu_channel_host.cc          | 52
-rw-r--r-- | content/common/gpu/client/gpu_channel_host.h           | 24
4 files changed, 104 insertions, 14 deletions
diff --git a/content/common/gpu/client/command_buffer_proxy_impl.cc b/content/common/gpu/client/command_buffer_proxy_impl.cc
index ec473f3..0f894be5 100644
--- a/content/common/gpu/client/command_buffer_proxy_impl.cc
+++ b/content/common/gpu/client/command_buffer_proxy_impl.cc
@@ -25,13 +25,13 @@ namespace content {
 
-CommandBufferProxyImpl::CommandBufferProxyImpl(
-    GpuChannelHost* channel,
-    int route_id)
+CommandBufferProxyImpl::CommandBufferProxyImpl(GpuChannelHost* channel,
+                                               int route_id)
     : channel_(channel),
      route_id_(route_id),
      flush_count_(0),
      last_put_offset_(-1),
+      last_barrier_put_offset_(-1),
      next_signal_id_(0) {
 }
@@ -179,16 +179,36 @@ void CommandBufferProxyImpl::Flush(int32 put_offset) {
                "put_offset", put_offset);
 
-  if (last_put_offset_ == put_offset)
+  bool put_offset_changed = last_put_offset_ != put_offset;
+  last_put_offset_ = put_offset;
+  last_barrier_put_offset_ = put_offset;
+
+  if (channel_) {
+    channel_->OrderingBarrier(route_id_, put_offset, ++flush_count_,
+                              latency_info_, put_offset_changed, true);
+  }
+
+  if (put_offset_changed)
+    latency_info_.clear();
+}
+
+void CommandBufferProxyImpl::OrderingBarrier(int32 put_offset) {
+  if (last_state_.error != gpu::error::kNoError)
     return;
 
-  last_put_offset_ = put_offset;
+  TRACE_EVENT1("gpu", "CommandBufferProxyImpl::OrderingBarrier", "put_offset",
+               put_offset);
+
+  bool put_offset_changed = last_barrier_put_offset_ != put_offset;
+  last_barrier_put_offset_ = put_offset;
+
+  if (channel_) {
+    channel_->OrderingBarrier(route_id_, put_offset, ++flush_count_,
+                              latency_info_, put_offset_changed, false);
+  }
 
-  Send(new GpuCommandBufferMsg_AsyncFlush(route_id_,
-                                          put_offset,
-                                          ++flush_count_,
-                                          latency_info_));
-  latency_info_.clear();
+  if (put_offset_changed)
+    latency_info_.clear();
 }
 
 void CommandBufferProxyImpl::SetLatencyInfo(
diff --git a/content/common/gpu/client/command_buffer_proxy_impl.h b/content/common/gpu/client/command_buffer_proxy_impl.h
index ea39740..28af2ef 100644
--- a/content/common/gpu/client/command_buffer_proxy_impl.h
+++ b/content/common/gpu/client/command_buffer_proxy_impl.h
@@ -89,6 +89,7 @@ class CommandBufferProxyImpl
   State GetLastState() override;
   int32 GetLastToken() override;
   void Flush(int32 put_offset) override;
+  void OrderingBarrier(int32 put_offset) override;
   void WaitForTokenInRange(int32 start, int32 end) override;
   void WaitForGetOffsetInRange(int32 start, int32 end) override;
   void SetGetBuffer(int32 shm_id) override;
@@ -190,6 +191,7 @@ class CommandBufferProxyImpl
   int route_id_;
   unsigned int flush_count_;
   int32 last_put_offset_;
+  int32 last_barrier_put_offset_;
 
   base::Closure channel_error_callback_;
diff --git a/content/common/gpu/client/gpu_channel_host.cc b/content/common/gpu/client/gpu_channel_host.cc
index be28c50..72e8991 100644
--- a/content/common/gpu/client/gpu_channel_host.cc
+++ b/content/common/gpu/client/gpu_channel_host.cc
@@ -30,6 +30,16 @@ GpuListenerInfo::GpuListenerInfo() {}
 
 GpuListenerInfo::~GpuListenerInfo() {}
 
+ProxyFlushInfo::ProxyFlushInfo()
+    : flush_pending(false),
+      route_id(MSG_ROUTING_NONE),
+      put_offset(0),
+      flush_count(0) {
+}
+
+ProxyFlushInfo::~ProxyFlushInfo() {
+}
+
 // static
 scoped_refptr<GpuChannelHost> GpuChannelHost::Create(
     GpuChannelHostFactory* factory,
@@ -112,6 +122,39 @@ bool GpuChannelHost::Send(IPC::Message* msg) {
   return false;
 }
 
+void GpuChannelHost::OrderingBarrier(
+    int route_id,
+    int32 put_offset,
+    unsigned int flush_count,
+    const std::vector<ui::LatencyInfo>& latency_info,
+    bool put_offset_changed,
+    bool do_flush) {
+  AutoLock lock(context_lock_);
+  if (flush_info_.flush_pending && flush_info_.route_id != route_id)
+    InternalFlush();
+
+  if (put_offset_changed) {
+    flush_info_.flush_pending = true;
+    flush_info_.route_id = route_id;
+    flush_info_.put_offset = put_offset;
+    flush_info_.flush_count = flush_count;
+    flush_info_.latency_info.insert(flush_info_.latency_info.end(),
+                                    latency_info.begin(), latency_info.end());
+
+    if (do_flush)
+      InternalFlush();
+  }
+}
+
+void GpuChannelHost::InternalFlush() {
+  DCHECK(flush_info_.flush_pending);
+  Send(new GpuCommandBufferMsg_AsyncFlush(
+      flush_info_.route_id, flush_info_.put_offset, flush_info_.flush_count,
+      flush_info_.latency_info));
+  flush_info_.latency_info.clear();
+  flush_info_.flush_pending = false;
+}
+
 CommandBufferProxyImpl* GpuChannelHost::CreateViewCommandBuffer(
     int32 surface_id,
     CommandBufferProxyImpl* share_group,
@@ -178,10 +221,8 @@ CommandBufferProxyImpl* GpuChannelHost::CreateOffscreenCommandBuffer(
   init_params.gpu_preference = gpu_preference;
   int32 route_id = GenerateRouteID();
   bool succeeded = false;
-  if (!Send(new GpuChannelMsg_CreateOffscreenCommandBuffer(size,
-                                                           init_params,
-                                                           route_id,
-                                                           &succeeded))) {
+  if (!Send(new GpuChannelMsg_CreateOffscreenCommandBuffer(
+          size, init_params, route_id, &succeeded))) {
     LOG(ERROR) << "Failed to send GpuChannelMsg_CreateOffscreenCommandBuffer.";
     return NULL;
   }
@@ -229,6 +270,9 @@ void GpuChannelHost::DestroyCommandBuffer(
   AutoLock lock(context_lock_);
   proxies_.erase(route_id);
 
+  if (flush_info_.flush_pending && flush_info_.route_id == route_id)
+    flush_info_.flush_pending = false;
+
   delete command_buffer;
 }
diff --git a/content/common/gpu/client/gpu_channel_host.h b/content/common/gpu/client/gpu_channel_host.h
index 877baa0..fbff47b 100644
--- a/content/common/gpu/client/gpu_channel_host.h
+++ b/content/common/gpu/client/gpu_channel_host.h
@@ -23,6 +23,7 @@
 #include "ipc/ipc_channel_handle.h"
 #include "ipc/ipc_sync_channel.h"
 #include "ipc/message_filter.h"
+#include "ui/events/latency_info.h"
 #include "ui/gfx/geometry/size.h"
 #include "ui/gfx/gpu_memory_buffer.h"
 #include "ui/gfx/native_widget_types.h"
@@ -63,6 +64,17 @@ struct GpuListenerInfo {
   scoped_refptr<base::MessageLoopProxy> loop;
 };
 
+struct ProxyFlushInfo {
+  ProxyFlushInfo();
+  ~ProxyFlushInfo();
+
+  bool flush_pending;
+  int route_id;
+  int32 put_offset;
+  unsigned int flush_count;
+  std::vector<ui::LatencyInfo> latency_info;
+};
+
 class CONTENT_EXPORT GpuChannelHostFactory {
  public:
   virtual ~GpuChannelHostFactory() {}
@@ -103,6 +115,15 @@ class GpuChannelHost : public IPC::Sender,
   // IPC::Sender implementation:
   bool Send(IPC::Message* msg) override;
 
+  // Set an ordering barrier. AsyncFlushes any pending barriers on other
+  // routes. Combines multiple OrderingBarriers into a single AsyncFlush.
+  void OrderingBarrier(int route_id,
+                       int32 put_offset,
+                       unsigned int flush_count,
+                       const std::vector<ui::LatencyInfo>& latency_info,
+                       bool put_offset_changed,
+                       bool do_flush);
+
   // Create and connect to a command buffer in the GPU process.
   CommandBufferProxyImpl* CreateViewCommandBuffer(
       int32 surface_id,
@@ -173,6 +194,8 @@ class GpuChannelHost : public IPC::Sender,
   ~GpuChannelHost() override;
   void Connect(const IPC::ChannelHandle& channel_handle,
               base::WaitableEvent* shutdown_event);
+  bool InternalSend(IPC::Message* msg);
+  void InternalFlush();
 
   // A filter used internally to route incoming messages from the IO thread
   // to the correct message loop. It also maintains some shared state between
@@ -245,6 +268,7 @@ class GpuChannelHost : public IPC::Sender,
   // Used to look up a proxy from its routing id.
   typedef base::hash_map<int, CommandBufferProxyImpl*> ProxyMap;
   ProxyMap proxies_;
+  ProxyFlushInfo flush_info_;
 
   DISALLOW_COPY_AND_ASSIGN(GpuChannelHost);
 };