summary | refs | log | tree | commit | diff | stats
path: root/content/common
diff options
context:
space:
mode:
Diffstat (limited to 'content/common')
-rw-r--r--  content/common/gpu/client/command_buffer_proxy_impl.cc | 10
-rw-r--r--  content/common/gpu/client/command_buffer_proxy_impl.h  |  2
-rw-r--r--  content/common/gpu/gpu_channel.cc                      | 28
-rw-r--r--  content/common/gpu/gpu_channel.h                       |  2
-rw-r--r--  content/common/gpu/gpu_command_buffer_stub.cc          | 24
-rw-r--r--  content/common/gpu/gpu_command_buffer_stub.h           |  4
-rw-r--r--  content/common/gpu/gpu_messages.h                      | 11
7 files changed, 29 insertions(+), 52 deletions(-)
diff --git a/content/common/gpu/client/command_buffer_proxy_impl.cc b/content/common/gpu/client/command_buffer_proxy_impl.cc
index 7959fda..22e59a6 100644
--- a/content/common/gpu/client/command_buffer_proxy_impl.cc
+++ b/content/common/gpu/client/command_buffer_proxy_impl.cc
@@ -189,15 +189,15 @@ void CommandBufferProxyImpl::Flush(int32 put_offset) {
Send(new GpuCommandBufferMsg_AsyncFlush(route_id_,
put_offset,
- ++flush_count_));
+ ++flush_count_,
+ latency_info_));
+ latency_info_.clear();
}
void CommandBufferProxyImpl::SetLatencyInfo(
const std::vector<ui::LatencyInfo>& latency_info) {
- if (last_state_.error != gpu::error::kNoError ||
- latency_info.empty())
- return;
- Send(new GpuCommandBufferMsg_SetLatencyInfo(route_id_, latency_info));
+ for (size_t i = 0; i < latency_info.size(); i++)
+ latency_info_.push_back(latency_info[i]);
}
void CommandBufferProxyImpl::WaitForTokenInRange(int32 start, int32 end) {
diff --git a/content/common/gpu/client/command_buffer_proxy_impl.h b/content/common/gpu/client/command_buffer_proxy_impl.h
index 01efa8f..5adbbcf4 100644
--- a/content/common/gpu/client/command_buffer_proxy_impl.h
+++ b/content/common/gpu/client/command_buffer_proxy_impl.h
@@ -204,6 +204,8 @@ class CommandBufferProxyImpl
gpu::Capabilities capabilities_;
+ std::vector<ui::LatencyInfo> latency_info_;
+
DISALLOW_COPY_AND_ASSIGN(CommandBufferProxyImpl);
};
diff --git a/content/common/gpu/gpu_channel.cc b/content/common/gpu/gpu_channel.cc
index be1aa0a..8e9c226 100644
--- a/content/common/gpu/gpu_channel.cc
+++ b/content/common/gpu/gpu_channel.cc
@@ -667,21 +667,15 @@ size_t GpuChannel::MatchSwapBufferMessagesPattern(
DCHECK(current_message);
if (deferred_messages_.empty() || !current_message)
return 0;
- // Only care about SetLatencyInfo and AsyncFlush message.
- if (current_message->type() != GpuCommandBufferMsg_SetLatencyInfo::ID &&
- current_message->type() != GpuCommandBufferMsg_AsyncFlush::ID)
+ // Only care about AsyncFlush message.
+ if (current_message->type() != GpuCommandBufferMsg_AsyncFlush::ID)
return 0;
size_t index = 0;
int32 routing_id = current_message->routing_id();
- // In case of the current message is SetLatencyInfo, we try to look ahead one
- // more deferred messages.
- IPC::Message *first_message = NULL;
- IPC::Message *second_message = NULL;
-
// Fetch the first message and move index to point to the second message.
- first_message = deferred_messages_[index++];
+ IPC::Message* first_message = deferred_messages_[index++];
// If the current message is AsyncFlush, the expected message sequence for
// SwapBuffer should be AsyncFlush->Echo. We only try to match Echo message.
@@ -691,20 +685,6 @@ size_t GpuChannel::MatchSwapBufferMessagesPattern(
return 1;
}
- // If the current message is SetLatencyInfo, the expected message sequence
- // for SwapBuffer should be SetLatencyInfo->AsyncFlush->Echo (optional).
- if (current_message->type() == GpuCommandBufferMsg_SetLatencyInfo::ID &&
- first_message->type() == GpuCommandBufferMsg_AsyncFlush::ID &&
- first_message->routing_id() == routing_id) {
- if (deferred_messages_.size() >= 2)
- second_message = deferred_messages_[index];
- if (!second_message)
- return 1;
- if (second_message->type() == GpuCommandBufferMsg_Echo::ID &&
- second_message->routing_id() == routing_id) {
- return 2;
- }
- }
// No matched message is found.
return 0;
}
@@ -771,7 +751,7 @@ void GpuChannel::HandleMessage() {
// We process the pending messages immediately if these messages match
// the pattern of SwapBuffers; for example, GLRenderer always issues
// SwapBuffers calls with a specific IPC message pattern:
- // it should be SetLatencyInfo->AsyncFlush->Echo sequence.
+ // it should be AsyncFlush->Echo sequence.
//
// Instead of posting a task to message loop, it could avoid the possibility
// of being blocked by other channels, and make SwapBuffers executed as soon
diff --git a/content/common/gpu/gpu_channel.h b/content/common/gpu/gpu_channel.h
index 6b6dffb..e48eaa1 100644
--- a/content/common/gpu/gpu_channel.h
+++ b/content/common/gpu/gpu_channel.h
@@ -170,7 +170,7 @@ class GpuChannel : public IPC::Listener, public IPC::Sender {
// Try to match the messages pattern for GL SwapBuffers operation in the
// deferred message queue starting from the current processing message.
// Return the number of messages that matches the given pattern, e.g.
- // SetLatencyInfo -> AsyncFlush -> Echo sequence.
+ // AsyncFlush -> Echo sequence.
size_t MatchSwapBufferMessagesPattern(IPC::Message* current_message);
// The lifetime of objects of this class is managed by a GpuChannelManager.
diff --git a/content/common/gpu/gpu_command_buffer_stub.cc b/content/common/gpu/gpu_command_buffer_stub.cc
index fa22f94..4e2834f 100644
--- a/content/common/gpu/gpu_command_buffer_stub.cc
+++ b/content/common/gpu/gpu_command_buffer_stub.cc
@@ -229,8 +229,7 @@ bool GpuCommandBufferStub::OnMessageReceived(const IPC::Message& message) {
if (decoder_.get() && message.type() != GpuCommandBufferMsg_Echo::ID &&
message.type() != GpuCommandBufferMsg_WaitForTokenInRange::ID &&
message.type() != GpuCommandBufferMsg_WaitForGetOffsetInRange::ID &&
- message.type() != GpuCommandBufferMsg_RetireSyncPoint::ID &&
- message.type() != GpuCommandBufferMsg_SetLatencyInfo::ID) {
+ message.type() != GpuCommandBufferMsg_RetireSyncPoint::ID) {
if (!MakeCurrent())
return false;
have_context = true;
@@ -252,7 +251,6 @@ bool GpuCommandBufferStub::OnMessageReceived(const IPC::Message& message) {
IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_WaitForGetOffsetInRange,
OnWaitForGetOffsetInRange);
IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_AsyncFlush, OnAsyncFlush);
- IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_SetLatencyInfo, OnSetLatencyInfo);
IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_Rescheduled, OnRescheduled);
IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_RegisterTransferBuffer,
OnRegisterTransferBuffer);
@@ -630,15 +628,6 @@ void GpuCommandBufferStub::OnInitialize(
}
}
-void GpuCommandBufferStub::OnSetLatencyInfo(
- const std::vector<ui::LatencyInfo>& latency_info) {
- if (!ui::LatencyInfo::Verify(latency_info,
- "GpuCommandBufferStub::OnSetLatencyInfo"))
- return;
- if (!latency_info_callback_.is_null())
- latency_info_callback_.Run(latency_info);
-}
-
void GpuCommandBufferStub::OnCreateStreamTexture(
uint32 texture_id, int32 stream_id, bool* succeeded) {
#if defined(OS_ANDROID)
@@ -758,9 +747,18 @@ void GpuCommandBufferStub::CheckCompleteWaits() {
}
}
-void GpuCommandBufferStub::OnAsyncFlush(int32 put_offset, uint32 flush_count) {
+void GpuCommandBufferStub::OnAsyncFlush(
+ int32 put_offset,
+ uint32 flush_count,
+ const std::vector<ui::LatencyInfo>& latency_info) {
TRACE_EVENT1(
"gpu", "GpuCommandBufferStub::OnAsyncFlush", "put_offset", put_offset);
+
+ if (ui::LatencyInfo::Verify(latency_info,
+ "GpuCommandBufferStub::OnAsyncFlush") &&
+ !latency_info_callback_.is_null()) {
+ latency_info_callback_.Run(latency_info);
+ }
DCHECK(command_buffer_.get());
if (flush_count - last_flush_count_ < 0x8000000U) {
last_flush_count_ = flush_count;
diff --git a/content/common/gpu/gpu_command_buffer_stub.h b/content/common/gpu/gpu_command_buffer_stub.h
index 52cb5d7..f0efa65 100644
--- a/content/common/gpu/gpu_command_buffer_stub.h
+++ b/content/common/gpu/gpu_command_buffer_stub.h
@@ -164,7 +164,8 @@ class GpuCommandBufferStub
void OnWaitForGetOffsetInRange(int32 start,
int32 end,
IPC::Message* reply_message);
- void OnAsyncFlush(int32 put_offset, uint32 flush_count);
+ void OnAsyncFlush(int32 put_offset, uint32 flush_count,
+ const std::vector<ui::LatencyInfo>& latency_info);
void OnEcho(const IPC::Message& message);
void OnRescheduled();
void OnRegisterTransferBuffer(int32 id,
@@ -205,7 +206,6 @@ class GpuCommandBufferStub
void OnCommandProcessed();
void OnParseError();
- void OnSetLatencyInfo(const std::vector<ui::LatencyInfo>& latency_info);
void OnCreateStreamTexture(
uint32 texture_id, int32 stream_id, bool* succeeded);
diff --git a/content/common/gpu/gpu_messages.h b/content/common/gpu/gpu_messages.h
index 193544e..86d699b 100644
--- a/content/common/gpu/gpu_messages.h
+++ b/content/common/gpu/gpu_messages.h
@@ -524,14 +524,11 @@ IPC_SYNC_MESSAGE_ROUTED2_1(GpuCommandBufferMsg_WaitForGetOffsetInRange,
// Asynchronously synchronize the put and get offsets of both processes.
// Caller passes its current put offset. Current state (including get offset)
-// is returned in shared memory.
-IPC_MESSAGE_ROUTED2(GpuCommandBufferMsg_AsyncFlush,
+// is returned in shared memory. The input latency info for the current
+// frame is also sent to the GPU process.
+IPC_MESSAGE_ROUTED3(GpuCommandBufferMsg_AsyncFlush,
int32 /* put_offset */,
- uint32 /* flush_count */)
-
-// Sends information about the latency of the current frame to the GPU
-// process.
-IPC_MESSAGE_ROUTED1(GpuCommandBufferMsg_SetLatencyInfo,
+ uint32 /* flush_count */,
std::vector<ui::LatencyInfo> /* latency_info */)
// Asynchronously process any commands known to the GPU process. This is only