author     piman@google.com <piman@google.com@0039d316-1c4b-4281-b951-d872f2087c98>  2011-04-28 23:37:14 +0000
committer  piman@google.com <piman@google.com@0039d316-1c4b-4281-b951-d872f2087c98>  2011-04-28 23:37:14 +0000
commit     ef16c174a500a841cf6a120dc4ef9fca89fac9f9 (patch)
tree       8491a815c314e161a462f42c41c374403ce6ec8c /content
parent     d0a7409f5ad0c075d5208ea0eb93ff07868c6168 (diff)
download   chromium_src-ef16c174a500a841cf6a120dc4ef9fca89fac9f9.zip
           chromium_src-ef16c174a500a841cf6a120dc4ef9fca89fac9f9.tar.gz
           chromium_src-ef16c174a500a841cf6a120dc4ef9fca89fac9f9.tar.bz2
Rework FlushSync to return early if commands have been processed since the last update
BUG=80480
TEST=
Review URL: http://codereview.chromium.org/6883179

git-svn-id: svn://svn.chromium.org/chrome/trunk/src@83442 0039d316-1c4b-4281-b951-d872f2087c98
Diffstat (limited to 'content')
-rw-r--r--  content/common/gpu/gpu_command_buffer_stub.cc  40
-rw-r--r--  content/common/gpu/gpu_command_buffer_stub.h    8
-rw-r--r--  content/common/gpu/gpu_messages.h               7
-rw-r--r--  content/renderer/command_buffer_proxy.cc       79
-rw-r--r--  content/renderer/command_buffer_proxy.h        14
5 files changed, 61 insertions, 87 deletions
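
The gist of the change: CommandBufferProxy::FlushSync now takes the get offset the caller last observed (last_known_get). If the cached state already shows that the GPU has consumed commands past that point, the proxy returns the cached state and only issues a cheap asynchronous flush, skipping the blocking GpuCommandBufferMsg_Flush round trip. A hypothetical caller sketch, not part of this change, assuming command_buffer points at the proxy and put_offset is the caller's current put offset; it only uses the GetState/FlushSync/get_offset/error names visible in the diffs below:

  // Wait for the service-side get offset to advance past what we last saw.
  // FlushSync(put_offset, last_known_get) only blocks when nothing has been
  // consumed since last_known_get was read; otherwise it returns cached state.
  gpu::CommandBuffer::State state = command_buffer->GetState();
  int32 last_known_get = state.get_offset;
  do {
    state = command_buffer->FlushSync(put_offset, last_known_get);
  } while (state.get_offset == last_known_get &&
           state.error == gpu::error::kNoError);

On the blocking branch the stub's OnFlush (below) runs commands through the service-side FlushSync before replying, so a loop like this makes progress rather than spinning.
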
diff --git a/content/common/gpu/gpu_command_buffer_stub.cc b/content/common/gpu/gpu_command_buffer_stub.cc
index de5e48a..d78f4bc 100644
--- a/content/common/gpu/gpu_command_buffer_stub.cc
+++ b/content/common/gpu/gpu_command_buffer_stub.cc
@@ -54,7 +54,8 @@ GpuCommandBufferStub::GpuCommandBufferStub(
route_id_(route_id),
renderer_id_(renderer_id),
render_view_id_(render_view_id),
- watchdog_(watchdog) {
+ watchdog_(watchdog),
+ task_factory_(ALLOW_THIS_IN_INITIALIZER_LIST(this)) {
}
GpuCommandBufferStub::~GpuCommandBufferStub() {
@@ -72,7 +73,6 @@ bool GpuCommandBufferStub::OnMessageReceived(const IPC::Message& message) {
IPC_BEGIN_MESSAGE_MAP(GpuCommandBufferStub, message)
IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_Initialize, OnInitialize);
IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_GetState, OnGetState);
- IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_AsyncGetState, OnAsyncGetState);
IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_Flush, OnFlush);
IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_AsyncFlush, OnAsyncFlush);
IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_CreateTransferBuffer,
@@ -135,7 +135,7 @@ void GpuCommandBufferStub::OnInitialize(
parent_texture_id_)) {
command_buffer_->SetPutOffsetChangeCallback(
NewCallback(scheduler_.get(),
- &gpu::GpuScheduler::ProcessCommands));
+ &gpu::GpuScheduler::PutChanged));
scheduler_->SetSwapBuffersCallback(
NewCallback(this, &GpuCommandBufferStub::OnSwapBuffers));
scheduler_->SetLatchCallback(base::Bind(
@@ -176,17 +176,16 @@ void GpuCommandBufferStub::OnCommandProcessed() {
void GpuCommandBufferStub::OnGetState(gpu::CommandBuffer::State* state) {
*state = command_buffer_->GetState();
-}
-
-void GpuCommandBufferStub::OnAsyncGetState() {
- gpu::CommandBuffer::State state = command_buffer_->GetState();
- Send(new GpuCommandBufferMsg_UpdateState(route_id_, state));
+ if (state->error == gpu::error::kLostContext &&
+ gfx::GLContext::LosesAllContextsOnContextLost())
+ channel_->LoseAllContexts();
}
void GpuCommandBufferStub::OnFlush(int32 put_offset,
+ int32 last_known_get,
gpu::CommandBuffer::State* state) {
GPU_TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnFlush");
- *state = command_buffer_->FlushSync(put_offset);
+ *state = command_buffer_->FlushSync(put_offset, last_known_get);
if (state->error == gpu::error::kLostContext &&
gfx::GLContext::LosesAllContextsOnContextLost())
channel_->LoseAllContexts();
@@ -194,12 +193,11 @@ void GpuCommandBufferStub::OnFlush(int32 put_offset,
void GpuCommandBufferStub::OnAsyncFlush(int32 put_offset) {
GPU_TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnAsyncFlush");
- gpu::CommandBuffer::State state = command_buffer_->FlushSync(put_offset);
- if (state.error == gpu::error::kLostContext &&
- gfx::GLContext::LosesAllContextsOnContextLost())
- channel_->LoseAllContexts();
- else
- Send(new GpuCommandBufferMsg_UpdateState(route_id_, state));
+ command_buffer_->Flush(put_offset);
+ // TODO(piman): Do this everytime the scheduler finishes processing a batch of
+ // commands.
+ MessageLoop::current()->PostTask(FROM_HERE,
+ task_factory_.NewRunnableMethod(&GpuCommandBufferStub::ReportState));
}
void GpuCommandBufferStub::OnCreateTransferBuffer(int32 size,
@@ -346,4 +344,16 @@ void GpuCommandBufferStub::ViewResized() {
#endif
}
+void GpuCommandBufferStub::ReportState() {
+ gpu::CommandBuffer::State state = command_buffer_->GetState();
+ if (state.error == gpu::error::kLostContext &&
+ gfx::GLContext::LosesAllContextsOnContextLost()) {
+ channel_->LoseAllContexts();
+ } else {
+ IPC::Message* msg = new GpuCommandBufferMsg_UpdateState(route_id_, state);
+ msg->set_unblock(true);
+ Send(msg);
+ }
+}
+
#endif // defined(ENABLE_GPU)
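
To make the asynchronous path cheap, OnAsyncFlush no longer calls FlushSync and replies inline; it hands over the put offset and defers the state report to a posted task. The two methods are restated below with comments on why each step matters (same code as in the hunks above, annotations added):

  void GpuCommandBufferStub::OnAsyncFlush(int32 put_offset) {
    // Hand the renderer's new put offset to the command buffer; this fires the
    // put-offset-change callback wired to GpuScheduler::PutChanged in
    // OnInitialize above. Unlike the old code, no state is returned inline.
    command_buffer_->Flush(put_offset);
    // Defer the state report to a posted task. Tasks minted by task_factory_
    // are revoked if this stub is destroyed before they run.
    MessageLoop::current()->PostTask(FROM_HERE,
        task_factory_.NewRunnableMethod(&GpuCommandBufferStub::ReportState));
  }

  void GpuCommandBufferStub::ReportState() {
    gpu::CommandBuffer::State state = command_buffer_->GetState();
    if (state.error == gpu::error::kLostContext &&
        gfx::GLContext::LosesAllContextsOnContextLost()) {
      channel_->LoseAllContexts();
    } else {
      IPC::Message* msg = new GpuCommandBufferMsg_UpdateState(route_id_, state);
      // Unblocked: the renderer may dispatch this even while it is blocked in a
      // synchronous GpuCommandBufferMsg_Flush, keeping its cached state fresh.
      msg->set_unblock(true);
      Send(msg);
    }
  }
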
diff --git a/content/common/gpu/gpu_command_buffer_stub.h b/content/common/gpu/gpu_command_buffer_stub.h
index 76df806..796ad8b 100644
--- a/content/common/gpu/gpu_command_buffer_stub.h
+++ b/content/common/gpu/gpu_command_buffer_stub.h
@@ -13,6 +13,7 @@
#include "base/memory/weak_ptr.h"
#include "base/process.h"
+#include "base/task.h"
#include "gpu/command_buffer/service/command_buffer_service.h"
#include "gpu/command_buffer/service/gpu_scheduler.h"
#include "ipc/ipc_channel.h"
@@ -81,8 +82,9 @@ class GpuCommandBufferStub
int32 size,
bool* result);
void OnGetState(gpu::CommandBuffer::State* state);
- void OnAsyncGetState();
- void OnFlush(int32 put_offset, gpu::CommandBuffer::State* state);
+ void OnFlush(int32 put_offset,
+ int32 last_known_get,
+ gpu::CommandBuffer::State* state);
void OnAsyncFlush(int32 put_offset);
void OnCreateTransferBuffer(int32 size, int32 id_request, int32* id);
void OnRegisterTransferBuffer(base::SharedMemoryHandle transfer_buffer,
@@ -104,6 +106,7 @@ class GpuCommandBufferStub
#endif // defined(OS_MACOSX)
void ResizeCallback(gfx::Size size);
+ void ReportState();
// The lifetime of objects of this class is managed by a GpuChannel. The
// GpuChannels destroy all the GpuCommandBufferStubs that they own when they
@@ -127,6 +130,7 @@ class GpuCommandBufferStub
scoped_ptr<gpu::CommandBufferService> command_buffer_;
scoped_ptr<gpu::GpuScheduler> scheduler_;
GpuWatchdog* watchdog_;
+ ScopedRunnableMethodFactory<GpuCommandBufferStub> task_factory_;
DISALLOW_COPY_AND_ASSIGN(GpuCommandBufferStub);
};
diff --git a/content/common/gpu/gpu_messages.h b/content/common/gpu/gpu_messages.h
index a5f6337..3dc7958 100644
--- a/content/common/gpu/gpu_messages.h
+++ b/content/common/gpu/gpu_messages.h
@@ -342,14 +342,11 @@ IPC_SYNC_MESSAGE_ROUTED2_1(GpuCommandBufferMsg_Initialize,
IPC_SYNC_MESSAGE_ROUTED0_1(GpuCommandBufferMsg_GetState,
gpu::CommandBuffer::State /* state */)
-// Get the current state of the command buffer asynchronously. State is
-// returned via UpdateState message.
-IPC_MESSAGE_ROUTED0(GpuCommandBufferMsg_AsyncGetState)
-
// Synchronize the put and get offsets of both processes. Caller passes its
// current put offset. Current state (including get offset) is returned.
-IPC_SYNC_MESSAGE_ROUTED1_1(GpuCommandBufferMsg_Flush,
+IPC_SYNC_MESSAGE_ROUTED2_1(GpuCommandBufferMsg_Flush,
int32 /* put_offset */,
+ int32 /* last_known_get */,
gpu::CommandBuffer::State /* state */)
// Asynchronously synchronize the put and get offsets of both processes.
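
For readers less familiar with these macros: the ROUTEDn_m suffix encodes n request parameters and m reply parameters, so moving Flush from ROUTED1_1 to ROUTED2_1 adds the last_known_get input while keeping the single State reply. A short sketch of both ends, using only names from the diffs in this commit:

  // Sender side (renderer): blocks until the handler fills |state|.
  gpu::CommandBuffer::State state;
  Send(new GpuCommandBufferMsg_Flush(route_id_, put_offset, last_known_get, &state));

  // Handler side (GPU process): two in-parameters, one out-parameter written
  // before the synchronous reply is sent.
  void OnFlush(int32 put_offset,
               int32 last_known_get,
               gpu::CommandBuffer::State* state);
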
diff --git a/content/renderer/command_buffer_proxy.cc b/content/renderer/command_buffer_proxy.cc
index 42af276..6a1c06c 100644
--- a/content/renderer/command_buffer_proxy.cc
+++ b/content/renderer/command_buffer_proxy.cc
@@ -140,23 +140,37 @@ Buffer CommandBufferProxy::GetRingBuffer() {
gpu::CommandBuffer::State CommandBufferProxy::GetState() {
// Send will flag state with lost context if IPC fails.
- if (last_state_.error == gpu::error::kNoError)
- Send(new GpuCommandBufferMsg_GetState(route_id_, &last_state_));
+ if (last_state_.error == gpu::error::kNoError) {
+ gpu::CommandBuffer::State state;
+ if (Send(new GpuCommandBufferMsg_GetState(route_id_, &state)))
+ OnUpdateState(state);
+ }
return last_state_;
}
void CommandBufferProxy::Flush(int32 put_offset) {
- AsyncFlush(put_offset, NULL);
+ if (last_state_.error != gpu::error::kNoError)
+ return;
+
+ Send(new GpuCommandBufferMsg_AsyncFlush(route_id_, put_offset));
}
-gpu::CommandBuffer::State CommandBufferProxy::FlushSync(int32 put_offset) {
+gpu::CommandBuffer::State CommandBufferProxy::FlushSync(int32 put_offset,
+ int32 last_known_get) {
GPU_TRACE_EVENT0("gpu", "CommandBufferProxy::FlushSync");
- // Send will flag state with lost context if IPC fails.
- if (last_state_.error == gpu::error::kNoError) {
- Send(new GpuCommandBufferMsg_Flush(route_id_,
- put_offset,
- &last_state_));
+ if (last_known_get == last_state_.get_offset) {
+ // Send will flag state with lost context if IPC fails.
+ if (last_state_.error == gpu::error::kNoError) {
+ gpu::CommandBuffer::State state;
+ if (Send(new GpuCommandBufferMsg_Flush(route_id_,
+ put_offset,
+ last_known_get,
+ &state)))
+ OnUpdateState(state);
+ }
+ } else {
+ Flush(put_offset);
}
return last_state_;
@@ -345,37 +359,6 @@ void CommandBufferProxy::SetWindowSize(const gfx::Size& size) {
}
#endif
-void CommandBufferProxy::AsyncGetState(Task* completion_task) {
- if (last_state_.error != gpu::error::kNoError)
- return;
-
- IPC::Message* message = new GpuCommandBufferMsg_AsyncGetState(route_id_);
-
- // Do not let a synchronous flush hold up this message. If this handler is
- // deferred until after the synchronous flush completes, it will overwrite the
- // cached last_state_ with out-of-date data.
- message->set_unblock(true);
-
- if (Send(message))
- pending_async_flush_tasks_.push(linked_ptr<Task>(completion_task));
-}
-
-void CommandBufferProxy::AsyncFlush(int32 put_offset, Task* completion_task) {
- if (last_state_.error != gpu::error::kNoError)
- return;
-
- IPC::Message* message = new GpuCommandBufferMsg_AsyncFlush(route_id_,
- put_offset);
-
- // Do not let a synchronous flush hold up this message. If this handler is
- // deferred until after the synchronous flush completes, it will overwrite the
- // cached last_state_ with out-of-date data.
- message->set_unblock(true);
-
- if (Send(message))
- pending_async_flush_tasks_.push(linked_ptr<Task>(completion_task));
-}
-
bool CommandBufferProxy::Send(IPC::Message* msg) {
// Caller should not intentionally send a message if the context is lost.
DCHECK(last_state_.error == gpu::error::kNoError);
@@ -399,16 +382,8 @@ bool CommandBufferProxy::Send(IPC::Message* msg) {
}
void CommandBufferProxy::OnUpdateState(const gpu::CommandBuffer::State& state) {
- last_state_ = state;
-
- linked_ptr<Task> task = pending_async_flush_tasks_.front();
- pending_async_flush_tasks_.pop();
-
- if (task.get()) {
- // Although we need need to update last_state_ while potentially waiting
- // for a synchronous flush to complete, we do not need to invoke the
- // callback synchonously. Also, post it as a non nestable task so it is
- // always invoked by the outermost message loop.
- MessageLoop::current()->PostNonNestableTask(FROM_HERE, task.release());
- }
+ // Handle wraparound. It works as long as we don't have more than 2B state
+ // updates in flight across which reordering occurs.
+ if (state.generation - last_state_.generation < 0x80000000U)
+ last_state_ = state;
}
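
The wraparound check above relies on unsigned 32-bit arithmetic: subtracting the cached generation from the incoming one wraps modulo 2^32, so an update that is ahead by fewer than 2^31 increments yields a small difference and is accepted, while a stale or reordered update yields a difference of 2^31 or more and is dropped. A standalone illustration (the helper name is made up; the generation field is assumed to be a 32-bit unsigned counter, consistent with the "2B" comment):

  #include <stdint.h>

  // Returns true if |incoming| is at least as new as |cached| under
  // modulo-2^32 wraparound, assuming the two never drift apart by 2^31+.
  bool IsNewerGeneration(uint32_t incoming, uint32_t cached) {
    return incoming - cached < 0x80000000U;
  }

  // Examples:
  //   IsNewerGeneration(5, 0xFFFFFFF0u)  -> true  (wrapped forward by 21)
  //   IsNewerGeneration(0xFFFFFFF0u, 5)  -> false (stale by 21, dropped)
  //   IsNewerGeneration(7, 7)            -> true  (equal is not older)
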
diff --git a/content/renderer/command_buffer_proxy.h b/content/renderer/command_buffer_proxy.h
index e63150c..9cbd033 100644
--- a/content/renderer/command_buffer_proxy.h
+++ b/content/renderer/command_buffer_proxy.h
@@ -49,7 +49,7 @@ class CommandBufferProxy : public gpu::CommandBuffer,
virtual gpu::Buffer GetRingBuffer();
virtual State GetState();
virtual void Flush(int32 put_offset);
- virtual State FlushSync(int32 put_offset);
+ virtual State FlushSync(int32 put_offset, int32 last_known_get);
virtual void SetGetOffset(int32 get_offset);
virtual int32 CreateTransferBuffer(size_t size, int32 id_request);
virtual int32 RegisterTransferBuffer(base::SharedMemory* shared_memory,
@@ -82,14 +82,6 @@ class CommandBufferProxy : public gpu::CommandBuffer,
return last_state_;
}
- // Get the state asynchronously. The task is posted when the state is
- // updated. Takes ownership of the task object.
- void AsyncGetState(Task* completion_task);
-
- // Flush the command buffer asynchronously. The task is posted when the flush
- // completes. Takes ownership of the task object.
- void AsyncFlush(int32 put_offset, Task* completion_task);
-
private:
// Send an IPC message over the GPU channel. This is private to fully
@@ -115,10 +107,6 @@ class CommandBufferProxy : public gpu::CommandBuffer,
IPC::Channel::Sender* channel_;
int route_id_;
- // Pending asynchronous flush callbacks.
- typedef std::queue<linked_ptr<Task> > AsyncFlushTaskQueue;
- AsyncFlushTaskQueue pending_async_flush_tasks_;
-
scoped_ptr<Task> notify_repaint_task_;
scoped_ptr<Callback0::Type> swap_buffers_callback_;