author    dyen <dyen@chromium.org>  2015-09-22 14:53:26 -0700
committer Commit bot <commit-bot@chromium.org>  2015-09-22 22:09:09 +0000
commit    a6b0d39acbecdc8585ea240992f933f131ff0bde (patch)
tree      b1b18965986e792892ee8121c115b54a6ffd7e35
parent    2b4bacc82a4c4ac99a7386be8583d3fe0bea90b7 (diff)
Added global order numbers to in-process command buffers.

The global order numbers have been generalized and are now managed by the
sync point manager. The GpuChannel previously managed the global order
numbers by itself, but these order numbers have to be ordered with respect
to order numbers for in-process command buffers as well. The global order
numbers have therefore been merged into a sync point state class
(SyncPointClientState); wait/release functions will later be implemented
in SyncPointClient.

R=piman@chromium.org, sievers@chromium.org
BUG=514815

Review URL: https://codereview.chromium.org/1339203002

Cr-Commit-Position: refs/heads/master@{#350247}
-rw-r--r--  content/common/gpu/gpu_channel.cc                         60
-rw-r--r--  content/common/gpu/gpu_channel.h                          36
-rw-r--r--  content/common/gpu/gpu_command_buffer_stub.cc             18
-rw-r--r--  content/common/gpu/gpu_command_buffer_stub.h               2
-rw-r--r--  gpu/command_buffer/service/in_process_command_buffer.cc   25
-rw-r--r--  gpu/command_buffer/service/in_process_command_buffer.h     6
-rw-r--r--  gpu/command_buffer/service/sync_point_manager.cc          92
-rw-r--r--  gpu/command_buffer/service/sync_point_manager.h          112
8 files changed, 303 insertions(+), 48 deletions(-)
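Taken together, the patch moves order-number bookkeeping behind the new
SyncPointClientState class. A minimal sketch of the intended lifecycle,
assuming the declarations added to sync_point_manager.h below
(sync_point_manager and my_client_id are hypothetical stand-ins):

  // One SyncPointClientState per message stream, shared between the
  // enqueueing thread and the processing thread.
  scoped_refptr<gpu::SyncPointClientState> state =
      gpu::SyncPointClientState::Create();
  scoped_ptr<gpu::SyncPointClient> client =
      sync_point_manager->CreateSyncPointClient(
          state, gpu::CommandBufferNamespace::GPU_IO, my_client_id);

  // Enqueue side (e.g. the IO thread): reserve a global order number.
  uint32_t order_num =
      state->GenerateUnprocessedOrderNumber(sync_point_manager);

  // Processing side (a single thread): bracket execution of that message.
  state->BeginProcessingOrderNumber(order_num);
  // ... process the message ...
  state->FinishProcessingOrderNumber(order_num);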
diff --git a/content/common/gpu/gpu_channel.cc b/content/common/gpu/gpu_channel.cc
index 3f7361d..c470609 100644
--- a/content/common/gpu/gpu_channel.cc
+++ b/content/common/gpu/gpu_channel.cc
@@ -70,21 +70,22 @@ const int64 kStopPreemptThresholdMs = kVsyncIntervalMs;
} // anonymous namespace
-// Begin order numbers at 1 so 0 can mean no orders.
-uint32_t GpuChannelMessageQueue::global_order_counter_ = 1;
-
scoped_refptr<GpuChannelMessageQueue> GpuChannelMessageQueue::Create(
const base::WeakPtr<GpuChannel>& gpu_channel,
base::SingleThreadTaskRunner* task_runner) {
return new GpuChannelMessageQueue(gpu_channel, task_runner);
}
+scoped_refptr<gpu::SyncPointClientState>
+ GpuChannelMessageQueue::GetSyncPointClientState() {
+ return sync_point_client_state_;
+}
+
GpuChannelMessageQueue::GpuChannelMessageQueue(
const base::WeakPtr<GpuChannel>& gpu_channel,
base::SingleThreadTaskRunner* task_runner)
: enabled_(true),
- unprocessed_order_num_(0),
- processed_order_num_(0),
+ sync_point_client_state_(gpu::SyncPointClientState::Create()),
gpu_channel_(gpu_channel),
task_runner_(task_runner) {}
@@ -93,14 +94,20 @@ GpuChannelMessageQueue::~GpuChannelMessageQueue() {
}
uint32_t GpuChannelMessageQueue::GetUnprocessedOrderNum() const {
- base::AutoLock auto_lock(channel_messages_lock_);
- return unprocessed_order_num_;
+ return sync_point_client_state_->unprocessed_order_num();
}
-void GpuChannelMessageQueue::PushBackMessage(const IPC::Message& message) {
+uint32_t GpuChannelMessageQueue::GetProcessedOrderNum() const {
+ return sync_point_client_state_->processed_order_num();
+}
+
+void GpuChannelMessageQueue::PushBackMessage(
+ gpu::SyncPointManager* sync_point_manager, const IPC::Message& message) {
base::AutoLock auto_lock(channel_messages_lock_);
- if (enabled_)
- PushMessageHelper(make_scoped_ptr(new GpuChannelMessage(message)));
+ if (enabled_) {
+ PushMessageHelper(sync_point_manager,
+ make_scoped_ptr(new GpuChannelMessage(message)));
+ }
}
bool GpuChannelMessageQueue::GenerateSyncPointMessage(
@@ -118,7 +125,7 @@ bool GpuChannelMessageQueue::GenerateSyncPointMessage(
msg->retire_sync_point = retire_sync_point;
msg->sync_point = *sync_point;
- PushMessageHelper(msg.Pass());
+ PushMessageHelper(sync_point_manager, msg.Pass());
return true;
}
return false;
@@ -139,19 +146,27 @@ base::TimeTicks GpuChannelMessageQueue::GetNextMessageTimeTick() const {
GpuChannelMessage* GpuChannelMessageQueue::GetNextMessage() const {
base::AutoLock auto_lock(channel_messages_lock_);
if (!channel_messages_.empty()) {
- DCHECK_GT(channel_messages_.front()->order_number, processed_order_num_);
- DCHECK_LE(channel_messages_.front()->order_number, unprocessed_order_num_);
+ DCHECK_GT(channel_messages_.front()->order_number,
+ sync_point_client_state_->processed_order_num());
+ DCHECK_LE(channel_messages_.front()->order_number,
+ sync_point_client_state_->unprocessed_order_num());
+
return channel_messages_.front();
}
return nullptr;
}
+void GpuChannelMessageQueue::BeginMessageProcessing(
+ const GpuChannelMessage* msg) {
+ sync_point_client_state_->BeginProcessingOrderNumber(msg->order_number);
+}
+
bool GpuChannelMessageQueue::MessageProcessed() {
base::AutoLock auto_lock(channel_messages_lock_);
DCHECK(!channel_messages_.empty());
scoped_ptr<GpuChannelMessage> msg(channel_messages_.front());
channel_messages_.pop_front();
- processed_order_num_ = msg->order_number;
+ sync_point_client_state_->FinishProcessingOrderNumber(msg->order_number);
return !channel_messages_.empty();
}
@@ -186,15 +201,16 @@ void GpuChannelMessageQueue::ScheduleHandleMessage() {
}
void GpuChannelMessageQueue::PushMessageHelper(
+ gpu::SyncPointManager* sync_point_manager,
scoped_ptr<GpuChannelMessage> msg) {
channel_messages_lock_.AssertAcquired();
DCHECK(enabled_);
- msg->order_number = global_order_counter_++;
+ msg->order_number =
+ sync_point_client_state_->GenerateUnprocessedOrderNumber(
+ sync_point_manager);
msg->time_received = base::TimeTicks::Now();
- unprocessed_order_num_ = msg->order_number;
-
bool had_messages = !channel_messages_.empty();
channel_messages_.push_back(msg.release());
if (!had_messages)
@@ -339,7 +355,7 @@ bool GpuChannelMessageFilter::OnMessageReceived(const IPC::Message& message) {
base::Bind(&GpuChannel::HandleOutOfOrderMessage,
gpu_channel_, message));
} else {
- message_queue_->PushBackMessage(message);
+ message_queue_->PushBackMessage(sync_point_manager_, message);
}
handled = true;
}
@@ -611,7 +627,7 @@ base::ProcessId GpuChannel::GetClientPID() const {
}
uint32_t GpuChannel::GetProcessedOrderNum() const {
- return message_queue_->processed_order_num();
+ return message_queue_->GetProcessedOrderNum();
}
uint32_t GpuChannel::GetUnprocessedOrderNum() const {
@@ -806,6 +822,10 @@ bool GpuChannel::OnControlMessageReceived(const IPC::Message& msg) {
return handled;
}
+scoped_refptr<gpu::SyncPointClientState> GpuChannel::GetSyncPointClientState() {
+ return message_queue_->GetSyncPointClientState();
+}
+
void GpuChannel::HandleMessage() {
// If we have been preempted by another channel, just post a task to wake up.
if (preempted_flag_ && preempted_flag_->IsSet()) {
@@ -819,8 +839,8 @@ void GpuChannel::HandleMessage() {
if (!m)
return;
- current_order_num_ = m->order_number;
const IPC::Message& message = m->message;
+ message_queue_->BeginMessageProcessing(m);
int32_t routing_id = message.routing_id();
GpuCommandBufferStub* stub = stubs_.get(routing_id);
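In condensed form, the new enqueue/dispatch split in gpu_channel.cc looks
roughly like this (a simplified sketch of the hunks above; preemption,
routing, and rescheduling are elided):

  // IO thread: PushBackMessage() now stamps each message with a global
  // order number obtained through the shared SyncPointClientState.
  message_queue_->PushBackMessage(sync_point_manager_, message);

  // Main thread (HandleMessage):
  GpuChannelMessage* m = message_queue_->GetNextMessage();
  if (!m)
    return;
  message_queue_->BeginMessageProcessing(m);  // replaces current_order_num_
  // ... route m->message to the target stub ...
  message_queue_->MessageProcessed();  // pops m, finishes its order number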
diff --git a/content/common/gpu/gpu_channel.h b/content/common/gpu/gpu_channel.h
index aaaa30b..62a495b 100644
--- a/content/common/gpu/gpu_channel.h
+++ b/content/common/gpu/gpu_channel.h
@@ -36,6 +36,7 @@ class WaitableEvent;
namespace gpu {
class PreemptionFlag;
+class SyncPointClientState;
class SyncPointManager;
union ValueState;
class ValueStateMap;
@@ -166,16 +167,15 @@ class CONTENT_EXPORT GpuChannel
// Visible for testing.
GpuChannelMessageFilter* filter() const { return filter_.get(); }
- // Returns the global order number of the IPC message that started processing
- // last.
- uint32_t current_order_num() const { return current_order_num_; }
-
// Returns the global order number for the last processed IPC message.
uint32_t GetProcessedOrderNum() const;
// Returns the global order number for the last unprocessed IPC message.
uint32_t GetUnprocessedOrderNum() const;
+ // Returns the shared sync point client state.
+ scoped_refptr<gpu::SyncPointClientState> GetSyncPointClientState();
+
void HandleMessage();
// Some messages such as WaitForGetOffsetInRange and WaitForTokenInRange are
@@ -279,8 +279,6 @@ class CONTENT_EXPORT GpuChannel
// Map of stream id to stream state.
base::hash_map<int32, StreamState> streams_;
- uint32_t current_order_num_;
-
bool allow_future_sync_points_;
bool allow_real_time_streams_;
@@ -412,11 +410,13 @@ class GpuChannelMessageQueue
const base::WeakPtr<GpuChannel>& gpu_channel,
base::SingleThreadTaskRunner* task_runner);
- // Returns the global order number for the last processed IPC message.
+ scoped_refptr<gpu::SyncPointClientState> GetSyncPointClientState();
+
+ // Returns the global order number for the last unprocessed IPC message.
uint32_t GetUnprocessedOrderNum() const;
// Returns the global order number for the last processed IPC message.
- uint32_t processed_order_num() const { return processed_order_num_; }
+ uint32_t GetProcessedOrderNum() const;
bool HasQueuedMessages() const;
@@ -424,11 +424,14 @@ class GpuChannelMessageQueue
GpuChannelMessage* GetNextMessage() const;
+ void BeginMessageProcessing(const GpuChannelMessage* msg);
+
// Should be called after a message returned by GetNextMessage is processed.
// Returns true if there are more messages on the queue.
bool MessageProcessed();
- void PushBackMessage(const IPC::Message& message);
+ void PushBackMessage(gpu::SyncPointManager* sync_point_manager,
+ const IPC::Message& message);
bool GenerateSyncPointMessage(gpu::SyncPointManager* sync_point_manager,
const IPC::Message& message,
@@ -446,24 +449,19 @@ class GpuChannelMessageQueue
void ScheduleHandleMessage();
- void PushMessageHelper(scoped_ptr<GpuChannelMessage> msg);
-
- // This number is only ever incremented/read on the IO thread.
- static uint32_t global_order_counter_;
+ void PushMessageHelper(gpu::SyncPointManager* sync_point_manager,
+ scoped_ptr<GpuChannelMessage> msg);
bool enabled_;
- // Highest IPC order number seen, set when queued on the IO thread.
- uint32_t unprocessed_order_num_;
// Both deques own the messages.
std::deque<GpuChannelMessage*> channel_messages_;
- // This lock protects enabled_, unprocessed_order_num_, and channel_messages_.
+ // This lock protects enabled_ and channel_messages_.
mutable base::Lock channel_messages_lock_;
- // Last finished IPC order number. Not protected by a lock as it's only
- // accessed on the main thread.
- uint32_t processed_order_num_;
+ // Keeps track of sync point related state such as message order numbers.
+ scoped_refptr<gpu::SyncPointClientState> sync_point_client_state_;
base::WeakPtr<GpuChannel> gpu_channel_;
scoped_refptr<base::SingleThreadTaskRunner> task_runner_;
diff --git a/content/common/gpu/gpu_command_buffer_stub.cc b/content/common/gpu/gpu_command_buffer_stub.cc
index 36016d8..3ba218a 100644
--- a/content/common/gpu/gpu_command_buffer_stub.cc
+++ b/content/common/gpu/gpu_command_buffer_stub.cc
@@ -485,6 +485,8 @@ void GpuCommandBufferStub::Destroy() {
// destroy it before those.
scheduler_.reset();
+ sync_point_client_.reset();
+
bool have_context = false;
if (decoder_ && decoder_->GetGLContext()) {
// Try to make the context current regardless of whether it was lost, so we
@@ -528,10 +530,22 @@ void GpuCommandBufferStub::OnInitialize(
bool result = command_buffer_->Initialize();
DCHECK(result);
+ GpuChannelManager* manager = channel_->gpu_channel_manager();
+ DCHECK(manager);
+
+ gpu::SyncPointManager* sync_point_manager = manager->sync_point_manager();
+ DCHECK(sync_point_manager);
+
decoder_.reset(::gpu::gles2::GLES2Decoder::Create(context_group_.get()));
scheduler_.reset(new gpu::GpuScheduler(command_buffer_.get(),
decoder_.get(),
decoder_.get()));
+ sync_point_client_ =
+ sync_point_manager->CreateSyncPointClient(
+ channel_->GetSyncPointClientState(),
+ gpu::CommandBufferNamespace::GPU_IO,
+ command_buffer_id_);
+
if (preemption_flag_.get())
scheduler_->SetPreemptByFlag(preemption_flag_);
@@ -551,7 +565,6 @@ void GpuCommandBufferStub::OnInitialize(
this,
handle_);
} else {
- GpuChannelManager* manager = channel_->gpu_channel_manager();
surface_ = manager->GetDefaultOffscreenSurface();
}
@@ -682,8 +695,7 @@ void GpuCommandBufferStub::OnInitialize(
Send(reply_message);
if (handle_.is_null() && !active_url_.is_empty()) {
- GpuChannelManager* gpu_channel_manager = channel_->gpu_channel_manager();
- gpu_channel_manager->Send(new GpuHostMsg_DidCreateOffscreenContext(
+ manager->Send(new GpuHostMsg_DidCreateOffscreenContext(
active_url_));
}
diff --git a/content/common/gpu/gpu_command_buffer_stub.h b/content/common/gpu/gpu_command_buffer_stub.h
index 9e1e15c..b1aac89 100644
--- a/content/common/gpu/gpu_command_buffer_stub.h
+++ b/content/common/gpu/gpu_command_buffer_stub.h
@@ -34,6 +34,7 @@
namespace gpu {
struct Mailbox;
+class SyncPointClient;
class ValueStateMap;
namespace gles2 {
class MailboxManager;
@@ -271,6 +272,7 @@ class GpuCommandBufferStub
scoped_ptr<gpu::CommandBufferService> command_buffer_;
scoped_ptr<gpu::gles2::GLES2Decoder> decoder_;
scoped_ptr<gpu::GpuScheduler> scheduler_;
+ scoped_ptr<gpu::SyncPointClient> sync_point_client_;
scoped_refptr<gfx::GLSurface> surface_;
scoped_ptr<GpuMemoryManagerClientState> memory_manager_client_state_;
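The net effect of the stub changes: every command buffer on a channel shares
the channel's SyncPointClientState while owning a distinct SyncPointClient
registration. A condensed sketch (names as in the hunks above; id_a and id_b
are hypothetical command buffer ids):

  scoped_refptr<gpu::SyncPointClientState> shared =
      channel_->GetSyncPointClientState();
  scoped_ptr<gpu::SyncPointClient> client_a =
      sync_point_manager->CreateSyncPointClient(
          shared, gpu::CommandBufferNamespace::GPU_IO, id_a);
  scoped_ptr<gpu::SyncPointClient> client_b =
      sync_point_manager->CreateSyncPointClient(
          shared, gpu::CommandBufferNamespace::GPU_IO, id_b);
  // Both clients observe the same order numbers through the shared state;
  // ~SyncPointClient() deregisters each (namespace, id) pair automatically.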
diff --git a/gpu/command_buffer/service/in_process_command_buffer.cc b/gpu/command_buffer/service/in_process_command_buffer.cc
index 3b3584e..9711aad 100644
--- a/gpu/command_buffer/service/in_process_command_buffer.cc
+++ b/gpu/command_buffer/service/in_process_command_buffer.cc
@@ -344,6 +344,11 @@ bool InProcessCommandBuffer::InitializeOnGpuThread(
return false;
}
+ sync_point_client_state_ = SyncPointClientState::Create();
+ sync_point_client_ = service_->sync_point_manager()->CreateSyncPointClient(
+ sync_point_client_state_,
+ GetNamespaceID(), GetCommandBufferID());
+
if (service_->UseVirtualizedGLContexts() ||
decoder_->GetContextGroup()
->feature_info()
@@ -438,6 +443,8 @@ bool InProcessCommandBuffer::DestroyOnGpuThread() {
}
context_ = NULL;
surface_ = NULL;
+ sync_point_client_ = NULL;
+ sync_point_client_state_ = NULL;
gl_share_group_ = NULL;
#if defined(OS_ANDROID)
stream_texture_manager_.reset();
@@ -480,10 +487,13 @@ int32 InProcessCommandBuffer::GetLastToken() {
return last_state_.token;
}
-void InProcessCommandBuffer::FlushOnGpuThread(int32 put_offset) {
+void InProcessCommandBuffer::FlushOnGpuThread(int32 put_offset,
+ uint32_t order_num) {
CheckSequencedThread();
ScopedEvent handle_flush(&flush_event_);
base::AutoLock lock(command_buffer_lock_);
+
+ sync_point_client_state_->BeginProcessingOrderNumber(order_num);
command_buffer_->Flush(put_offset);
{
// Update state before signaling the flush event.
@@ -493,6 +503,13 @@ void InProcessCommandBuffer::FlushOnGpuThread(int32 put_offset) {
DCHECK((!error::IsError(state_after_last_flush_.error) && !context_lost_) ||
(error::IsError(state_after_last_flush_.error) && context_lost_));
+ // Currently the in-process command buffer does not support being
+ // descheduled; if it ever does, we would need to hold off on calling
+ // FinishProcessingOrderNumber() until the message is rescheduled and
+ // finishes processing. This DCHECK enforces that assumption.
+ DCHECK(context_lost_ || put_offset == state_after_last_flush_.get_offset);
+ sync_point_client_state_->FinishProcessingOrderNumber(order_num);
+
// If we've processed all pending commands but still have pending queries,
// pump idle work until the query is passed.
if (put_offset == state_after_last_flush_.get_offset &&
@@ -533,10 +550,14 @@ void InProcessCommandBuffer::Flush(int32 put_offset) {
if (last_put_offset_ == put_offset)
return;
+ SyncPointManager* sync_manager = service_->sync_point_manager();
+ const uint32_t order_num =
+ sync_point_client_state_->GenerateUnprocessedOrderNumber(sync_manager);
last_put_offset_ = put_offset;
base::Closure task = base::Bind(&InProcessCommandBuffer::FlushOnGpuThread,
gpu_thread_weak_ptr_,
- put_offset);
+ put_offset,
+ order_num);
QueueTask(task);
}
diff --git a/gpu/command_buffer/service/in_process_command_buffer.h b/gpu/command_buffer/service/in_process_command_buffer.h
index 4859760..f4f91f8 100644
--- a/gpu/command_buffer/service/in_process_command_buffer.h
+++ b/gpu/command_buffer/service/in_process_command_buffer.h
@@ -47,6 +47,8 @@ class StreamTextureManagerInProcess;
#endif
namespace gpu {
+class SyncPointClient;
+class SyncPointClientState;
class SyncPointManager;
class ValueStateMap;
@@ -201,7 +203,7 @@ class GPU_EXPORT InProcessCommandBuffer : public CommandBuffer,
bool InitializeOnGpuThread(const InitializeOnGpuThreadParams& params);
bool DestroyOnGpuThread();
- void FlushOnGpuThread(int32 put_offset);
+ void FlushOnGpuThread(int32 put_offset, uint32_t order_num);
void ScheduleDelayedWorkOnGpuThread();
uint32 CreateStreamTextureOnGpuThread(uint32 client_texture_id);
bool MakeCurrent();
@@ -240,6 +242,8 @@ class GPU_EXPORT InProcessCommandBuffer : public CommandBuffer,
scoped_ptr<gles2::GLES2Decoder> decoder_;
scoped_refptr<gfx::GLContext> context_;
scoped_refptr<gfx::GLSurface> surface_;
+ scoped_refptr<SyncPointClientState> sync_point_client_state_;
+ scoped_ptr<SyncPointClient> sync_point_client_;
base::Closure context_lost_callback_;
bool delayed_work_pending_; // Used to throttle PerformDelayedWork.
ImageFactory* image_factory_;
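The in-process path mirrors the channel path: the client thread reserves the
order number, and the GPU thread consumes it. Condensed from the two hunks
above (a sketch, not the verbatim implementation):

  // Client thread (Flush): reserve the order number before posting the task.
  const uint32_t order_num =
      sync_point_client_state_->GenerateUnprocessedOrderNumber(sync_manager);
  QueueTask(base::Bind(&InProcessCommandBuffer::FlushOnGpuThread,
                       gpu_thread_weak_ptr_, put_offset, order_num));

  // GPU thread (FlushOnGpuThread): bracket the flush with that number.
  sync_point_client_state_->BeginProcessingOrderNumber(order_num);
  command_buffer_->Flush(put_offset);
  sync_point_client_state_->FinishProcessingOrderNumber(order_num);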
diff --git a/gpu/command_buffer/service/sync_point_manager.cc b/gpu/command_buffer/service/sync_point_manager.cc
index 562fa8c..039e494 100644
--- a/gpu/command_buffer/service/sync_point_manager.cc
+++ b/gpu/command_buffer/service/sync_point_manager.cc
@@ -14,15 +14,87 @@ namespace gpu {
static const int kMaxSyncBase = INT_MAX;
+scoped_refptr<SyncPointClientState> SyncPointClientState::Create() {
+ return new SyncPointClientState;
+}
+
+uint32_t SyncPointClientState::GenerateUnprocessedOrderNumber(
+ SyncPointManager* sync_point_manager) {
+ const uint32_t order_num = sync_point_manager->GenerateOrderNumber();
+ base::subtle::Release_Store(&unprocessed_order_num_, order_num);
+ return order_num;
+}
+
+SyncPointClientState::SyncPointClientState()
+ : processed_order_num_(0),
+ unprocessed_order_num_(0),
+ current_order_num_(0) {
+}
+
+SyncPointClientState::~SyncPointClientState() {
+}
+
+SyncPointClient::~SyncPointClient() {
+ sync_point_manager_->DestroySyncPointClient(namespace_id_, client_id_);
+}
+
+SyncPointClient::SyncPointClient(SyncPointManager* sync_point_manager,
+ scoped_refptr<SyncPointClientState> state,
+ CommandBufferNamespace namespace_id,
+ uint64_t client_id)
+ : sync_point_manager_(sync_point_manager),
+ client_state_(state),
+ namespace_id_(namespace_id),
+ client_id_(client_id) {
+}
+
SyncPointManager::SyncPointManager(bool allow_threaded_wait)
: allow_threaded_wait_(allow_threaded_wait),
// To reduce the risk that a sync point created in a previous GPU process
// will be in flight in the next GPU process, randomize the starting sync
// point number. http://crbug.com/373452
next_sync_point_(base::RandInt(1, kMaxSyncBase)),
- retire_cond_var_(&lock_) {}
+ retire_cond_var_(&lock_) {
+ global_order_num_.GetNext();
+}
-SyncPointManager::~SyncPointManager() {}
+SyncPointManager::~SyncPointManager() {
+ for (const ClientMap& client_map : client_maps_) {
+ DCHECK(client_map.empty());
+ }
+}
+
+scoped_ptr<SyncPointClient> SyncPointManager::CreateSyncPointClient(
+ scoped_refptr<SyncPointClientState> client_state,
+ CommandBufferNamespace namespace_id, uint64_t client_id) {
+ DCHECK_GE(namespace_id, 0);
+ DCHECK_LT(static_cast<size_t>(namespace_id), arraysize(client_maps_));
+ base::AutoLock auto_lock(client_maps_lock_);
+
+ ClientMap& client_map = client_maps_[namespace_id];
+ std::pair<ClientMap::iterator, bool> result = client_map.insert(
+ std::make_pair(client_id, new SyncPointClient(this,
+ client_state,
+ namespace_id,
+ client_id)));
+ DCHECK(result.second);
+
+ return make_scoped_ptr(result.first->second);
+}
+
+scoped_refptr<SyncPointClientState> SyncPointManager::GetSyncPointClientState(
+ CommandBufferNamespace namespace_id, uint64_t client_id) {
+ DCHECK_GE(namespace_id, 0);
+ DCHECK_LT(static_cast<size_t>(namespace_id), arraysize(client_maps_));
+ base::AutoLock auto_lock(client_maps_lock_);
+
+ ClientMap& client_map = client_maps_[namespace_id];
+ ClientMap::iterator it = client_map.find(client_id);
+ if (it != client_map.end()) {
+ return it->second->client_state();
+ }
+ return nullptr;
+}
uint32 SyncPointManager::GenerateSyncPoint() {
base::AutoLock lock(lock_);
@@ -95,4 +167,20 @@ bool SyncPointManager::IsSyncPointRetiredLocked(uint32 sync_point) {
return sync_point_map_.find(sync_point) == sync_point_map_.end();
}
+uint32_t SyncPointManager::GenerateOrderNumber() {
+ return global_order_num_.GetNext();
+}
+
+void SyncPointManager::DestroySyncPointClient(
+ CommandBufferNamespace namespace_id, uint64_t client_id) {
+ DCHECK_GE(namespace_id, 0);
+ DCHECK_LT(static_cast<size_t>(namespace_id), arraysize(client_maps_));
+
+ base::AutoLock auto_lock(client_maps_lock_);
+ ClientMap& client_map = client_maps_[namespace_id];
+ ClientMap::iterator it = client_map.find(client_id);
+ DCHECK(it != client_map.end());
+ client_map.erase(it);
+}
+
} // namespace gpu
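The manager's bookkeeping is symmetric: CreateSyncPointClient() inserts the
client into the per-namespace map and hands ownership to the caller, and the
client's destructor erases the entry again. A minimal usage sketch (manager
and client_id are hypothetical stand-ins):

  {
    scoped_ptr<gpu::SyncPointClient> client =
        manager->CreateSyncPointClient(
            gpu::SyncPointClientState::Create(),
            gpu::CommandBufferNamespace::GPU_IO, client_id);

    // While the client is alive, other code can look up its shared state.
    scoped_refptr<gpu::SyncPointClientState> state =
        manager->GetSyncPointClientState(
            gpu::CommandBufferNamespace::GPU_IO, client_id);
  }
  // ~SyncPointClient() called DestroySyncPointClient(), erasing the entry.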
diff --git a/gpu/command_buffer/service/sync_point_manager.h b/gpu/command_buffer/service/sync_point_manager.h
index 98e849b..3f11e05 100644
--- a/gpu/command_buffer/service/sync_point_manager.h
+++ b/gpu/command_buffer/service/sync_point_manager.h
@@ -7,15 +7,103 @@
#include <vector>
+#include "base/atomic_sequence_num.h"
#include "base/callback.h"
#include "base/containers/hash_tables.h"
+#include "base/logging.h"
+#include "base/memory/ref_counted.h"
#include "base/memory/scoped_ptr.h"
#include "base/synchronization/condition_variable.h"
#include "base/synchronization/lock.h"
+#include "base/threading/thread_checker.h"
+#include "gpu/command_buffer/common/constants.h"
#include "gpu/gpu_export.h"
namespace gpu {
+class SyncPointClient;
+class SyncPointManager;
+
+class GPU_EXPORT SyncPointClientState
+ : public base::RefCountedThreadSafe<SyncPointClientState> {
+ public:
+ static scoped_refptr<SyncPointClientState> Create();
+ uint32_t GenerateUnprocessedOrderNumber(SyncPointManager* sync_point_manager);
+
+ void BeginProcessingOrderNumber(uint32_t order_num) {
+ DCHECK(processing_thread_checker_.CalledOnValidThread());
+ DCHECK_GE(order_num, current_order_num_);
+ current_order_num_ = order_num;
+ }
+
+ void FinishProcessingOrderNumber(uint32_t order_num) {
+ DCHECK(processing_thread_checker_.CalledOnValidThread());
+ DCHECK_EQ(current_order_num_, order_num);
+ DCHECK_GT(order_num, processed_order_num());
+ base::subtle::Release_Store(&processed_order_num_, order_num);
+ }
+
+ uint32_t processed_order_num() const {
+ return base::subtle::Acquire_Load(&processed_order_num_);
+ }
+
+ uint32_t unprocessed_order_num() const {
+ return base::subtle::Acquire_Load(&unprocessed_order_num_);
+ }
+
+ uint32_t current_order_num() const {
+ DCHECK(processing_thread_checker_.CalledOnValidThread());
+ return current_order_num_;
+ }
+
+ protected:
+ friend class base::RefCountedThreadSafe<SyncPointClientState>;
+ friend class SyncPointClient;
+
+ SyncPointClientState();
+ virtual ~SyncPointClientState();
+
+ // Last finished IPC order number.
+ base::subtle::Atomic32 processed_order_num_;
+
+ // Unprocessed order number expected to be processed under normal execution.
+ base::subtle::Atomic32 unprocessed_order_num_;
+
+ // Non-thread-safe functions must be called from a single thread.
+ base::ThreadChecker processing_thread_checker_;
+
+ // Current IPC order number being processed (only used on processing thread).
+ uint32_t current_order_num_;
+
+ DISALLOW_COPY_AND_ASSIGN(SyncPointClientState);
+};
+
+class GPU_EXPORT SyncPointClient {
+ public:
+ ~SyncPointClient();
+
+ scoped_refptr<SyncPointClientState> client_state() { return client_state_; }
+
+ private:
+ friend class SyncPointManager;
+
+ SyncPointClient(SyncPointManager* sync_point_manager,
+ scoped_refptr<SyncPointClientState> state,
+ CommandBufferNamespace namespace_id, uint64_t client_id);
+
+ // The sync point manager is guaranteed to outlive this client.
+ SyncPointManager* sync_point_manager_;
+
+ // Keep the state that is sharable across multiple threads.
+ scoped_refptr<SyncPointClientState> client_state_;
+
+ // Unique namespace/client id pair for this sync point client.
+ CommandBufferNamespace namespace_id_;
+ uint64_t client_id_;
+
+ DISALLOW_COPY_AND_ASSIGN(SyncPointClient);
+};
+
// This class manages the sync points, which allow cross-channel
// synchronization.
class GPU_EXPORT SyncPointManager {
@@ -23,6 +111,15 @@ class GPU_EXPORT SyncPointManager {
explicit SyncPointManager(bool allow_threaded_wait);
~SyncPointManager();
+ // Creates/destroys a sync point client, which message processors should hold.
+ scoped_ptr<SyncPointClient> CreateSyncPointClient(
+ scoped_refptr<SyncPointClientState> client_state,
+ CommandBufferNamespace namespace_id, uint64_t client_id);
+
+ // Finds the state of an already created sync point client.
+ scoped_refptr<SyncPointClientState> GetSyncPointClientState(
+ CommandBufferNamespace namespace_id, uint64_t client_id);
+
// Generates a sync point, returning its ID. This can be called on any thread.
// IDs start at a random number. Never return 0.
uint32 GenerateSyncPoint();
@@ -44,14 +141,27 @@ class GPU_EXPORT SyncPointManager {
void WaitSyncPoint(uint32 sync_point);
private:
+ friend class SyncPointClient;
+ friend class SyncPointClientState;
+
typedef std::vector<base::Closure> ClosureList;
typedef base::hash_map<uint32, ClosureList> SyncPointMap;
-
+ typedef base::hash_map<uint64_t, SyncPointClient*> ClientMap;
bool IsSyncPointRetiredLocked(uint32 sync_point);
+ uint32_t GenerateOrderNumber();
+ void DestroySyncPointClient(CommandBufferNamespace namespace_id,
+ uint64_t client_id);
const bool allow_threaded_wait_;
+ // Order number is global for all clients.
+ base::AtomicSequenceNumber global_order_num_;
+
+ // Client map holds a map of client id to client for each namespace.
+ base::Lock client_maps_lock_;
+ ClientMap client_maps_[NUM_COMMAND_BUFFER_NAMESPACES];
+
// Protects the 2 fields below. Note: callbacks shouldn't be called with this
// held.
base::Lock lock_;
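For context, the Release_Store/Acquire_Load pairing in SyncPointClientState
is what lets callers such as GpuChannel::GetProcessedOrderNum() read order
numbers without taking channel_messages_lock_. A minimal sketch of the
pattern in isolation (illustrative only, not verbatim Chromium code):

  // Processing thread: every write made while processing this message is
  // published before the store can be observed.
  base::subtle::Release_Store(&processed_order_num_, order_num);

  // Any other thread, no lock needed: the acquire-load pairs with the
  // release-store, so the observed order number is fully published.
  uint32_t processed = base::subtle::Acquire_Load(&processed_order_num_);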