| author | piman@chromium.org <piman@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98> | 2013-06-13 01:43:34 +0000 |
|---|---|---|
| committer | piman@chromium.org <piman@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98> | 2013-06-13 01:43:34 +0000 |
| commit | 7951bfe3854423b11bee1d40bf83843b64f082ad (patch) | |
| tree | 45ece678eb129d1b9dfc24f0bb205d73a6f8355f | |
| parent | 4d90d439351de6a038b7e6e75f17e4ab31aa7056 (diff) | |
Restore thread safety to GpuChannelHost.
RefCountedThreadSafe + SupportsWeakPtr is generally a bad combination (a sketch of why follows below).
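For context, here is a minimal sketch of why this combination is fragile. It assumes Chromium's base library; the Host class is a hypothetical illustration, not code from this CL:

```cpp
#include "base/memory/ref_counted.h"
#include "base/memory/weak_ptr.h"

// Hypothetical class mixing the two idioms.
class Host : public base::RefCountedThreadSafe<Host>,
             public base::SupportsWeakPtr<Host> {
 public:
  void DoWork() {}

 private:
  friend class base::RefCountedThreadSafe<Host>;
  // With RefCountedThreadSafe, the destructor (which invalidates every
  // outstanding WeakPtr) runs on whichever thread drops the last reference.
  ~Host() {}
};

// base::WeakPtr is single-threaded: it must be dereferenced and invalidated
// on the same thread. If a WeakPtr obtained via AsWeakPtr() is checked on
// thread A while the last scoped_refptr<Host> is released on thread B, the
// invalidation races with the check. The old GpuChannelHost avoided this
// only by never dereferencing its WeakPtr on the IO thread and posting
// tasks back to the main thread instead.
```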
This changes how thread safety is handled in GpuChannelHost:
- Shared data/state set by the IO thread is moved onto the MessageFilter and protected by a lock (see the sketch after this list).
- The MessageFilter no longer needs to post tasks to the GpuChannelHost on the main thread.
- Most of the GpuChannelHost fields are constant; the remaining ones are atomic or protected by a lock.
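Condensed from the new content/common/gpu/client/gpu_channel_host.h in the diff below, this is roughly what the IO-thread-owned state looks like after the change (includes and unrelated members omitted; MessageFilter is actually nested inside GpuChannelHost):

```cpp
// Condensed sketch of GpuChannelHost::MessageFilter after this CL.
class MessageFilter : public IPC::ChannelProxy::MessageFilter {
 public:
  // Runs on the IO thread; flips the shared state under |lock_| before
  // notifying the proxies, so no task needs to be posted back to the
  // GpuChannelHost on the main thread.
  virtual void OnChannelError() OVERRIDE;

  // Callable from any thread.
  bool IsLost() const {
    base::AutoLock lock(lock_);
    return lost_;
  }

 private:
  // |listeners_| (not shown) is IO-thread only; everything below is
  // protected by |lock_|.
  mutable base::Lock lock_;
  bool lost_;
  std::vector<gpu::Mailbox> mailbox_name_pool_;
  size_t requested_mailboxes_;
};
```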
It also includes various cleanups:
- Reduced the scope of some locks; in particular, we mostly avoid taking locks while sending messages.
- Removed GpuChannelHostFactory::IsIOThread, which was unused.
- Simplified channel state: we are always "connected" until we become "lost".
A behavior change is that the "lost" state is now set directly on the IO thread, as soon as the channel error is received. This makes the recreation logic less dependent on precise task ordering.
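The caller-side effect is visible in BrowserGpuChannelHostFactory::EstablishGpuChannelSync in the diff below; a condensed (non-compilable) fragment:

```cpp
// Condensed from BrowserGpuChannelHostFactory::EstablishGpuChannelSync.
if (gpu_channel_.get()) {
  // IsLost() reads the flag that the IO thread set directly, so a lost
  // channel can be recreated as soon as the error is observed, without
  // waiting for any "channel lost" task to reach the main thread.
  if (gpu_channel_->IsLost())
    gpu_channel_ = NULL;  // Drop the lost channel and fall through.
  else
    return gpu_channel_.get();
}

// ... ask the GPU process for a new channel handle (unchanged) ...

gpu_channel_ = GpuChannelHost::Create(
    this, request.gpu_host_id, gpu_client_id_,
    request.gpu_info, request.channel_handle);
return gpu_channel_.get();
```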
BUG=242826
R=apatrick@chromium.org
Review URL: https://codereview.chromium.org/16228004
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@205992 0039d316-1c4b-4281-b951-d872f2087c98
9 files changed, 199 insertions, 210 deletions
diff --git a/content/browser/gpu/browser_gpu_channel_host_factory.cc b/content/browser/gpu/browser_gpu_channel_host_factory.cc index cda1c34..05159d2 100644 --- a/content/browser/gpu/browser_gpu_channel_host_factory.cc +++ b/content/browser/gpu/browser_gpu_channel_host_factory.cc @@ -62,10 +62,6 @@ bool BrowserGpuChannelHostFactory::IsMainThread() { return BrowserThread::CurrentlyOn(BrowserThread::UI); } -bool BrowserGpuChannelHostFactory::IsIOThread() { - return BrowserThread::CurrentlyOn(BrowserThread::IO); -} - base::MessageLoop* BrowserGpuChannelHostFactory::GetMainLoop() { return BrowserThread::UnsafeGetMessageLoopForThread(BrowserThread::UI); } @@ -253,7 +249,7 @@ GpuChannelHost* BrowserGpuChannelHostFactory::EstablishGpuChannelSync( CauseForGpuLaunch cause_for_gpu_launch) { if (gpu_channel_.get()) { // Recreate the channel if it has been lost. - if (gpu_channel_->state() == GpuChannelHost::kLost) + if (gpu_channel_->IsLost()) gpu_channel_ = NULL; else return gpu_channel_.get(); @@ -281,13 +277,10 @@ GpuChannelHost* BrowserGpuChannelHostFactory::EstablishGpuChannelSync( if (request.channel_handle.name.empty()) return NULL; - gpu_channel_ = new GpuChannelHost(this, request.gpu_host_id, gpu_client_id_); - gpu_channel_->set_gpu_info(request.gpu_info); GetContentClient()->SetGpuInfo(request.gpu_info); - - // Connect to the GPU process if a channel name was received. - gpu_channel_->Connect(request.channel_handle); - + gpu_channel_ = GpuChannelHost::Create( + this, request.gpu_host_id, gpu_client_id_, + request.gpu_info, request.channel_handle); return gpu_channel_.get(); } diff --git a/content/browser/gpu/browser_gpu_channel_host_factory.h b/content/browser/gpu/browser_gpu_channel_host_factory.h index 5a28fad..abd7483 100644 --- a/content/browser/gpu/browser_gpu_channel_host_factory.h +++ b/content/browser/gpu/browser_gpu_channel_host_factory.h @@ -22,7 +22,6 @@ class BrowserGpuChannelHostFactory : public GpuChannelHostFactory { // GpuChannelHostFactory implementation. 
virtual bool IsMainThread() OVERRIDE; - virtual bool IsIOThread() OVERRIDE; virtual base::MessageLoop* GetMainLoop() OVERRIDE; virtual scoped_refptr<base::MessageLoopProxy> GetIOLoopProxy() OVERRIDE; virtual base::WaitableEvent* GetShutDownEvent() OVERRIDE; diff --git a/content/common/gpu/client/gpu_channel_host.cc b/content/common/gpu/client/gpu_channel_host.cc index 1d83372..fc6e89d 100644 --- a/content/common/gpu/client/gpu_channel_host.cc +++ b/content/common/gpu/client/gpu_channel_host.cc @@ -4,6 +4,8 @@ #include "content/common/gpu/client/gpu_channel_host.h" +#include <algorithm> + #include "base/bind.h" #include "base/debug/trace_event.h" #include "base/message_loop.h" @@ -29,18 +31,32 @@ GpuListenerInfo::GpuListenerInfo() {} GpuListenerInfo::~GpuListenerInfo() {} -GpuChannelHost::GpuChannelHost( - GpuChannelHostFactory* factory, int gpu_host_id, int client_id) +// static +scoped_refptr<GpuChannelHost> GpuChannelHost::Create( + GpuChannelHostFactory* factory, + int gpu_host_id, + int client_id, + const gpu::GPUInfo& gpu_info, + const IPC::ChannelHandle& channel_handle) { + DCHECK(factory->IsMainThread()); + scoped_refptr<GpuChannelHost> host = new GpuChannelHost( + factory, gpu_host_id, client_id, gpu_info); + host->Connect(channel_handle); + return host; +} + +GpuChannelHost::GpuChannelHost(GpuChannelHostFactory* factory, + int gpu_host_id, + int client_id, + const gpu::GPUInfo& gpu_info) : factory_(factory), client_id_(client_id), gpu_host_id_(gpu_host_id), - state_(kUnconnected) { + gpu_info_(gpu_info) { next_transfer_buffer_id_.GetNext(); } -void GpuChannelHost::Connect( - const IPC::ChannelHandle& channel_handle) { - DCHECK(factory_->IsMainThread()); +void GpuChannelHost::Connect(const IPC::ChannelHandle& channel_handle) { // Open a channel to the GPU process. We pass NULL as the main listener here // since we need to filter everything to route it to the right thread. scoped_refptr<base::MessageLoopProxy> io_loop = factory_->GetIOLoopProxy(); @@ -56,51 +72,17 @@ void GpuChannelHost::Connect( channel_->AddFilter(sync_filter_.get()); - channel_filter_ = new MessageFilter(AsWeakPtr(), factory_); + channel_filter_ = new MessageFilter(); // Install the filter last, because we intercept all leftover // messages. channel_->AddFilter(channel_filter_.get()); - - // It is safe to send IPC messages before the channel completes the connection - // and receives the hello message from the GPU process. The messages get - // cached. - state_ = kConnected; -} - -void GpuChannelHost::set_gpu_info(const gpu::GPUInfo& gpu_info) { - gpu_info_ = gpu_info; -} - -void GpuChannelHost::SetStateLost() { - state_ = kLost; -} - -const gpu::GPUInfo& GpuChannelHost::gpu_info() const { - return gpu_info_; } -void GpuChannelHost::OnMessageReceived(const IPC::Message& message) { - bool handled = true; - - IPC_BEGIN_MESSAGE_MAP(GpuChannelHost, message) - IPC_MESSAGE_HANDLER(GpuChannelMsg_GenerateMailboxNamesReply, - OnGenerateMailboxNamesReply) - IPC_MESSAGE_UNHANDLED(handled = false) - IPC_END_MESSAGE_MAP() - - DCHECK(handled); -} - -void GpuChannelHost::OnChannelError() { - state_ = kLost; - - // Channel is invalid and will be reinitialized if this host is requested - // again. - channel_.reset(); -} - -bool GpuChannelHost::Send(IPC::Message* message) { +bool GpuChannelHost::Send(IPC::Message* msg) { + // Callee takes ownership of message, regardless of whether Send is + // successful. See IPC::Sender. 
+ scoped_ptr<IPC::Message> message(msg); // The GPU process never sends synchronous IPCs so clear the unblock flag to // preserve order. message->set_unblock(false); @@ -114,18 +96,13 @@ bool GpuChannelHost::Send(IPC::Message* message) { // TODO: Can we just always use sync_filter_ since we setup the channel // without a main listener? if (factory_->IsMainThread()) { - if (channel_) { - // http://crbug.com/125264 - base::ThreadRestrictions::ScopedAllowWait allow_wait; - return channel_->Send(message); - } + // http://crbug.com/125264 + base::ThreadRestrictions::ScopedAllowWait allow_wait; + return channel_->Send(message.release()); } else if (base::MessageLoop::current()) { - return sync_filter_->Send(message); + return sync_filter_->Send(message.release()); } - // Callee takes ownership of message, regardless of whether Send is - // successful. See IPC::Sender. - delete message; return false; } @@ -141,11 +118,6 @@ CommandBufferProxyImpl* GpuChannelHost::CreateViewCommandBuffer( "surface_id", surface_id); - AutoLock lock(context_lock_); - // An error occurred. Need to get the host again to reinitialize it. - if (!channel_) - return NULL; - GPUCreateCommandBufferConfig init_params; init_params.share_group_id = share_group ? share_group->GetRouteID() : MSG_ROUTING_NONE; @@ -160,6 +132,8 @@ CommandBufferProxyImpl* GpuChannelHost::CreateViewCommandBuffer( CommandBufferProxyImpl* command_buffer = new CommandBufferProxyImpl(this, route_id); AddRoute(route_id, command_buffer->AsWeakPtr()); + + AutoLock lock(context_lock_); proxies_[route_id] = command_buffer; return command_buffer; } @@ -173,11 +147,6 @@ CommandBufferProxyImpl* GpuChannelHost::CreateOffscreenCommandBuffer( gfx::GpuPreference gpu_preference) { TRACE_EVENT0("gpu", "GpuChannelHost::CreateOffscreenCommandBuffer"); - AutoLock lock(context_lock_); - // An error occurred. Need to get the host again to reinitialize it. - if (!channel_) - return NULL; - GPUCreateCommandBufferConfig init_params; init_params.share_group_id = share_group ? share_group->GetRouteID() : MSG_ROUTING_NONE; @@ -198,6 +167,8 @@ CommandBufferProxyImpl* GpuChannelHost::CreateOffscreenCommandBuffer( CommandBufferProxyImpl* command_buffer = new CommandBufferProxyImpl(this, route_id); AddRoute(route_id, command_buffer->AsWeakPtr()); + + AutoLock lock(context_lock_); proxies_[route_id] = command_buffer; return command_buffer; } @@ -217,13 +188,12 @@ void GpuChannelHost::DestroyCommandBuffer( CommandBufferProxyImpl* command_buffer) { TRACE_EVENT0("gpu", "GpuChannelHost::DestroyCommandBuffer"); - AutoLock lock(context_lock_); int route_id = command_buffer->GetRouteID(); Send(new GpuChannelMsg_DestroyCommandBuffer(route_id)); - // Check the proxy has not already been removed after a channel error. 
- if (proxies_.find(route_id) != proxies_.end()) - proxies_.erase(route_id); RemoveRoute(route_id); + + AutoLock lock(context_lock_); + proxies_.erase(route_id); delete command_buffer; } @@ -255,9 +225,7 @@ void GpuChannelHost::RemoveRoute(int route_id) { base::SharedMemoryHandle GpuChannelHost::ShareToGpuProcess( base::SharedMemoryHandle source_handle) { - AutoLock lock(context_lock_); - - if (!channel_) + if (IsLost()) return base::SharedMemory::NULLHandle(); #if defined(OS_WIN) @@ -283,51 +251,38 @@ base::SharedMemoryHandle GpuChannelHost::ShareToGpuProcess( bool GpuChannelHost::GenerateMailboxNames(unsigned num, std::vector<gpu::Mailbox>* names) { + DCHECK(names->empty()); TRACE_EVENT0("gpu", "GenerateMailboxName"); - AutoLock lock(context_lock_); + size_t generate_count = channel_filter_->GetMailboxNames(num, names); - if (num > mailbox_name_pool_.size()) { - if (!Send(new GpuChannelMsg_GenerateMailboxNames(num, names))) + if (names->size() < num) { + std::vector<gpu::Mailbox> new_names; + if (!Send(new GpuChannelMsg_GenerateMailboxNames(num - names->size(), + &new_names))) return false; - } else { - names->insert(names->begin(), - mailbox_name_pool_.end() - num, - mailbox_name_pool_.end()); - mailbox_name_pool_.erase(mailbox_name_pool_.end() - num, - mailbox_name_pool_.end()); + names->insert(names->end(), new_names.begin(), new_names.end()); } - const unsigned ideal_mailbox_pool_size = 100; - if (mailbox_name_pool_.size() < ideal_mailbox_pool_size / 2) { - Send(new GpuChannelMsg_GenerateMailboxNamesAsync( - ideal_mailbox_pool_size - mailbox_name_pool_.size())); - } + if (generate_count > 0) + Send(new GpuChannelMsg_GenerateMailboxNamesAsync(generate_count)); return true; } -void GpuChannelHost::OnGenerateMailboxNamesReply( - const std::vector<gpu::Mailbox>& names) { - TRACE_EVENT0("gpu", "OnGenerateMailboxNamesReply"); - AutoLock lock(context_lock_); - - mailbox_name_pool_.insert(mailbox_name_pool_.end(), - names.begin(), - names.end()); -} - int32 GpuChannelHost::ReserveTransferBufferId() { return next_transfer_buffer_id_.GetNext(); } -GpuChannelHost::~GpuChannelHost() {} +GpuChannelHost::~GpuChannelHost() { + // channel_ must be destroyed on the main thread. + if (!factory_->IsMainThread()) + factory_->GetMainLoop()->DeleteSoon(FROM_HERE, channel_.release()); +} -GpuChannelHost::MessageFilter::MessageFilter( - base::WeakPtr<GpuChannelHost> parent, - GpuChannelHostFactory* factory) - : parent_(parent), - main_thread_loop_(factory->GetMainLoop()->message_loop_proxy()) { +GpuChannelHost::MessageFilter::MessageFilter() + : lost_(false), + requested_mailboxes_(0) { } GpuChannelHost::MessageFilter::~MessageFilter() {} @@ -355,12 +310,8 @@ bool GpuChannelHost::MessageFilter::OnMessageReceived( if (message.is_reply()) return false; - if (message.routing_id() == MSG_ROUTING_CONTROL) { - main_thread_loop_->PostTask( - FROM_HERE, base::Bind( - &GpuChannelHost::OnMessageReceived, parent_, message)); - return true; - } + if (message.routing_id() == MSG_ROUTING_CONTROL) + return OnControlMessageReceived(message); ListenerMap::iterator it = listeners_.find(message.routing_id()); @@ -378,12 +329,14 @@ bool GpuChannelHost::MessageFilter::OnMessageReceived( } void GpuChannelHost::MessageFilter::OnChannelError() { - // Post the task to signal the GpuChannelHost before the proxies. That way, if - // they themselves post a task to recreate the context, they will not try to - // re-use this channel host before it has a chance to mark itself lost. 
- main_thread_loop_->PostTask( - FROM_HERE, - base::Bind(&GpuChannelHost::OnChannelError, parent_)); + // Set the lost state before signalling the proxies. That way, if they + // themselves post a task to recreate the context, they will not try to re-use + // this channel host. + { + AutoLock lock(lock_); + lost_ = true; + } + // Inform all the proxies that an error has occurred. This will be reported // via OpenGL as a lost context. for (ListenerMap::iterator it = listeners_.begin(); @@ -398,4 +351,55 @@ void GpuChannelHost::MessageFilter::OnChannelError() { listeners_.clear(); } +bool GpuChannelHost::MessageFilter::IsLost() const { + AutoLock lock(lock_); + return lost_; +} + +size_t GpuChannelHost::MessageFilter::GetMailboxNames( + size_t num, std::vector<gpu::Mailbox>* names) { + AutoLock lock(lock_); + size_t count = std::min(num, mailbox_name_pool_.size()); + names->insert(names->begin(), + mailbox_name_pool_.end() - count, + mailbox_name_pool_.end()); + mailbox_name_pool_.erase(mailbox_name_pool_.end() - count, + mailbox_name_pool_.end()); + + const size_t ideal_mailbox_pool_size = 100; + size_t total = mailbox_name_pool_.size() + requested_mailboxes_; + DCHECK_LE(total, ideal_mailbox_pool_size); + if (total >= ideal_mailbox_pool_size / 2) + return 0; + size_t request = ideal_mailbox_pool_size - total; + requested_mailboxes_ += request; + return request; +} + +bool GpuChannelHost::MessageFilter::OnControlMessageReceived( + const IPC::Message& message) { + bool handled = true; + + IPC_BEGIN_MESSAGE_MAP(GpuChannelHost::MessageFilter, message) + IPC_MESSAGE_HANDLER(GpuChannelMsg_GenerateMailboxNamesReply, + OnGenerateMailboxNamesReply) + IPC_MESSAGE_UNHANDLED(handled = false) + IPC_END_MESSAGE_MAP() + + DCHECK(handled); + return handled; +} + +void GpuChannelHost::MessageFilter::OnGenerateMailboxNamesReply( + const std::vector<gpu::Mailbox>& names) { + TRACE_EVENT0("gpu", "OnGenerateMailboxNamesReply"); + AutoLock lock(lock_); + DCHECK_LE(names.size(), requested_mailboxes_); + requested_mailboxes_ -= names.size(); + mailbox_name_pool_.insert(mailbox_name_pool_.end(), + names.begin(), + names.end()); +} + + } // namespace content diff --git a/content/common/gpu/client/gpu_channel_host.h b/content/common/gpu/client/gpu_channel_host.h index 12c1ecf..da651e9 100644 --- a/content/common/gpu/client/gpu_channel_host.h +++ b/content/common/gpu/client/gpu_channel_host.h @@ -65,7 +65,6 @@ class CONTENT_EXPORT GpuChannelHostFactory { virtual ~GpuChannelHostFactory() {} virtual bool IsMainThread() = 0; - virtual bool IsIOThread() = 0; virtual base::MessageLoop* GetMainLoop() = 0; virtual scoped_refptr<base::MessageLoopProxy> GetIOLoopProxy() = 0; virtual base::WaitableEvent* GetShutDownEvent() = 0; @@ -82,39 +81,26 @@ class CONTENT_EXPORT GpuChannelHostFactory { // Encapsulates an IPC channel between the client and one GPU process. // On the GPU process side there's a corresponding GpuChannel. +// Every method can be called on any thread with a message loop, except for the +// IO thread. class GpuChannelHost : public IPC::Sender, - public base::RefCountedThreadSafe<GpuChannelHost>, - public base::SupportsWeakPtr<GpuChannelHost> { + public base::RefCountedThreadSafe<GpuChannelHost> { public: - enum State { - // Not yet connected. - kUnconnected, - // Ready to use. - kConnected, - // An error caused the host to become disconnected. Recreate channel to - // reestablish connection. 
- kLost - }; - - // Called on the render thread - GpuChannelHost(GpuChannelHostFactory* factory, - int gpu_host_id, - int client_id); - - // Connect to GPU process channel. - void Connect(const IPC::ChannelHandle& channel_handle); - - State state() const { return state_; } - - // Change state to kLost. - void SetStateLost(); + // Must be called on the main thread (as defined by the factory). + static scoped_refptr<GpuChannelHost> Create( + GpuChannelHostFactory* factory, + int gpu_host_id, + int client_id, + const gpu::GPUInfo& gpu_info, + const IPC::ChannelHandle& channel_handle); + + bool IsLost() const { + DCHECK(channel_filter_); + return channel_filter_->IsLost(); + } // The GPU stats reported by the GPU process. - void set_gpu_info(const gpu::GPUInfo& gpu_info); - const gpu::GPUInfo& gpu_info() const; - - void OnMessageReceived(const IPC::Message& message); - void OnChannelError(); + const gpu::GPUInfo& gpu_info() const { return gpu_info_; } // IPC::Sender implementation: virtual bool Send(IPC::Message* msg) OVERRIDE; @@ -165,7 +151,7 @@ class GpuChannelHost : public IPC::Sender, base::SharedMemoryHandle ShareToGpuProcess( base::SharedMemoryHandle source_handle); - // Generates n unique mailbox names that can be used with + // Generates |num| unique mailbox names that can be used with // GL_texture_mailbox_CHROMIUM. Unlike genMailboxCHROMIUM, this IPC is // handled only on the GPU process' IO thread, and so is not effectively // a finish. @@ -176,69 +162,92 @@ class GpuChannelHost : public IPC::Sender, private: friend class base::RefCountedThreadSafe<GpuChannelHost>; + GpuChannelHost(GpuChannelHostFactory* factory, + int gpu_host_id, + int client_id, + const gpu::GPUInfo& gpu_info); virtual ~GpuChannelHost(); - - // Message handlers. - void OnGenerateMailboxNamesReply(const std::vector<gpu::Mailbox>& names); + void Connect(const IPC::ChannelHandle& channel_handle); // A filter used internally to route incoming messages from the IO thread - // to the correct message loop. + // to the correct message loop. It also maintains some shared state between + // all the contexts. class MessageFilter : public IPC::ChannelProxy::MessageFilter { public: - MessageFilter(base::WeakPtr<GpuChannelHost> parent, - GpuChannelHostFactory* factory); + MessageFilter(); + // Called on the IO thread. void AddRoute(int route_id, base::WeakPtr<IPC::Listener> listener, scoped_refptr<base::MessageLoopProxy> loop); + // Called on the IO thread. void RemoveRoute(int route_id); - // IPC::ChannelProxy::MessageFilter implementation: + // IPC::ChannelProxy::MessageFilter implementation + // (called on the IO thread): virtual bool OnMessageReceived(const IPC::Message& msg) OVERRIDE; virtual void OnChannelError() OVERRIDE; + // The following methods can be called on any thread. + + // Whether the channel is lost. + bool IsLost() const; + + // Gets mailboxes from the pool, and return the number of mailboxes to ask + // the GPU process to maintain a good pool size. The caller is responsible + // for sending the GpuChannelMsg_GenerateMailboxNamesAsync message. + size_t GetMailboxNames(size_t num, std::vector<gpu::Mailbox>* names); + private: virtual ~MessageFilter(); + bool OnControlMessageReceived(const IPC::Message& msg); - // Note: this reference can only be used to post tasks back to the - // GpuChannelHost, it is illegal to dereference on the IO thread where the - // MessageFilter lives. - base::WeakPtr<GpuChannelHost> parent_; - - scoped_refptr<base::MessageLoopProxy> main_thread_loop_; + // Message handlers. 
+ void OnGenerateMailboxNamesReply(const std::vector<gpu::Mailbox>& names); + // Threading notes: |listeners_| is only accessed on the IO thread. Every + // other field is protected by |lock_|. typedef base::hash_map<int, GpuListenerInfo> ListenerMap; ListenerMap listeners_; - }; - GpuChannelHostFactory* factory_; - int client_id_; - int gpu_host_id_; + // Protexts all fields below this one. + mutable base::Lock lock_; - State state_; + // Whether the channel has been lost. + bool lost_; - gpu::GPUInfo gpu_info_; + // A pool of valid mailbox names. + std::vector<gpu::Mailbox> mailbox_name_pool_; - scoped_ptr<IPC::SyncChannel> channel_; - scoped_refptr<MessageFilter> channel_filter_; + // Number of pending mailbox requested from the GPU process. + size_t requested_mailboxes_; + }; - // Used to look up a proxy from its routing id. - typedef base::hash_map<int, CommandBufferProxyImpl*> ProxyMap; - ProxyMap proxies_; + // Threading notes: all fields are constant during the lifetime of |this| + // except: + // - |next_transfer_buffer_id_|, atomic type + // - |proxies_|, protected by |context_lock_| + GpuChannelHostFactory* const factory_; + const int client_id_; + const int gpu_host_id_; - // A lock to guard against concurrent access to members like the proxies map - // for calls from contexts that may live on the compositor or main thread. - mutable base::Lock context_lock_; + const gpu::GPUInfo gpu_info_; + + scoped_ptr<IPC::SyncChannel> channel_; + scoped_refptr<MessageFilter> channel_filter_; // A filter for sending messages from thread other than the main thread. scoped_refptr<IPC::SyncMessageFilter> sync_filter_; - // A pool of valid mailbox names. - std::vector<gpu::Mailbox> mailbox_name_pool_; - // Transfer buffer IDs are allocated in sequence. base::AtomicSequenceNumber next_transfer_buffer_id_; + // Protects proxies_. + mutable base::Lock context_lock_; + // Used to look up a proxy from its routing id. + typedef base::hash_map<int, CommandBufferProxyImpl*> ProxyMap; + ProxyMap proxies_; + DISALLOW_COPY_AND_ASSIGN(GpuChannelHost); }; diff --git a/content/common/gpu/client/webgraphicscontext3d_command_buffer_impl.cc b/content/common/gpu/client/webgraphicscontext3d_command_buffer_impl.cc index 19b4edb..cbae1f2 100644 --- a/content/common/gpu/client/webgraphicscontext3d_command_buffer_impl.cc +++ b/content/common/gpu/client/webgraphicscontext3d_command_buffer_impl.cc @@ -303,7 +303,6 @@ bool WebGraphicsContext3DCommandBufferImpl::Initialize( host_ = factory_->EstablishGpuChannelSync(cause); if (!host_.get()) return false; - DCHECK(host_->state() == GpuChannelHost::kConnected); command_buffer_size_ = command_buffer_size; start_transfer_buffer_size_ = start_transfer_buffer_size; @@ -1465,7 +1464,7 @@ WGC3Denum WebGraphicsContext3DCommandBufferImpl::getGraphicsResetStatusARB() { bool WebGraphicsContext3DCommandBufferImpl::IsCommandBufferContextLost() { // If the channel shut down unexpectedly, let that supersede the // command buffer's state. 
- if (host_.get() && host_->state() == GpuChannelHost::kLost) + if (host_.get() && host_->IsLost()) return true; gpu::CommandBuffer::State state = command_buffer_->GetLastState(); return state.error == gpu::error::kLostContext; @@ -1548,7 +1547,7 @@ void WebGraphicsContext3DCommandBufferImpl::signalSyncPoint( void WebGraphicsContext3DCommandBufferImpl::genMailboxCHROMIUM( WGC3Dbyte* name) { - std::vector<gpu::Mailbox> names(1); + std::vector<gpu::Mailbox> names; if (command_buffer_->GenerateMailboxNames(1, &names)) memcpy(name, names[0].name, GL_MAILBOX_SIZE_CHROMIUM); else diff --git a/content/renderer/media/pepper_platform_video_decoder_impl.cc b/content/renderer/media/pepper_platform_video_decoder_impl.cc index 9fe3798..12fba0b 100644 --- a/content/renderer/media/pepper_platform_video_decoder_impl.cc +++ b/content/renderer/media/pepper_platform_video_decoder_impl.cc @@ -40,8 +40,6 @@ bool PlatformVideoDecoderImpl::Initialize(media::VideoCodecProfile profile) { if (!channel) return false; - DCHECK_EQ(channel->state(), GpuChannelHost::kConnected); - // Send IPC message to initialize decoder in GPU process. decoder_ = channel->CreateVideoDecoder(command_buffer_route_id_, profile, this); diff --git a/content/renderer/pepper/pepper_platform_context_3d_impl.cc b/content/renderer/pepper/pepper_platform_context_3d_impl.cc index 982f0ad..4ffcd63 100644 --- a/content/renderer/pepper/pepper_platform_context_3d_impl.cc +++ b/content/renderer/pepper/pepper_platform_context_3d_impl.cc @@ -52,7 +52,6 @@ bool PlatformContext3DImpl::Init(const int32* attrib_list, CAUSE_FOR_GPU_LAUNCH_PEPPERPLATFORMCONTEXT3DIMPL_INITIALIZE); if (!channel_.get()) return false; - DCHECK(channel_->state() == GpuChannelHost::kConnected); gfx::Size surface_size; std::vector<int32> attribs; diff --git a/content/renderer/render_thread_impl.cc b/content/renderer/render_thread_impl.cc index efe9fe6..67cd0eb 100644 --- a/content/renderer/render_thread_impl.cc +++ b/content/renderer/render_thread_impl.cc @@ -1007,11 +1007,6 @@ bool RenderThreadImpl::IsMainThread() { return !!current(); } -bool RenderThreadImpl::IsIOThread() { - return base::MessageLoop::current() == - ChildProcess::current()->io_message_loop(); -} - base::MessageLoop* RenderThreadImpl::GetMainLoop() { return message_loop(); } @@ -1137,9 +1132,8 @@ GpuChannelHost* RenderThreadImpl::EstablishGpuChannelSync( if (gpu_channel_.get()) { // Do nothing if we already have a GPU channel or are already // establishing one. - if (gpu_channel_->state() == GpuChannelHost::kUnconnected || - gpu_channel_->state() == GpuChannelHost::kConnected) - return GetGpuChannel(); + if (!gpu_channel_->IsLost()) + return gpu_channel_.get(); // Recreate the channel if it has been lost. gpu_channel_ = NULL; @@ -1158,18 +1152,13 @@ GpuChannelHost* RenderThreadImpl::EstablishGpuChannelSync( #endif channel_handle.name.empty()) { // Otherwise cancel the connection. - gpu_channel_ = NULL; return NULL; } - gpu_channel_ = new GpuChannelHost(this, 0, client_id); - gpu_channel_->set_gpu_info(gpu_info); GetContentClient()->SetGpuInfo(gpu_info); - - // Connect to the GPU process if a channel name was received. 
- gpu_channel_->Connect(channel_handle); - - return GetGpuChannel(); + gpu_channel_ = GpuChannelHost::Create( + this, 0, client_id, gpu_info, channel_handle); + return gpu_channel_.get(); } WebKit::WebMediaStreamCenter* RenderThreadImpl::CreateMediaStreamCenter( @@ -1208,7 +1197,7 @@ GpuChannelHost* RenderThreadImpl::GetGpuChannel() { if (!gpu_channel_.get()) return NULL; - if (gpu_channel_->state() != GpuChannelHost::kConnected) + if (gpu_channel_->IsLost()) return NULL; return gpu_channel_.get(); diff --git a/content/renderer/render_thread_impl.h b/content/renderer/render_thread_impl.h index e407ac7..552e2cb 100644 --- a/content/renderer/render_thread_impl.h +++ b/content/renderer/render_thread_impl.h @@ -158,7 +158,6 @@ class CONTENT_EXPORT RenderThreadImpl : public RenderThread, // GpuChannelHostFactory implementation: virtual bool IsMainThread() OVERRIDE; - virtual bool IsIOThread() OVERRIDE; virtual base::MessageLoop* GetMainLoop() OVERRIDE; virtual scoped_refptr<base::MessageLoopProxy> GetIOLoopProxy() OVERRIDE; virtual base::WaitableEvent* GetShutDownEvent() OVERRIDE; |