// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "content/browser/worker_host/worker_service_impl.h"

#include <set>

#include "base/command_line.h"
#include "base/logging.h"
#include "base/threading/thread.h"
#include "content/browser/devtools/worker_devtools_manager.h"
#include "content/browser/renderer_host/render_widget_host_impl.h"
#include "content/browser/worker_host/worker_message_filter.h"
#include "content/browser/worker_host/worker_process_host.h"
#include "content/common/view_messages.h"
#include "content/common/worker_messages.h"
#include "content/public/browser/child_process_data.h"
#include "content/public/browser/notification_service.h"
#include "content/public/browser/notification_types.h"
#include "content/public/browser/render_process_host.h"
#include "content/public/browser/render_view_host.h"
#include "content/public/browser/render_widget_host.h"
#include "content/public/browser/render_widget_host_iterator.h"
#include "content/public/browser/render_widget_host_view.h"
#include "content/public/browser/resource_context.h"
#include "content/public/browser/web_contents.h"
#include "content/public/browser/worker_service_observer.h"
#include "content/public/common/content_switches.h"
#include "content/public/common/process_type.h"

namespace content {

const int WorkerServiceImpl::kMaxWorkersWhenSeparate = 64;
const int WorkerServiceImpl::kMaxWorkersPerTabWhenSeparate = 16;

// Observes widget visibility changes on the UI thread and adjusts the
// priority of shared worker processes on the IO thread, backgrounding any
// worker process whose client documents are all hidden.
class WorkerPrioritySetter
    : public NotificationObserver,
      public base::RefCountedThreadSafe<WorkerPrioritySetter,
                                        BrowserThread::DeleteOnUIThread> {
 public:
  WorkerPrioritySetter();

  // Posts a task to the UI thread to register to receive notifications.
  void Initialize();

  // Invoked by WorkerServiceImpl when a worker process is created.
  void NotifyWorkerProcessCreated();

 private:
  friend class base::RefCountedThreadSafe<WorkerPrioritySetter>;
  friend struct BrowserThread::DeleteOnThread<BrowserThread::UI>;
  friend class base::DeleteHelper<WorkerPrioritySetter>;
  virtual ~WorkerPrioritySetter();

  // Posts a task to perform a worker priority update.
  void PostTaskToGatherAndUpdateWorkerPriorities();

  // Gathers up a list of the visible tabs and then updates priorities for
  // all the shared workers.
  void GatherVisibleIDsAndUpdateWorkerPriorities();

  // Registers as an observer to receive notifications about
  // widgets being shown.
  void RegisterObserver();

  // Sets priorities for shared workers given a set of visible tabs (as a
  // std::set of std::pair<render_process_id, render_view_id> ids).
  void UpdateWorkerPrioritiesFromVisibleSet(
      const std::set<std::pair<int, int> >* visible);

  // Called to refresh worker priorities when focus changes between tabs.
  void OnRenderWidgetVisibilityChanged(std::pair<int, int>);

  // NotificationObserver implementation.
  virtual void Observe(int type,
                       const NotificationSource& source,
                       const NotificationDetails& details) OVERRIDE;

  NotificationRegistrar registrar_;
};

WorkerPrioritySetter::WorkerPrioritySetter() {
}

WorkerPrioritySetter::~WorkerPrioritySetter() {
  DCHECK(BrowserThread::CurrentlyOn(BrowserThread::UI));
}

void WorkerPrioritySetter::Initialize() {
  BrowserThread::PostTask(
      BrowserThread::UI, FROM_HERE,
      base::Bind(&WorkerPrioritySetter::RegisterObserver, this));
}

void WorkerPrioritySetter::NotifyWorkerProcessCreated() {
  PostTaskToGatherAndUpdateWorkerPriorities();
}

void WorkerPrioritySetter::PostTaskToGatherAndUpdateWorkerPriorities() {
  BrowserThread::PostTask(
      BrowserThread::UI, FROM_HERE,
      base::Bind(
          &WorkerPrioritySetter::GatherVisibleIDsAndUpdateWorkerPriorities,
          this));
}

void WorkerPrioritySetter::GatherVisibleIDsAndUpdateWorkerPriorities() {
  DCHECK(BrowserThread::CurrentlyOn(BrowserThread::UI));
  std::set<std::pair<int, int> >* visible_renderer_ids =
      new std::set<std::pair<int, int> >();

  // Gather up all the visible renderer process/view pairs.
  scoped_ptr<RenderWidgetHostIterator> widgets(
      RenderWidgetHost::GetRenderWidgetHosts());
  while (RenderWidgetHost* widget = widgets->GetNextHost()) {
    if (widget->GetProcess()->VisibleWidgetCount() == 0)
      continue;

    RenderWidgetHostView* render_view = widget->GetView();
    if (render_view && render_view->IsShowing()) {
      visible_renderer_ids->insert(
          std::pair<int, int>(widget->GetProcess()->GetID(),
                              widget->GetRoutingID()));
    }
  }

  BrowserThread::PostTask(
      BrowserThread::IO, FROM_HERE,
      base::Bind(&WorkerPrioritySetter::UpdateWorkerPrioritiesFromVisibleSet,
                 this, base::Owned(visible_renderer_ids)));
}

void WorkerPrioritySetter::UpdateWorkerPrioritiesFromVisibleSet(
    const std::set<std::pair<int, int> >* visible_renderer_ids) {
  DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO));

  for (WorkerProcessHostIterator iter; !iter.Done(); ++iter) {
    if (!iter->process_launched())
      continue;
    bool throttle = true;

    for (WorkerProcessHost::Instances::const_iterator instance =
             iter->instances().begin();
         instance != iter->instances().end(); ++instance) {
      // This code assumes one worker per process.
      WorkerProcessHost::Instances::const_iterator first_instance =
          iter->instances().begin();
      if (first_instance == iter->instances().end())
        continue;

      WorkerDocumentSet::DocumentInfoSet::const_iterator info =
          first_instance->worker_document_set()->documents().begin();
      for (; info != first_instance->worker_document_set()->documents().end();
           ++info) {
        std::pair<int, int> id(
            info->render_process_id(), info->render_view_id());
        if (visible_renderer_ids->find(id) != visible_renderer_ids->end()) {
          throttle = false;
          break;
        }
      }

      if (!throttle)
        break;
    }

    iter->SetBackgrounded(throttle);
  }
}

void WorkerPrioritySetter::OnRenderWidgetVisibilityChanged(
    std::pair<int, int> id) {
  DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO));
  std::set<std::pair<int, int> > visible_renderer_ids;

  visible_renderer_ids.insert(id);

  UpdateWorkerPrioritiesFromVisibleSet(&visible_renderer_ids);
}

void WorkerPrioritySetter::RegisterObserver() {
  DCHECK(BrowserThread::CurrentlyOn(BrowserThread::UI));
  registrar_.Add(this, NOTIFICATION_RENDER_WIDGET_VISIBILITY_CHANGED,
                 NotificationService::AllBrowserContextsAndSources());
  registrar_.Add(this, NOTIFICATION_RENDERER_PROCESS_CREATED,
                 NotificationService::AllBrowserContextsAndSources());
}

void WorkerPrioritySetter::Observe(int type,
                                   const NotificationSource& source,
                                   const NotificationDetails& details) {
  if (type == NOTIFICATION_RENDER_WIDGET_VISIBILITY_CHANGED) {
    bool visible = *Details<bool>(details).ptr();

    if (visible) {
      int render_widget_id =
          Source<RenderWidgetHost>(source).ptr()->GetRoutingID();
      int render_process_pid =
          Source<RenderWidgetHost>(source).ptr()->GetProcess()->GetID();

      BrowserThread::PostTask(
          BrowserThread::IO, FROM_HERE,
          base::Bind(&WorkerPrioritySetter::OnRenderWidgetVisibilityChanged,
                     this, std::pair<int, int>(render_process_pid,
                                               render_widget_id)));
    }
  } else if (type == NOTIFICATION_RENDERER_PROCESS_CREATED) {
    PostTaskToGatherAndUpdateWorkerPriorities();
  }
}

WorkerService* WorkerService::GetInstance() {
  return WorkerServiceImpl::GetInstance();
}

WorkerServiceImpl* WorkerServiceImpl::GetInstance() {
  DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO));
  return Singleton<WorkerServiceImpl>::get();
}

WorkerServiceImpl::WorkerServiceImpl()
    : priority_setter_(new WorkerPrioritySetter()),
      next_worker_route_id_(0) {
  priority_setter_->Initialize();
}

WorkerServiceImpl::~WorkerServiceImpl() {
  // The observers in observers_ can't be used here because they might be
  // gone already.
}

void WorkerServiceImpl::PerformTeardownForTesting() {
  priority_setter_ = NULL;
}

void WorkerServiceImpl::OnWorkerMessageFilterClosing(
    WorkerMessageFilter* filter) {
  for (WorkerProcessHostIterator iter; !iter.Done(); ++iter) {
    iter->FilterShutdown(filter);
  }

  // See if that process had any queued workers.
  for (WorkerProcessHost::Instances::iterator i = queued_workers_.begin();
       i != queued_workers_.end();) {
    i->RemoveFilters(filter);
    if (i->NumFilters() == 0) {
      i = queued_workers_.erase(i);
    } else {
      ++i;
    }
  }

  for (WorkerProcessHost::Instances::iterator i =
           pending_shared_workers_.begin();
       i != pending_shared_workers_.end(); ) {
    i->RemoveFilters(filter);
    if (i->NumFilters() == 0) {
      i = pending_shared_workers_.erase(i);
    } else {
      ++i;
    }
  }

  // Also, see if that process had any pending shared workers.
  for (WorkerProcessHost::Instances::iterator iter =
           pending_shared_workers_.begin();
       iter != pending_shared_workers_.end(); ) {
    iter->worker_document_set()->RemoveAll(filter);
    if (iter->worker_document_set()->IsEmpty()) {
      iter = pending_shared_workers_.erase(iter);
    } else {
      ++iter;
    }
  }

  // Either a worker process has shut down, in which case we can start one of
  // the queued workers, or a renderer has shut down, in which case it doesn't
  // affect anything. We call this function in both scenarios because then we
  // don't have to keep track of which filters are from worker processes.
  TryStartingQueuedWorker();
}

void WorkerServiceImpl::CreateWorker(
    const ViewHostMsg_CreateWorker_Params& params,
    int route_id,
    WorkerMessageFilter* filter,
    ResourceContext* resource_context,
    const WorkerStoragePartition& partition) {
  DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO));
  // Generate a unique route id for the browser-worker communication that's
  // unique among all worker processes. That way when the worker process sends
  // a wrapped IPC message through us, we know which WorkerProcessHost to give
  // it to.
  WorkerProcessHost::WorkerInstance instance(
      params.url,
      params.name,
      next_worker_route_id(),
      0,
      params.script_resource_appcache_id,
      resource_context,
      partition);
  instance.AddFilter(filter, route_id);
  instance.worker_document_set()->Add(
      filter, params.document_id, filter->render_process_id(),
      params.render_view_route_id);

  CreateWorkerFromInstance(instance);
}

void WorkerServiceImpl::LookupSharedWorker(
    const ViewHostMsg_CreateWorker_Params& params,
    int route_id,
    WorkerMessageFilter* filter,
    ResourceContext* resource_context,
    const WorkerStoragePartition& partition,
    bool* exists,
    bool* url_mismatch) {
  *exists = true;
  WorkerProcessHost::WorkerInstance* instance = FindSharedWorkerInstance(
      params.url, params.name, partition, resource_context);

  if (!instance) {
    // If no worker instance currently exists, we need to create a pending
    // instance - this is to make sure that any subsequent lookups passing a
    // mismatched URL get the appropriate url_mismatch error at lookup time.
    // Having named shared workers was a Really Bad Idea due to details like
    // this.
    instance = CreatePendingInstance(params.url, params.name,
                                     resource_context, partition);
    *exists = false;
  }

  // Make sure the passed-in instance matches the URL - if not, return an
  // error.
  if (params.url != instance->url()) {
    *url_mismatch = true;
    *exists = false;
  } else {
    *url_mismatch = false;
    // Add our route ID to the existing instance so we can send messages to it.
    instance->AddFilter(filter, route_id);

    // Add the passed filter/document_id to the worker instance.
    // TODO(atwilson): This won't work if the message is from a worker process.
    // We don't support that yet though (this message is only sent from
    // renderers) but when we do, we'll need to add code to pass in the current
    // worker's document set for nested workers.
    instance->worker_document_set()->Add(
        filter, params.document_id, filter->render_process_id(),
        params.render_view_route_id);
  }
}

void WorkerServiceImpl::ForwardToWorker(const IPC::Message& message,
                                        WorkerMessageFilter* filter) {
  for (WorkerProcessHostIterator iter; !iter.Done(); ++iter) {
    if (iter->FilterMessage(message, filter))
      return;
  }

  // TODO(jabdelmalek): tell filter that callee is gone.
}

void WorkerServiceImpl::DocumentDetached(unsigned long long document_id,
                                         WorkerMessageFilter* filter) {
  // Any associated shared workers can be shut down.
  for (WorkerProcessHostIterator iter; !iter.Done(); ++iter)
    iter->DocumentDetached(filter, document_id);

  // Remove any queued shared workers for this document.
  for (WorkerProcessHost::Instances::iterator iter = queued_workers_.begin();
       iter != queued_workers_.end();) {
    iter->worker_document_set()->Remove(filter, document_id);
    if (iter->worker_document_set()->IsEmpty()) {
      iter = queued_workers_.erase(iter);
      continue;
    }
    ++iter;
  }

  // Remove the document from any pending shared workers.
  for (WorkerProcessHost::Instances::iterator iter =
           pending_shared_workers_.begin();
       iter != pending_shared_workers_.end(); ) {
    iter->worker_document_set()->Remove(filter, document_id);
    if (iter->worker_document_set()->IsEmpty()) {
      iter = pending_shared_workers_.erase(iter);
    } else {
      ++iter;
    }
  }
}

bool WorkerServiceImpl::CreateWorkerFromInstance(
    WorkerProcessHost::WorkerInstance instance) {
  if (!CanCreateWorkerProcess(instance)) {
    queued_workers_.push_back(instance);
    return true;
  }

  // Check to see if this shared worker is already running (two pages may have
  // tried to start up the worker simultaneously).
  // See if a worker with this name already exists.
  WorkerProcessHost::WorkerInstance* existing_instance =
      FindSharedWorkerInstance(
          instance.url(), instance.name(), instance.partition(),
          instance.resource_context());
  WorkerProcessHost::WorkerInstance::FilterInfo filter_info =
      instance.GetFilter();
  // If this worker is already running, no need to create a new copy. Just
  // inform the caller that the worker has been created.
  if (existing_instance) {
    // Walk the worker's filter list to see if this client is listed. If not,
    // then it means that the worker started by the client already exited so
    // we should not attach to this new one (http://crbug.com/29243).
    if (!existing_instance->HasFilter(filter_info.first, filter_info.second))
      return false;
    filter_info.first->Send(new ViewMsg_WorkerCreated(filter_info.second));
    return true;
  }

  // Look to see if there's a pending instance.
  WorkerProcessHost::WorkerInstance* pending = FindPendingInstance(
      instance.url(), instance.name(), instance.partition(),
      instance.resource_context());
  // If there's no instance *and* no pending instance (or there is a pending
  // instance but it does not contain our filter info), then it means the
  // worker started up and exited already. Log a warning because this should
  // be a very rare occurrence and is probably a bug, but it *can* happen so
  // handle it gracefully.
  if (!pending || !pending->HasFilter(filter_info.first, filter_info.second)) {
    DLOG(WARNING) << "Pending worker already exited";
    return false;
  }

  // Assign the accumulated document set and filter list for this pending
  // worker to the new instance.
  DCHECK(!pending->worker_document_set()->IsEmpty());
  instance.ShareDocumentSet(*pending);
  for (WorkerProcessHost::WorkerInstance::FilterList::const_iterator i =
           pending->filters().begin();
       i != pending->filters().end(); ++i) {
    instance.AddFilter(i->first, i->second);
  }
  RemovePendingInstances(instance.url(), instance.name(),
                         instance.partition(), instance.resource_context());

  // Remove any queued instances of this worker and copy over the filter to
  // this instance.
  for (WorkerProcessHost::Instances::iterator iter = queued_workers_.begin();
       iter != queued_workers_.end();) {
    if (iter->Matches(instance.url(), instance.name(), instance.partition(),
                      instance.resource_context())) {
      DCHECK(iter->NumFilters() == 1);
      WorkerProcessHost::WorkerInstance::FilterInfo filter_info =
          iter->GetFilter();
      instance.AddFilter(filter_info.first, filter_info.second);
      iter = queued_workers_.erase(iter);
    } else {
      ++iter;
    }
  }

  WorkerMessageFilter* first_filter = instance.filters().begin()->first;

  WorkerProcessHost* worker = new WorkerProcessHost(
      instance.resource_context(), instance.partition());
  // TODO(atwilson): This won't work if the message is from a worker process.
  // We don't support that yet though (this message is only sent from
  // renderers) but when we do, we'll need to add code to pass in the current
  // worker's document set for nested workers.
  if (!worker->Init(first_filter->render_process_id())) {
    delete worker;
    return false;
  }

  worker->CreateWorker(instance);
  FOR_EACH_OBSERVER(
      WorkerServiceObserver, observers_,
      WorkerCreated(instance.url(), instance.name(), worker->GetData().id,
                    instance.worker_route_id()));
  WorkerDevToolsManager::GetInstance()->WorkerCreated(worker, instance);
  return true;
}

bool WorkerServiceImpl::CanCreateWorkerProcess(
    const WorkerProcessHost::WorkerInstance& instance) {
  // Worker can be fired off if *any* parent has room.
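  // Both limits are enforced by TabCanCreateWorkerProcess():
  // kMaxWorkersWhenSeparate caps the total number of worker instances across
  // the browser, and kMaxWorkersPerTabWhenSeparate caps the instances
  // parented by a single tab.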
  const WorkerDocumentSet::DocumentInfoSet& parents =
      instance.worker_document_set()->documents();
  for (WorkerDocumentSet::DocumentInfoSet::const_iterator parent_iter =
           parents.begin();
       parent_iter != parents.end(); ++parent_iter) {
    bool hit_total_worker_limit = false;
    if (TabCanCreateWorkerProcess(parent_iter->render_process_id(),
                                  parent_iter->render_view_id(),
                                  &hit_total_worker_limit)) {
      return true;
    }
    // Return false if already at the global worker limit (no need to continue
    // checking parent tabs).
    if (hit_total_worker_limit)
      return false;
  }
  // If we've reached here, none of the parent tabs is allowed to create an
  // instance.
  return false;
}

bool WorkerServiceImpl::TabCanCreateWorkerProcess(
    int render_process_id,
    int render_view_id,
    bool* hit_total_worker_limit) {
  int total_workers = 0;
  int workers_per_tab = 0;
  *hit_total_worker_limit = false;
  for (WorkerProcessHostIterator iter; !iter.Done(); ++iter) {
    for (WorkerProcessHost::Instances::const_iterator cur_instance =
             iter->instances().begin();
         cur_instance != iter->instances().end(); ++cur_instance) {
      total_workers++;
      if (total_workers >= kMaxWorkersWhenSeparate) {
        *hit_total_worker_limit = true;
        return false;
      }
      if (cur_instance->RendererIsParent(render_process_id, render_view_id)) {
        workers_per_tab++;
        if (workers_per_tab >= kMaxWorkersPerTabWhenSeparate)
          return false;
      }
    }
  }
  return true;
}

void WorkerServiceImpl::TryStartingQueuedWorker() {
  if (queued_workers_.empty())
    return;

  for (WorkerProcessHost::Instances::iterator i = queued_workers_.begin();
       i != queued_workers_.end();) {
    if (CanCreateWorkerProcess(*i)) {
      WorkerProcessHost::WorkerInstance instance = *i;
      queued_workers_.erase(i);
      CreateWorkerFromInstance(instance);

      // CreateWorkerFromInstance can modify the queued_workers_ list when it
      // coalesces queued instances after starting a shared worker, so we
      // have to rescan the list from the beginning (our iterator is now
      // invalid). This is not a big deal as having any queued workers will
      // be rare in practice so the list will be small.
      i = queued_workers_.begin();
    } else {
      ++i;
    }
  }
}

bool WorkerServiceImpl::GetRendererForWorker(int worker_process_id,
                                             int* render_process_id,
                                             int* render_view_id) const {
  for (WorkerProcessHostIterator iter; !iter.Done(); ++iter) {
    if (iter.GetData().id != worker_process_id)
      continue;

    // This code assumes one worker per process, see function comment in
    // header!
    WorkerProcessHost::Instances::const_iterator first_instance =
        iter->instances().begin();
    if (first_instance == iter->instances().end())
      return false;

    WorkerDocumentSet::DocumentInfoSet::const_iterator info =
        first_instance->worker_document_set()->documents().begin();
    *render_process_id = info->render_process_id();
    *render_view_id = info->render_view_id();
    return true;
  }
  return false;
}

const WorkerProcessHost::WorkerInstance* WorkerServiceImpl::FindWorkerInstance(
    int worker_process_id) {
  for (WorkerProcessHostIterator iter; !iter.Done(); ++iter) {
    if (iter.GetData().id != worker_process_id)
      continue;

    WorkerProcessHost::Instances::const_iterator instance =
        iter->instances().begin();
    return instance == iter->instances().end() ?
        NULL : &*instance;
  }
  return NULL;
}

bool WorkerServiceImpl::TerminateWorker(int process_id, int route_id) {
  for (WorkerProcessHostIterator iter; !iter.Done(); ++iter) {
    if (iter.GetData().id == process_id) {
      iter->TerminateWorker(route_id);
      return true;
    }
  }
  return false;
}

std::vector<WorkerService::WorkerInfo> WorkerServiceImpl::GetWorkers() {
  std::vector<WorkerService::WorkerInfo> results;
  for (WorkerProcessHostIterator iter; !iter.Done(); ++iter) {
    const WorkerProcessHost::Instances& instances = (*iter)->instances();
    for (WorkerProcessHost::Instances::const_iterator i = instances.begin();
         i != instances.end(); ++i) {
      WorkerService::WorkerInfo info;
      info.url = i->url();
      info.name = i->name();
      info.route_id = i->worker_route_id();
      info.process_id = iter.GetData().id;
      info.handle = iter.GetData().handle;
      results.push_back(info);
    }
  }

  return results;
}

void WorkerServiceImpl::AddObserver(WorkerServiceObserver* observer) {
  DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO));
  observers_.AddObserver(observer);
}

void WorkerServiceImpl::RemoveObserver(WorkerServiceObserver* observer) {
  DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO));
  observers_.RemoveObserver(observer);
}

void WorkerServiceImpl::NotifyWorkerDestroyed(
    WorkerProcessHost* process,
    int worker_route_id) {
  WorkerDevToolsManager::GetInstance()->WorkerDestroyed(
      process, worker_route_id);
  FOR_EACH_OBSERVER(WorkerServiceObserver, observers_,
                    WorkerDestroyed(process->GetData().id, worker_route_id));
}

void WorkerServiceImpl::NotifyWorkerProcessCreated() {
  priority_setter_->NotifyWorkerProcessCreated();
}

WorkerProcessHost::WorkerInstance* WorkerServiceImpl::FindSharedWorkerInstance(
    const GURL& url,
    const string16& name,
    const WorkerStoragePartition& partition,
    ResourceContext* resource_context) {
  for (WorkerProcessHostIterator iter; !iter.Done(); ++iter) {
    for (WorkerProcessHost::Instances::iterator instance_iter =
             iter->mutable_instances().begin();
         instance_iter != iter->mutable_instances().end();
         ++instance_iter) {
      if (instance_iter->Matches(url, name, partition, resource_context))
        return &(*instance_iter);
    }
  }
  return NULL;
}

WorkerProcessHost::WorkerInstance* WorkerServiceImpl::FindPendingInstance(
    const GURL& url,
    const string16& name,
    const WorkerStoragePartition& partition,
    ResourceContext* resource_context) {
  // Walk the pending instances looking for a matching pending worker.
  for (WorkerProcessHost::Instances::iterator iter =
           pending_shared_workers_.begin();
       iter != pending_shared_workers_.end(); ++iter) {
    if (iter->Matches(url, name, partition, resource_context))
      return &(*iter);
  }
  return NULL;
}

void WorkerServiceImpl::RemovePendingInstances(
    const GURL& url,
    const string16& name,
    const WorkerStoragePartition& partition,
    ResourceContext* resource_context) {
  // Walk the pending instances looking for a matching pending worker.
  for (WorkerProcessHost::Instances::iterator iter =
           pending_shared_workers_.begin();
       iter != pending_shared_workers_.end(); ) {
    if (iter->Matches(url, name, partition, resource_context)) {
      iter = pending_shared_workers_.erase(iter);
    } else {
      ++iter;
    }
  }
}

WorkerProcessHost::WorkerInstance* WorkerServiceImpl::CreatePendingInstance(
    const GURL& url,
    const string16& name,
    ResourceContext* resource_context,
    const WorkerStoragePartition& partition) {
  // Look for an existing pending shared worker.
  WorkerProcessHost::WorkerInstance* instance =
      FindPendingInstance(url, name, partition, resource_context);
  if (instance)
    return instance;

  // No existing pending worker - create a new one.
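  // The pending instance only accumulates filters and document references
  // from lookups; CreateWorkerFromInstance() later transfers them to the
  // real instance and drops the pending entry via RemovePendingInstances().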
  WorkerProcessHost::WorkerInstance pending(
      url, true, name, resource_context, partition);
  pending_shared_workers_.push_back(pending);
  return &pending_shared_workers_.back();
}

}  // namespace content