author    eroman@chromium.org <eroman@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>  2010-07-08 19:21:10 +0000
committer eroman@chromium.org <eroman@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>  2010-07-08 19:21:10 +0000
commit b3764212576419eee2fe5b2b1c43d5aef28954a8 (patch)
tree   c59c39e8201d08f1fa4b417833b859ea909066fb /net/proxy
parent b5edb847c268532709058074328dd5565b7d9e7e (diff)
Add the capability to run multiple proxy PAC scripts in parallel.
Refactors SingleThreadedProxyResolver into MultiThreadedProxyResolver.
New threads are created lazily on demand, up to a fixed maximum.

Note that this CL does NOT change the policy in Chrome -- it will continue
to use a single thread for proxy resolving, but using the new code to do so.

BUG=11079
Review URL: http://codereview.chromium.org/2822043

git-svn-id: svn://svn.chromium.org/chrome/trunk/src@51877 0039d316-1c4b-4281-b951-d872f2087c98
Diffstat (limited to 'net/proxy')
-rw-r--r--  net/proxy/multi_threaded_proxy_resolver.cc          588
-rw-r--r--  net/proxy/multi_threaded_proxy_resolver.h           144
-rw-r--r--  net/proxy/multi_threaded_proxy_resolver_unittest.cc (renamed from net/proxy/single_threaded_proxy_resolver_unittest.cc)  388
-rw-r--r--  net/proxy/proxy_resolver.h                             4
-rw-r--r--  net/proxy/proxy_resolver_js_bindings.cc                6
-rw-r--r--  net/proxy/proxy_resolver_js_bindings.h                 3
-rw-r--r--  net/proxy/proxy_resolver_v8.cc                         4
-rw-r--r--  net/proxy/proxy_resolver_v8.h                          1
-rw-r--r--  net/proxy/proxy_resolver_v8_unittest.cc                2
-rw-r--r--  net/proxy/proxy_service.cc                            96
-rw-r--r--  net/proxy/proxy_service.h                              4
-rw-r--r--  net/proxy/single_threaded_proxy_resolver.cc          353
-rw-r--r--  net/proxy/single_threaded_proxy_resolver.h            94
-rw-r--r--  net/proxy/sync_host_resolver_bridge.cc                15
-rw-r--r--  net/proxy/sync_host_resolver_bridge.h                 19
-rw-r--r--  net/proxy/sync_host_resolver_bridge_unittest.cc       41
16 files changed, 1174 insertions, 588 deletions
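The shape of the new API, in brief: MultiThreadedProxyResolver is initialized once with a PAC script and then services GetProxyForURL() calls, dispatching each to an idle worker thread or queueing it FIFO until one frees up. A minimal sketch of the call sequence, mirroring the unit tests further down (this snippet is illustrative, not part of the CL; it assumes the in-tree net types and the TestCompletionCallback test helper, and |factory| is any ProxyResolverFactory, whose ownership the resolver takes):

// Sketch only -- the resolver takes ownership of |factory| and will
// provision at most 3 worker threads, lazily.
MultiThreadedProxyResolver resolver(factory, 3u /* max_num_threads */);

// Initialization provisions thread #0 and runs SetPacScript on it.
TestCompletionCallback init_callback;
int rv = resolver.SetPacScriptByData(ASCIIToUTF16("pac script bytes"),
                                     &init_callback);
EXPECT_EQ(ERR_IO_PENDING, rv);
EXPECT_EQ(OK, init_callback.WaitForResult());

// Requests run on an idle thread, or are queued (FIFO) until one frees up.
TestCompletionCallback callback;
ProxyInfo results;
rv = resolver.GetProxyForURL(GURL("http://www.example.com/"), &results,
                             &callback, NULL, BoundNetLog());
EXPECT_EQ(ERR_IO_PENDING, rv);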
diff --git a/net/proxy/multi_threaded_proxy_resolver.cc b/net/proxy/multi_threaded_proxy_resolver.cc
new file mode 100644
index 0000000..a55fef9
--- /dev/null
+++ b/net/proxy/multi_threaded_proxy_resolver.cc
@@ -0,0 +1,588 @@
+// Copyright (c) 2010 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "net/proxy/multi_threaded_proxy_resolver.h"
+
+#include "base/message_loop.h"
+#include "base/string_util.h"
+#include "base/thread.h"
+#include "net/base/capturing_net_log.h"
+#include "net/base/net_errors.h"
+#include "net/proxy/proxy_info.h"
+
+// TODO(eroman): Have the MultiThreadedProxyResolver clear its PAC script
+// data when SetPacScript fails. That will reclaim memory when
+// testing bogus scripts.
+
+namespace net {
+
+namespace {
+
+class PurgeMemoryTask : public base::RefCountedThreadSafe<PurgeMemoryTask> {
+ public:
+ explicit PurgeMemoryTask(ProxyResolver* resolver) : resolver_(resolver) {}
+ void PurgeMemory() { resolver_->PurgeMemory(); }
+ private:
+ friend class base::RefCountedThreadSafe<PurgeMemoryTask>;
+ ~PurgeMemoryTask() {}
+ ProxyResolver* resolver_;
+};
+
+} // namespace
+
+// An "executor" is a job-runner for PAC requests. It encapsulates a worker
+// thread and a synchronous ProxyResolver (which will be operated on said
+// thread).
+class MultiThreadedProxyResolver::Executor
+ : public base::RefCountedThreadSafe<MultiThreadedProxyResolver::Executor> {
+ public:
+ // |coordinator| must remain valid throughout our lifetime. It is used to
+ // signal when the executor is ready to receive work by calling
+ // |coordinator->OnExecutorReady()|.
+ // The constructor takes ownership of |resolver|.
+ // |thread_number| is an identifier used when naming the worker thread.
+ Executor(MultiThreadedProxyResolver* coordinator,
+ ProxyResolver* resolver,
+ int thread_number);
+
+ // Submit a job to this executor.
+ void StartJob(Job* job);
+
+ // Callback for when a job has completed running on the executor's thread.
+ void OnJobCompleted(Job* job);
+
+ // Cleanup the executor. Cancels all outstanding work, and frees the thread
+ // and resolver.
+ void Destroy();
+
+ void PurgeMemory();
+
+ // Returns the outstanding job, or NULL.
+ Job* outstanding_job() const { return outstanding_job_.get(); }
+
+ ProxyResolver* resolver() { return resolver_.get(); }
+
+ int thread_number() const { return thread_number_; }
+
+ private:
+ friend class base::RefCountedThreadSafe<Executor>;
+ ~Executor();
+
+ MultiThreadedProxyResolver* coordinator_;
+ const int thread_number_;
+
+ // The currently active job for this executor (either a SetPacScript or
+ // GetProxyForURL task).
+ scoped_refptr<Job> outstanding_job_;
+
+ // The synchronous resolver implementation.
+ scoped_ptr<ProxyResolver> resolver_;
+
+ // The thread that |resolver_| runs on.
+ // Note that declaration ordering is important here. |thread_| needs to be
+ // destroyed *before* |resolver_|, in case |resolver_| is currently
+ // executing on |thread_|.
+ scoped_ptr<base::Thread> thread_;
+};
+
+// MultiThreadedProxyResolver::Job ---------------------------------------------
+
+class MultiThreadedProxyResolver::Job
+ : public base::RefCountedThreadSafe<MultiThreadedProxyResolver::Job> {
+ public:
+ // Identifies the subclass of Job (only being used for debugging purposes).
+ enum Type {
+ TYPE_GET_PROXY_FOR_URL,
+ TYPE_SET_PAC_SCRIPT,
+ TYPE_SET_PAC_SCRIPT_INTERNAL,
+ };
+
+ Job(Type type, CompletionCallback* user_callback)
+ : type_(type),
+ user_callback_(user_callback),
+ executor_(NULL),
+ was_cancelled_(false) {
+ }
+
+ void set_executor(Executor* executor) {
+ executor_ = executor;
+ }
+
+ // The "executor" is the job runner that is scheduling this job. If
+ // this job has not been submitted to an executor yet, this will be
+ // NULL (and we know it hasn't started yet).
+ Executor* executor() {
+ return executor_;
+ }
+
+ // Mark the job as having been cancelled.
+ void Cancel() {
+ was_cancelled_ = true;
+ }
+
+ // Returns true if Cancel() has been called.
+ bool was_cancelled() const { return was_cancelled_; }
+
+ Type type() const { return type_; }
+
+ // Returns true if this job still has a user callback. Some jobs
+ // do not have a user callback, because they were helper jobs
+ // scheduled internally (for example TYPE_SET_PAC_SCRIPT_INTERNAL).
+ //
+ // Otherwise jobs that correspond with user-initiated work will
+ // have a non-NULL callback up until the callback is run.
+ bool has_user_callback() const { return user_callback_ != NULL; }
+
+ // This method is called when the job is inserted into a wait queue
+ // because no executors were ready to accept it.
+ virtual void WaitingForThread() {}
+
+ // This method is called just before the job is posted to the work thread.
+ virtual void FinishedWaitingForThread() {}
+
+ // This method is called on the worker thread to do the job's work. On
+ // completion, implementors are expected to call OnJobCompleted() on
+ // |origin_loop|.
+ virtual void Run(MessageLoop* origin_loop) = 0;
+
+ protected:
+ void OnJobCompleted() {
+ // |executor_| will be NULL if the executor has already been deleted.
+ if (executor_)
+ executor_->OnJobCompleted(this);
+ }
+
+ void RunUserCallback(int result) {
+ DCHECK(has_user_callback());
+ CompletionCallback* callback = user_callback_;
+ // Null the callback so has_user_callback() will now return false.
+ user_callback_ = NULL;
+ callback->Run(result);
+ }
+
+ friend class base::RefCountedThreadSafe<MultiThreadedProxyResolver::Job>;
+
+ virtual ~Job() {}
+
+ private:
+ const Type type_;
+ CompletionCallback* user_callback_;
+ Executor* executor_;
+ bool was_cancelled_;
+};
+
+// MultiThreadedProxyResolver::SetPacScriptJob ---------------------------------
+
+// Runs on the worker thread to call ProxyResolver::SetPacScript.
+class MultiThreadedProxyResolver::SetPacScriptJob
+ : public MultiThreadedProxyResolver::Job {
+ public:
+ SetPacScriptJob(const GURL& pac_url,
+ const string16& pac_script,
+ CompletionCallback* callback)
+ : Job(callback ? TYPE_SET_PAC_SCRIPT : TYPE_SET_PAC_SCRIPT_INTERNAL,
+ callback),
+ pac_url_(pac_url),
+ pac_script_(pac_script) {
+ }
+
+ // Runs on the worker thread.
+ virtual void Run(MessageLoop* origin_loop) {
+ ProxyResolver* resolver = executor()->resolver();
+ int rv = resolver->expects_pac_bytes() ?
+ resolver->SetPacScriptByData(pac_script_, NULL) :
+ resolver->SetPacScriptByUrl(pac_url_, NULL);
+
+ DCHECK_NE(rv, ERR_IO_PENDING);
+ origin_loop->PostTask(
+ FROM_HERE,
+ NewRunnableMethod(this, &SetPacScriptJob::RequestComplete, rv));
+ }
+
+ private:
+ // Runs the completion callback on the origin thread.
+ void RequestComplete(int result_code) {
+ // The task may have been cancelled after it was started.
+ if (!was_cancelled() && has_user_callback()) {
+ RunUserCallback(result_code);
+ }
+ OnJobCompleted();
+ }
+
+ const GURL pac_url_;
+ const string16 pac_script_;
+};
+
+// MultiThreadedProxyResolver::GetProxyForURLJob ------------------------------
+
+class MultiThreadedProxyResolver::GetProxyForURLJob
+ : public MultiThreadedProxyResolver::Job {
+ public:
+ // |url| -- the URL of the query.
+ // |results| -- the structure to fill with proxy resolve results.
+ GetProxyForURLJob(const GURL& url,
+ ProxyInfo* results,
+ CompletionCallback* callback,
+ const BoundNetLog& net_log)
+ : Job(TYPE_GET_PROXY_FOR_URL, callback),
+ results_(results),
+ net_log_(net_log),
+ url_(url),
+ was_waiting_for_thread_(false) {
+ DCHECK(callback);
+ }
+
+ BoundNetLog* net_log() { return &net_log_; }
+
+ virtual void WaitingForThread() {
+ was_waiting_for_thread_ = true;
+ net_log_.BeginEvent(
+ NetLog::TYPE_WAITING_FOR_PROXY_RESOLVER_THREAD, NULL);
+ }
+
+ virtual void FinishedWaitingForThread() {
+ DCHECK(executor());
+
+ if (was_waiting_for_thread_) {
+ net_log_.EndEvent(
+ NetLog::TYPE_WAITING_FOR_PROXY_RESOLVER_THREAD, NULL);
+ }
+
+ net_log_.AddEvent(
+ NetLog::TYPE_SUBMITTED_TO_RESOLVER_THREAD,
+ new NetLogIntegerParameter(
+ "thread_number", executor()->thread_number()));
+ }
+
+ // Runs on the worker thread.
+ virtual void Run(MessageLoop* origin_loop) {
+ const size_t kNetLogBound = 50u;
+ worker_log_.reset(new CapturingNetLog(kNetLogBound));
+ BoundNetLog bound_worker_log(NetLog::Source(), worker_log_.get());
+
+ ProxyResolver* resolver = executor()->resolver();
+ int rv = resolver->GetProxyForURL(
+ url_, &results_buf_, NULL, NULL, bound_worker_log);
+ DCHECK_NE(rv, ERR_IO_PENDING);
+
+ origin_loop->PostTask(
+ FROM_HERE,
+ NewRunnableMethod(this, &GetProxyForURLJob::QueryComplete, rv));
+ }
+
+ private:
+ // Runs the completion callback on the origin thread.
+ void QueryComplete(int result_code) {
+ // The Job may have been cancelled after it was started.
+ if (!was_cancelled()) {
+ // Merge the load log that was generated on the worker thread, into the
+ // main log.
+ CapturingBoundNetLog bound_worker_log(NetLog::Source(),
+ worker_log_.release());
+ bound_worker_log.AppendTo(net_log_);
+
+ if (result_code >= OK) { // Note: unit-tests use values > 0.
+ results_->Use(results_buf_);
+ }
+ RunUserCallback(result_code);
+ }
+ OnJobCompleted();
+ }
+
+ // Must only be used on the "origin" thread.
+ ProxyInfo* results_;
+ BoundNetLog net_log_;
+ const GURL url_;
+
+ // Usable from within DoQuery on the worker thread.
+ ProxyInfo results_buf_;
+
+ // Used to pass the captured events between DoQuery [worker thread] and
+ // QueryComplete [origin thread].
+ scoped_ptr<CapturingNetLog> worker_log_;
+
+ bool was_waiting_for_thread_;
+};
+
+// MultiThreadedProxyResolver::Executor ----------------------------------------
+
+MultiThreadedProxyResolver::Executor::Executor(
+ MultiThreadedProxyResolver* coordinator,
+ ProxyResolver* resolver,
+ int thread_number)
+ : coordinator_(coordinator),
+ thread_number_(thread_number),
+ resolver_(resolver) {
+ DCHECK(coordinator);
+ DCHECK(resolver);
+ // Start up the thread.
+ // Note that it is safe to pass a temporary C-String to Thread(), as it will
+ // make a copy.
+ std::string thread_name =
+ StringPrintf("PAC thread #%d", thread_number);
+ thread_.reset(new base::Thread(thread_name.c_str()));
+ thread_->Start();
+}
+
+void MultiThreadedProxyResolver::Executor::StartJob(Job* job) {
+ DCHECK(!outstanding_job_);
+ outstanding_job_ = job;
+
+ // Run the job. Once it has completed (regardless of whether it was
+ // cancelled), it will invoke OnJobCompleted() on this thread.
+ job->set_executor(this);
+ job->FinishedWaitingForThread();
+ thread_->message_loop()->PostTask(
+ FROM_HERE,
+ NewRunnableMethod(job, &Job::Run, MessageLoop::current()));
+}
+
+void MultiThreadedProxyResolver::Executor::OnJobCompleted(Job* job) {
+ DCHECK_EQ(job, outstanding_job_.get());
+ outstanding_job_ = NULL;
+ coordinator_->OnExecutorReady(this);
+}
+
+void MultiThreadedProxyResolver::Executor::Destroy() {
+ DCHECK(coordinator_);
+
+ // Give the resolver an opportunity to shutdown from THIS THREAD before
+ // joining on the resolver thread. This allows certain implementations
+ // to avoid deadlocks.
+ resolver_->Shutdown();
+
+ // Join the worker thread.
+ thread_.reset();
+
+ // Cancel any outstanding job.
+ if (outstanding_job_) {
+ outstanding_job_->Cancel();
+ // Orphan the job (since this executor may be deleted soon).
+ outstanding_job_->set_executor(NULL);
+ }
+
+ // It is now safe to free the ProxyResolver, since all the tasks that
+ // were using it on the resolver thread have completed.
+ resolver_.reset();
+
+ // Null some stuff as a precaution.
+ coordinator_ = NULL;
+ outstanding_job_ = NULL;
+}
+
+void MultiThreadedProxyResolver::Executor::PurgeMemory() {
+ scoped_refptr<PurgeMemoryTask> helper(new PurgeMemoryTask(resolver_.get()));
+ thread_->message_loop()->PostTask(
+ FROM_HERE,
+ NewRunnableMethod(helper.get(), &PurgeMemoryTask::PurgeMemory));
+}
+
+MultiThreadedProxyResolver::Executor::~Executor() {
+ // The important cleanup happens as part of Destroy(), which should always be
+ // called first.
+ DCHECK(!coordinator_) << "Destroy() was not called";
+ DCHECK(!thread_.get());
+ DCHECK(!resolver_.get());
+ DCHECK(!outstanding_job_);
+}
+
+// MultiThreadedProxyResolver --------------------------------------------------
+
+MultiThreadedProxyResolver::MultiThreadedProxyResolver(
+ ProxyResolverFactory* resolver_factory,
+ size_t max_num_threads)
+ : ProxyResolver(resolver_factory->resolvers_expect_pac_bytes()),
+ resolver_factory_(resolver_factory),
+ max_num_threads_(max_num_threads),
+ was_set_pac_script_called_(false) {
+ DCHECK_GE(max_num_threads, 1u);
+}
+
+MultiThreadedProxyResolver::~MultiThreadedProxyResolver() {
+ // We will cancel all outstanding requests.
+ pending_jobs_.clear();
+ ReleaseAllExecutors();
+}
+
+int MultiThreadedProxyResolver::GetProxyForURL(const GURL& url,
+ ProxyInfo* results,
+ CompletionCallback* callback,
+ RequestHandle* request,
+ const BoundNetLog& net_log) {
+ DCHECK(CalledOnValidThread());
+ DCHECK(callback);
+ DCHECK(was_set_pac_script_called_)
+ << "Resolver is un-initialized. Must call SetPacScript() first!";
+
+ scoped_refptr<GetProxyForURLJob> job =
+ new GetProxyForURLJob(url, results, callback, net_log);
+
+ // Completion will be notified through |callback|, unless the caller cancels
+ // the request using |request|.
+ if (request)
+ *request = reinterpret_cast<RequestHandle>(job.get());
+
+ // If there is an executor that is ready to run this request, submit it!
+ Executor* executor = FindIdleExecutor();
+ if (executor) {
+ DCHECK_EQ(0u, pending_jobs_.size());
+ executor->StartJob(job);
+ return ERR_IO_PENDING;
+ }
+
+ // Otherwise queue this request. (We will schedule it to a thread once one
+ // becomes available).
+ job->WaitingForThread();
+ pending_jobs_.push_back(job);
+
+ // If we haven't already reached the thread limit, provision a new thread to
+ // drain the requests more quickly.
+ if (executors_.size() < max_num_threads_) {
+ executor = AddNewExecutor();
+ executor->StartJob(
+ new SetPacScriptJob(current_pac_url_, current_pac_script_, NULL));
+ }
+
+ return ERR_IO_PENDING;
+}
+
+void MultiThreadedProxyResolver::CancelRequest(RequestHandle req) {
+ DCHECK(CalledOnValidThread());
+ DCHECK(req);
+
+ Job* job = reinterpret_cast<Job*>(req);
+ DCHECK_EQ(Job::TYPE_GET_PROXY_FOR_URL, job->type());
+
+ if (job->executor()) {
+ // If the job was already submitted to the executor, just mark it
+ // as cancelled so the user callback isn't run on completion.
+ job->Cancel();
+ } else {
+ // Otherwise the job is just sitting in a queue.
+ PendingJobsQueue::iterator it =
+ std::find(pending_jobs_.begin(), pending_jobs_.end(), job);
+ DCHECK(it != pending_jobs_.end());
+ pending_jobs_.erase(it);
+ }
+}
+
+void MultiThreadedProxyResolver::CancelSetPacScript() {
+ DCHECK(CalledOnValidThread());
+ DCHECK_EQ(0u, pending_jobs_.size());
+ DCHECK_EQ(1u, executors_.size());
+ DCHECK_EQ(Job::TYPE_SET_PAC_SCRIPT,
+ executors_[0]->outstanding_job()->type());
+
+ // Defensively clear some data which shouldn't be getting used
+ // anymore.
+ was_set_pac_script_called_ = false;
+ current_pac_url_ = GURL();
+ current_pac_script_ = string16();
+
+ ReleaseAllExecutors();
+}
+
+void MultiThreadedProxyResolver::PurgeMemory() {
+ DCHECK(CalledOnValidThread());
+ for (ExecutorList::iterator it = executors_.begin();
+ it != executors_.end(); ++it) {
+ Executor* executor = *it;
+ executor->PurgeMemory();
+ }
+}
+
+int MultiThreadedProxyResolver::SetPacScript(
+ const GURL& pac_url,
+ const string16& pac_script,
+ CompletionCallback* callback) {
+ DCHECK(CalledOnValidThread());
+ DCHECK(callback);
+
+ // Save the script details, so we can provision new executors later.
+ // (We rely on internal reference counting of strings to avoid this memory
+ // being duplicated by each of the resolver threads).
+ was_set_pac_script_called_ = true;
+ current_pac_url_ = pac_url;
+ current_pac_script_ = pac_script;
+
+ // The user should not have any outstanding requests when they call
+ // SetPacScript().
+ CheckNoOutstandingUserRequests();
+
+ // Destroy all of the current threads and their proxy resolvers.
+ ReleaseAllExecutors();
+
+ // Provision a new executor, and run the SetPacScript request. On completion
+ // notification will be sent through |callback|.
+ Executor* executor = AddNewExecutor();
+ executor->StartJob(new SetPacScriptJob(pac_url, pac_script, callback));
+ return ERR_IO_PENDING;
+}
+
+void MultiThreadedProxyResolver::CheckNoOutstandingUserRequests() const {
+ DCHECK(CalledOnValidThread());
+ CHECK_EQ(0u, pending_jobs_.size());
+
+ for (ExecutorList::const_iterator it = executors_.begin();
+ it != executors_.end(); ++it) {
+ const Executor* executor = *it;
+ Job* job = executor->outstanding_job();
+ // The "has_user_callback()" is to exclude jobs for which the callback
+ // has already been invoked, or was not user-initiated (as in the case of
+ // lazy thread provisions). User-initiated jobs may have !has_user_callback()
+ // when the callback has already been run. (Since we only clear the
+ // outstanding job AFTER the callback has been invoked, it is possible
+ // for a new request to be started from within the callback).
+ CHECK(!job || job->was_cancelled() || !job->has_user_callback());
+ }
+}
+
+void MultiThreadedProxyResolver::ReleaseAllExecutors() {
+ DCHECK(CalledOnValidThread());
+ for (ExecutorList::iterator it = executors_.begin();
+ it != executors_.end(); ++it) {
+ Executor* executor = *it;
+ executor->Destroy();
+ }
+ executors_.clear();
+}
+
+MultiThreadedProxyResolver::Executor*
+MultiThreadedProxyResolver::FindIdleExecutor() {
+ DCHECK(CalledOnValidThread());
+ for (ExecutorList::iterator it = executors_.begin();
+ it != executors_.end(); ++it) {
+ Executor* executor = *it;
+ if (!executor->outstanding_job())
+ return executor;
+ }
+ return NULL;
+}
+
+MultiThreadedProxyResolver::Executor*
+MultiThreadedProxyResolver::AddNewExecutor() {
+ DCHECK(CalledOnValidThread());
+ DCHECK_LT(executors_.size(), max_num_threads_);
+ // The "thread number" is used to give the thread a unique name.
+ int thread_number = executors_.size();
+ ProxyResolver* resolver = resolver_factory_->CreateProxyResolver();
+ Executor* executor = new Executor(
+ this, resolver, thread_number);
+ executors_.push_back(executor);
+ return executor;
+}
+
+void MultiThreadedProxyResolver::OnExecutorReady(Executor* executor) {
+ DCHECK(CalledOnValidThread());
+ if (pending_jobs_.empty())
+ return;
+
+ // Get the next job to process (FIFO). Transfer it from the pending queue
+ // to the executor.
+ scoped_refptr<Job> job = pending_jobs_.front();
+ pending_jobs_.pop_front();
+ executor->StartJob(job);
+}
+
+} // namespace net
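The core mechanic of every Job above is the same round trip: the synchronous resolve runs on the worker thread, and completion is posted back to the origin MessageLoop, so user callbacks always fire on the caller's thread. A standalone model of that hand-off in modern C++ (std::thread standing in for base::Thread, a toy task queue standing in for MessageLoop; illustrative only, not Chromium API):

#include <functional>
#include <iostream>
#include <mutex>
#include <queue>
#include <thread>

// Toy stand-in for the origin thread's MessageLoop: a locked FIFO of tasks
// that only the origin thread drains.
class OriginLoop {
 public:
  void PostTask(std::function<void()> task) {
    std::lock_guard<std::mutex> lock(mu_);
    tasks_.push(std::move(task));
  }
  void RunAllPending() {
    for (;;) {
      std::function<void()> task;
      {
        std::lock_guard<std::mutex> lock(mu_);
        if (tasks_.empty())
          return;
        task = std::move(tasks_.front());
        tasks_.pop();
      }
      task();  // user callbacks run here, on the origin thread
    }
  }
 private:
  std::mutex mu_;
  std::queue<std::function<void()>> tasks_;
};

int BlockingResolve() { return 0; }  // stands in for the synchronous resolver

int main() {
  OriginLoop origin_loop;
  std::thread worker([&origin_loop] {
    int rv = BlockingResolve();  // like Job::Run(): blocking work, off-thread
    origin_loop.PostTask([rv] {  // like QueryComplete(): posted back
      std::cout << "callback ran on origin thread, rv=" << rv << "\n";
    });
  });
  worker.join();
  origin_loop.RunAllPending();
}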
diff --git a/net/proxy/multi_threaded_proxy_resolver.h b/net/proxy/multi_threaded_proxy_resolver.h
new file mode 100644
index 0000000..c192251
--- /dev/null
+++ b/net/proxy/multi_threaded_proxy_resolver.h
@@ -0,0 +1,144 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef NET_PROXY_MULTI_THREADED_PROXY_RESOLVER_H_
+#define NET_PROXY_MULTI_THREADED_PROXY_RESOLVER_H_
+
+#include <deque>
+#include <string>
+#include <vector>
+
+#include "base/basictypes.h"
+#include "base/non_thread_safe.h"
+#include "base/ref_counted.h"
+#include "base/scoped_ptr.h"
+#include "net/proxy/proxy_resolver.h"
+
+namespace base {
+class Thread;
+} // namespace base
+
+namespace net {
+
+// ProxyResolverFactory is an interface for creating ProxyResolver instances.
+class ProxyResolverFactory {
+ public:
+ explicit ProxyResolverFactory(bool resolvers_expect_pac_bytes)
+ : resolvers_expect_pac_bytes_(resolvers_expect_pac_bytes) {}
+
+ virtual ~ProxyResolverFactory() {}
+
+ // Creates a new ProxyResolver. The caller is responsible for freeing this
+ // object.
+ virtual ProxyResolver* CreateProxyResolver() = 0;
+
+ bool resolvers_expect_pac_bytes() const {
+ return resolvers_expect_pac_bytes_;
+ }
+
+ private:
+ bool resolvers_expect_pac_bytes_;
+ DISALLOW_COPY_AND_ASSIGN(ProxyResolverFactory);
+};
+
+// MultiThreadedProxyResolver is a ProxyResolver implementation that runs
+// synchronous ProxyResolver implementations on worker threads.
+//
+// Threads are created lazily on demand, up to a maximum total. The advantage
+// of having a pool of threads is performance: PAC requests can keep being
+// serviced even if one of them blocks its worker thread.
+//
+// During initialization (SetPacScript), a single thread is spun up to test
+// the script. If this succeeds, we cache the input script, and will re-use
+// this to lazily provision any new threads as needed.
+//
+// For each new thread that we spawn, a corresponding new ProxyResolver is
+// created using ProxyResolverFactory.
+//
+// Because we are creating multiple ProxyResolver instances, this means we
+// are duplicating script contexts for what is ordinarily seen as being a
+// single script. This can affect compatibility on some classes of PAC
+// script:
+//
+// (a) Scripts whose initialization has external dependencies on network or
+// time may end up successfully initializing on some threads, but not
+// others. So depending on what thread services the request, the result
+// may jump between several possibilities.
+//
+// (b) Scripts whose FindProxyForURL() depends on side-effects may now
+// work differently. For example, a PAC script which was incrementing
+// a global counter and using that to make a decision. In the
+// multi-threaded model, each thread may have a different value for this
+// counter, so it won't globally be seen as monotonically increasing!
+class MultiThreadedProxyResolver : public ProxyResolver, public NonThreadSafe {
+ public:
+ // Creates an asynchronous ProxyResolver that runs requests on up to
+ // |max_num_threads|.
+ //
+ // For each thread that is created, an accompanying synchronous ProxyResolver
+ // will be provisioned using |resolver_factory|. All methods on these
+ // ProxyResolvers will be called on the one thread, with the exception of
+ // ProxyResolver::Shutdown() which will be called from the origin thread
+ // prior to destruction.
+ //
+ // The constructor takes ownership of |resolver_factory|.
+ MultiThreadedProxyResolver(ProxyResolverFactory* resolver_factory,
+ size_t max_num_threads);
+
+ virtual ~MultiThreadedProxyResolver();
+
+ // ProxyResolver implementation:
+ virtual int GetProxyForURL(const GURL& url,
+ ProxyInfo* results,
+ CompletionCallback* callback,
+ RequestHandle* request,
+ const BoundNetLog& net_log);
+ virtual void CancelRequest(RequestHandle request);
+ virtual void CancelSetPacScript();
+ virtual void PurgeMemory();
+
+ private:
+ class Executor;
+ class Job;
+ class SetPacScriptJob;
+ class GetProxyForURLJob;
+ // FIFO queue of pending jobs waiting to be started.
+ // TODO(eroman): Make this a priority queue.
+ typedef std::deque<scoped_refptr<Job> > PendingJobsQueue;
+ typedef std::vector<scoped_refptr<Executor> > ExecutorList;
+
+ // ProxyResolver implementation:
+ virtual int SetPacScript(const GURL& pac_url,
+ const string16& pac_script,
+ CompletionCallback* callback);
+
+ // Asserts that there are no outstanding user-initiated jobs on any of the
+ // worker threads.
+ void CheckNoOutstandingUserRequests() const;
+
+ // Stops and deletes all of the worker threads.
+ void ReleaseAllExecutors();
+
+ // Returns an idle worker thread which is ready to receive GetProxyForURL()
+ // requests. If all threads are occupied, returns NULL.
+ Executor* FindIdleExecutor();
+
+ // Creates a new worker thread, and appends it to |executors_|.
+ Executor* AddNewExecutor();
+
+ // Starts the next job from |pending_jobs_| if possible.
+ void OnExecutorReady(Executor* executor);
+
+ const scoped_ptr<ProxyResolverFactory> resolver_factory_;
+ const size_t max_num_threads_;
+ PendingJobsQueue pending_jobs_;
+ ExecutorList executors_;
+ bool was_set_pac_script_called_;
+ GURL current_pac_url_;
+ string16 current_pac_script_;
+};
+
+} // namespace net
+
+#endif // NET_PROXY_MULTI_THREADED_PROXY_RESOLVER_H_
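To plug a custom synchronous resolver into this machinery, one subclasses the ProxyResolverFactory interface above; CreateProxyResolver() is invoked once per lazily provisioned worker thread, so each thread owns a private resolver instance. A sketch of the contract (MyBlockingResolver is a hypothetical synchronous ProxyResolver subclass, not part of this CL):

// Sketch: the factory contract from the header above.
class MyResolverFactory : public ProxyResolverFactory {
 public:
  MyResolverFactory() : ProxyResolverFactory(true /* expects_pac_bytes */) {}

  virtual ProxyResolver* CreateProxyResolver() {
    // Called once per worker thread; the executor takes ownership.
    return new MyBlockingResolver();
  }
};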
diff --git a/net/proxy/single_threaded_proxy_resolver_unittest.cc b/net/proxy/multi_threaded_proxy_resolver_unittest.cc
index a84253c..f451c3a 100644
--- a/net/proxy/single_threaded_proxy_resolver_unittest.cc
+++ b/net/proxy/multi_threaded_proxy_resolver_unittest.cc
@@ -2,6 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "net/proxy/multi_threaded_proxy_resolver.h"
+
+#include "base/stl_util-inl.h"
#include "base/string_util.h"
#include "base/waitable_event.h"
#include "googleurl/src/gurl.h"
@@ -10,14 +13,14 @@
#include "net/base/net_errors.h"
#include "net/base/test_completion_callback.h"
#include "net/proxy/proxy_info.h"
-#include "net/proxy/single_threaded_proxy_resolver.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace net {
+
namespace {
// A synchronous mock ProxyResolver implementation, which can be used in
-// conjunction with SingleThreadedProxyResolver.
+// conjunction with MultiThreadedProxyResolver.
// - returns a single-item proxy list with the query's host.
class MockProxyResolver : public ProxyResolver {
public:
@@ -69,6 +72,7 @@ class MockProxyResolver : public ProxyResolver {
}
int purge_count() const { return purge_count_; }
+ int request_count() const { return request_count_; }
const string16& last_pac_script() const { return last_pac_script_; }
@@ -79,7 +83,7 @@ class MockProxyResolver : public ProxyResolver {
private:
void CheckIsOnWorkerThread() {
// We should be running on the worker thread -- while we don't know the
- // message loop of SingleThreadedProxyResolver's worker thread, we do
+ // message loop of MultiThreadedProxyResolver's worker thread, we do
// know that it is going to be distinct from the loop running the
// test, so at least make sure it isn't the main loop.
EXPECT_NE(MessageLoop::current(), wrong_loop_);
@@ -141,9 +145,87 @@ class BlockableProxyResolver : public MockProxyResolver {
base::WaitableEvent blocked_;
};
-TEST(SingleThreadedProxyResolverTest, Basic) {
- MockProxyResolver* mock = new MockProxyResolver;
- SingleThreadedProxyResolver resolver(mock);
+// ForwardingProxyResolver forwards all requests to |impl|.
+class ForwardingProxyResolver : public ProxyResolver {
+ public:
+ explicit ForwardingProxyResolver(ProxyResolver* impl)
+ : ProxyResolver(impl->expects_pac_bytes()),
+ impl_(impl) {}
+
+ virtual int GetProxyForURL(const GURL& query_url,
+ ProxyInfo* results,
+ CompletionCallback* callback,
+ RequestHandle* request,
+ const BoundNetLog& net_log) {
+ return impl_->GetProxyForURL(
+ query_url, results, callback, request, net_log);
+ }
+
+ virtual void CancelRequest(RequestHandle request) {
+ impl_->CancelRequest(request);
+ }
+
+ virtual int SetPacScript(const GURL& pac_url,
+ const string16& script,
+ CompletionCallback* callback) {
+ if (impl_->expects_pac_bytes())
+ return impl_->SetPacScriptByData(script, callback);
+ else
+ return impl_->SetPacScriptByUrl(pac_url, callback);
+ }
+
+ virtual void PurgeMemory() {
+ impl_->PurgeMemory();
+ }
+
+ private:
+ ProxyResolver* impl_;
+};
+
+// This factory returns ProxyResolvers that forward all requests to
+// |resolver|.
+class ForwardingProxyResolverFactory : public ProxyResolverFactory {
+ public:
+ explicit ForwardingProxyResolverFactory(ProxyResolver* resolver)
+ : ProxyResolverFactory(resolver->expects_pac_bytes()),
+ resolver_(resolver) {}
+
+ virtual ProxyResolver* CreateProxyResolver() {
+ return new ForwardingProxyResolver(resolver_);
+ }
+
+ private:
+ ProxyResolver* resolver_;
+};
+
+// This factory returns new instances of BlockableProxyResolver.
+class BlockableProxyResolverFactory : public ProxyResolverFactory {
+ public:
+ BlockableProxyResolverFactory() : ProxyResolverFactory(true) {}
+
+ ~BlockableProxyResolverFactory() {
+ STLDeleteElements(&resolvers_);
+ }
+
+ virtual ProxyResolver* CreateProxyResolver() {
+ BlockableProxyResolver* resolver = new BlockableProxyResolver;
+ resolvers_.push_back(resolver);
+ return new ForwardingProxyResolver(resolver);
+ }
+
+ std::vector<BlockableProxyResolver*> resolvers() {
+ return resolvers_;
+ }
+
+ private:
+ std::vector<BlockableProxyResolver*> resolvers_;
+};
+
+TEST(MultiThreadedProxyResolverTest, SingleThread_Basic) {
+ const size_t kNumThreads = 1u;
+ scoped_ptr<MockProxyResolver> mock(new MockProxyResolver);
+ MultiThreadedProxyResolver resolver(
+ new ForwardingProxyResolverFactory(mock.get()), kNumThreads);
int rv;
@@ -173,7 +255,11 @@ TEST(SingleThreadedProxyResolverTest, Basic) {
// The mock proxy resolver should have written 1 log entry. And
// on completion, this should have been copied into |log0|.
- EXPECT_EQ(1u, log0.entries().size());
+ // We also have 1 log entry that was emitted by the
+ // MultiThreadedProxyResolver.
+ ASSERT_EQ(2u, log0.entries().size());
+ EXPECT_EQ(NetLog::TYPE_SUBMITTED_TO_RESOLVER_THREAD,
+ log0.entries()[0].type);
// Start 3 more requests (request1 to request3).
@@ -225,12 +311,20 @@ TEST(SingleThreadedProxyResolverTest, Basic) {
// Tests that the NetLog is updated to include the time the request was waiting
// to be scheduled to a thread.
-TEST(SingleThreadedProxyResolverTest, UpdatesNetLogWithThreadWait) {
- BlockableProxyResolver* mock = new BlockableProxyResolver;
- SingleThreadedProxyResolver resolver(mock);
+TEST(MultiThreadedProxyResolverTest,
+ SingleThread_UpdatesNetLogWithThreadWait) {
+ const size_t kNumThreads = 1u;
+ scoped_ptr<BlockableProxyResolver> mock(new BlockableProxyResolver);
+ MultiThreadedProxyResolver resolver(
+ new ForwardingProxyResolverFactory(mock.get()), kNumThreads);
int rv;
+ // Initialize the resolver.
+ TestCompletionCallback init_callback;
+ rv = resolver.SetPacScriptByData(ASCIIToUTF16("foo"), &init_callback);
+ EXPECT_EQ(OK, init_callback.WaitForResult());
+
// Block the proxy resolver, so no request can complete.
mock->Block();
@@ -265,42 +359,53 @@ TEST(SingleThreadedProxyResolverTest, UpdatesNetLogWithThreadWait) {
mock->Unblock();
// Check that request 0 completed as expected.
- // The NetLog only has 1 entry (that came from the mock proxy resolver.)
+ // The NetLog has 1 entry that came from the MultiThreadedProxyResolver, and
+ // 1 entry from the mock proxy resolver.
EXPECT_EQ(0, callback0.WaitForResult());
EXPECT_EQ("PROXY request0:80", results0.ToPacString());
- ASSERT_EQ(1u, log0.entries().size());
+ ASSERT_EQ(2u, log0.entries().size());
+ EXPECT_EQ(NetLog::TYPE_SUBMITTED_TO_RESOLVER_THREAD,
+ log0.entries()[0].type);
// Check that request 1 completed as expected.
EXPECT_EQ(1, callback1.WaitForResult());
EXPECT_EQ("PROXY request1:80", results1.ToPacString());
- ASSERT_EQ(3u, log1.entries().size());
+ ASSERT_EQ(4u, log1.entries().size());
EXPECT_TRUE(LogContainsBeginEvent(
log1.entries(), 0,
- NetLog::TYPE_WAITING_FOR_SINGLE_PROXY_RESOLVER_THREAD));
+ NetLog::TYPE_WAITING_FOR_PROXY_RESOLVER_THREAD));
EXPECT_TRUE(LogContainsEndEvent(
log1.entries(), 1,
- NetLog::TYPE_WAITING_FOR_SINGLE_PROXY_RESOLVER_THREAD));
+ NetLog::TYPE_WAITING_FOR_PROXY_RESOLVER_THREAD));
// Check that request 2 completed as expected.
EXPECT_EQ(2, callback2.WaitForResult());
EXPECT_EQ("PROXY request2:80", results2.ToPacString());
- ASSERT_EQ(3u, log2.entries().size());
+ ASSERT_EQ(4u, log2.entries().size());
EXPECT_TRUE(LogContainsBeginEvent(
log2.entries(), 0,
- NetLog::TYPE_WAITING_FOR_SINGLE_PROXY_RESOLVER_THREAD));
+ NetLog::TYPE_WAITING_FOR_PROXY_RESOLVER_THREAD));
EXPECT_TRUE(LogContainsEndEvent(
log2.entries(), 1,
- NetLog::TYPE_WAITING_FOR_SINGLE_PROXY_RESOLVER_THREAD));
+ NetLog::TYPE_WAITING_FOR_PROXY_RESOLVER_THREAD));
}
// Cancel a request which is in progress, and then cancel a request which
// is pending.
-TEST(SingleThreadedProxyResolverTest, CancelRequest) {
- BlockableProxyResolver* mock = new BlockableProxyResolver;
- SingleThreadedProxyResolver resolver(mock);
+TEST(MultiThreadedProxyResolverTest, SingleThread_CancelRequest) {
+ const size_t kNumThreads = 1u;
+ scoped_ptr<BlockableProxyResolver> mock(new BlockableProxyResolver);
+ MultiThreadedProxyResolver resolver(
+ new ForwardingProxyResolverFactory(mock.get()),
+ kNumThreads);
int rv;
+ // Initialize the resolver.
+ TestCompletionCallback init_callback;
+ rv = resolver.SetPacScriptByData(ASCIIToUTF16("foo"), &init_callback);
+ EXPECT_EQ(OK, init_callback.WaitForResult());
+
// Block the proxy resolver, so no request can complete.
mock->Block();
@@ -361,15 +466,22 @@ TEST(SingleThreadedProxyResolverTest, CancelRequest) {
EXPECT_FALSE(callback2.have_result());
}
-// Test that deleting SingleThreadedProxyResolver while requests are
+// Test that deleting MultiThreadedProxyResolver while requests are
// outstanding cancels them (and doesn't leak anything).
-TEST(SingleThreadedProxyResolverTest, CancelRequestByDeleting) {
- BlockableProxyResolver* mock = new BlockableProxyResolver;
- scoped_ptr<SingleThreadedProxyResolver> resolver(
- new SingleThreadedProxyResolver(mock));
+TEST(MultiThreadedProxyResolverTest, SingleThread_CancelRequestByDeleting) {
+ const size_t kNumThreads = 1u;
+ scoped_ptr<BlockableProxyResolver> mock(new BlockableProxyResolver);
+ scoped_ptr<MultiThreadedProxyResolver> resolver(
+ new MultiThreadedProxyResolver(
+ new ForwardingProxyResolverFactory(mock.get()), kNumThreads));
int rv;
+ // Initialize the resolver.
+ TestCompletionCallback init_callback;
+ rv = resolver->SetPacScriptByData(ASCIIToUTF16("foo"), &init_callback);
+ EXPECT_EQ(OK, init_callback.WaitForResult());
+
// Block the proxy resolver, so no request can complete.
mock->Block();
@@ -397,14 +509,14 @@ TEST(SingleThreadedProxyResolverTest, CancelRequestByDeleting) {
mock->WaitUntilBlocked();
// Add some latency, to improve the chance that when
- // SingleThreadedProxyResolver is deleted below we are still running inside
+ // MultiThreadedProxyResolver is deleted below we are still running inside
// of the worker thread. The test will pass regardless, so this race doesn't
// cause flakiness. However the destruction during execution is a more
// interesting case to test.
mock->SetResolveLatency(100);
// Unblock the worker thread and delete the underlying
- // SingleThreadedProxyResolver immediately.
+ // MultiThreadedProxyResolver immediately.
mock->Unblock();
resolver.reset();
@@ -418,59 +530,213 @@ TEST(SingleThreadedProxyResolverTest, CancelRequestByDeleting) {
}
// Cancel an outstanding call to SetPacScriptByData().
-TEST(SingleThreadedProxyResolverTest, CancelSetPacScript) {
- BlockableProxyResolver* mock = new BlockableProxyResolver;
- SingleThreadedProxyResolver resolver(mock);
+TEST(MultiThreadedProxyResolverTest, SingleThread_CancelSetPacScript) {
+ const size_t kNumThreads = 1u;
+ scoped_ptr<BlockableProxyResolver> mock(new BlockableProxyResolver);
+ MultiThreadedProxyResolver resolver(
+ new ForwardingProxyResolverFactory(mock.get()), kNumThreads);
int rv;
- // Block the proxy resolver, so no request can complete.
- mock->Block();
+ TestCompletionCallback set_pac_script_callback;
+ rv = resolver.SetPacScriptByData(ASCIIToUTF16("data"),
+ &set_pac_script_callback);
+ EXPECT_EQ(ERR_IO_PENDING, rv);
- // Start request 0.
- ProxyResolver::RequestHandle request0;
- TestCompletionCallback callback0;
- ProxyInfo results0;
+ // Cancel the SetPacScriptByData request.
+ resolver.CancelSetPacScript();
+
+ // Start another SetPacScript request
+ TestCompletionCallback set_pac_script_callback2;
+ rv = resolver.SetPacScriptByData(ASCIIToUTF16("data2"),
+ &set_pac_script_callback2);
+ EXPECT_EQ(ERR_IO_PENDING, rv);
+
+ // Wait for the initialization to complete.
+
+ rv = set_pac_script_callback2.WaitForResult();
+ EXPECT_EQ(0, rv);
+ EXPECT_EQ(ASCIIToUTF16("data2"), mock->last_pac_script());
+
+ // The first SetPacScript callback should never have been completed.
+ EXPECT_FALSE(set_pac_script_callback.have_result());
+}
+
+// Tests setting the PAC script once, lazily creating new threads, and
+// cancelling requests.
+TEST(MultiThreadedProxyResolverTest, ThreeThreads_Basic) {
+ const size_t kNumThreads = 3u;
+ BlockableProxyResolverFactory* factory = new BlockableProxyResolverFactory;
+ MultiThreadedProxyResolver resolver(factory, kNumThreads);
+
+ int rv;
+
+ EXPECT_TRUE(resolver.expects_pac_bytes());
+
+ // Call SetPacScriptByData() -- verify that it reaches the synchronous
+ // resolver.
+ TestCompletionCallback set_script_callback;
+ rv = resolver.SetPacScriptByData(ASCIIToUTF16("pac script bytes"),
+ &set_script_callback);
+ EXPECT_EQ(ERR_IO_PENDING, rv);
+ EXPECT_EQ(OK, set_script_callback.WaitForResult());
+ // One thread has been provisioned (i.e. one ProxyResolver was created).
+ ASSERT_EQ(1u, factory->resolvers().size());
+ EXPECT_EQ(ASCIIToUTF16("pac script bytes"),
+ factory->resolvers()[0]->last_pac_script());
+
+ const int kNumRequests = 9;
+ TestCompletionCallback callback[kNumRequests];
+ ProxyInfo results[kNumRequests];
+ ProxyResolver::RequestHandle request[kNumRequests];
+
+ // Start request 0 -- this should run on thread 0 as there is nothing else
+ // going on right now.
rv = resolver.GetProxyForURL(
- GURL("http://request0"), &results0, &callback0, &request0, BoundNetLog());
+ GURL("http://request0"), &results[0], &callback[0], &request[0],
+ BoundNetLog());
EXPECT_EQ(ERR_IO_PENDING, rv);
- // Wait until requests 0 reaches the worker thread.
- mock->WaitUntilBlocked();
+ // Wait for request 0 to finish.
+ rv = callback[0].WaitForResult();
+ EXPECT_EQ(0, rv);
+ EXPECT_EQ("PROXY request0:80", results[0].ToPacString());
+ ASSERT_EQ(1u, factory->resolvers().size());
+ EXPECT_EQ(1, factory->resolvers()[0]->request_count());
- TestCompletionCallback set_pac_script_callback;
- rv = resolver.SetPacScriptByData(ASCIIToUTF16("data"),
- &set_pac_script_callback);
+ MessageLoop::current()->RunAllPending();
+
+ // We now start 8 requests in parallel -- this will cause the maximum of
+ // three threads to be provisioned (an additional two from what we already
+ // have).
+
+ for (int i = 1; i < kNumRequests; ++i) {
+ rv = resolver.GetProxyForURL(
+ GURL(StringPrintf("http://request%d", i)), &results[i], &callback[i],
+ &request[i], BoundNetLog());
+ EXPECT_EQ(ERR_IO_PENDING, rv);
+ }
+
+ // We should now have a total of 3 threads, each with its own ProxyResolver
+ // that will get initialized with the same data. (We check this later since
+ // the assignment happens on the worker threads and may not have occurred
+ // yet.)
+ ASSERT_EQ(3u, factory->resolvers().size());
+
+ // Cancel 3 of the 8 outstanding requests.
+ resolver.CancelRequest(request[1]);
+ resolver.CancelRequest(request[3]);
+ resolver.CancelRequest(request[6]);
+
+ // Wait for the remaining requests to complete.
+ int kNonCancelledRequests[] = {2, 4, 5, 7, 8};
+ for (size_t i = 0; i < arraysize(kNonCancelledRequests); ++i) {
+ int request_index = kNonCancelledRequests[i];
+ EXPECT_GE(callback[request_index].WaitForResult(), 0);
+ }
+
+ // Check that the cancelled requests never invoked their callback.
+ EXPECT_FALSE(callback[1].have_result());
+ EXPECT_FALSE(callback[3].have_result());
+ EXPECT_FALSE(callback[6].have_result());
+
+ // We call SetPacScript again, solely to stop the current worker threads.
+ // (That way we can test to see the values observed by the synchronous
+ // resolvers in a non-racy manner).
+ TestCompletionCallback set_script_callback2;
+ rv = resolver.SetPacScriptByData(ASCIIToUTF16("xyz"), &set_script_callback2);
EXPECT_EQ(ERR_IO_PENDING, rv);
+ EXPECT_EQ(OK, set_script_callback2.WaitForResult());
+ ASSERT_EQ(4u, factory->resolvers().size());
- // Cancel the SetPacScriptByData request (it can't have finished yet,
- // since the single-thread is currently blocked).
- resolver.CancelSetPacScript();
+ for (int i = 0; i < 3; ++i) {
+ EXPECT_EQ(ASCIIToUTF16("pac script bytes"),
+ factory->resolvers()[i]->last_pac_script()) << "i=" << i;
+ }
- // Start 1 more request.
+ EXPECT_EQ(ASCIIToUTF16("xyz"),
+ factory->resolvers()[3]->last_pac_script());
+
+ // We don't know the exact order in which requests ran on the threads,
+ // but we do know the total count that should have reached the threads.
+ // 8 total were submitted, and three were cancelled. Of the three that
+ // were cancelled, one of them (request 1) was cancelled after it had
+ // already been posted to the worker thread. So the resolvers will
+ // have seen 6 total (and 1 from the run prior).
+ ASSERT_EQ(4u, factory->resolvers().size());
+ int total_count = 0;
+ for (int i = 0; i < 3; ++i) {
+ total_count += factory->resolvers()[i]->request_count();
+ }
+ EXPECT_EQ(7, total_count);
+}
+
+// Tests using two threads. The first request hangs the first thread. Checks
+// that other requests are able to complete while this first request remains
+// stalled.
+TEST(MultiThreadedProxyResolverTest, OneThreadBlocked) {
+ const size_t kNumThreads = 2u;
+ BlockableProxyResolverFactory* factory = new BlockableProxyResolverFactory;
+ MultiThreadedProxyResolver resolver(factory, kNumThreads);
+
+ int rv;
+
+ EXPECT_TRUE(resolver.expects_pac_bytes());
+
+ // Initialize the resolver.
+ TestCompletionCallback set_script_callback;
+ rv = resolver.SetPacScriptByData(ASCIIToUTF16("pac script bytes"),
+ &set_script_callback);
+ EXPECT_EQ(ERR_IO_PENDING, rv);
+ EXPECT_EQ(OK, set_script_callback.WaitForResult());
+ // One thread has been provisioned (i.e. one ProxyResolver was created).
+ ASSERT_EQ(1u, factory->resolvers().size());
+ EXPECT_EQ(ASCIIToUTF16("pac script bytes"),
+ factory->resolvers()[0]->last_pac_script());
+
+ const int kNumRequests = 4;
+ TestCompletionCallback callback[kNumRequests];
+ ProxyInfo results[kNumRequests];
+ ProxyResolver::RequestHandle request[kNumRequests];
+
+ // Start a request that will block the first thread.
+
+ factory->resolvers()[0]->Block();
- TestCompletionCallback callback1;
- ProxyInfo results1;
rv = resolver.GetProxyForURL(
- GURL("http://request1"), &results1, &callback1, NULL, BoundNetLog());
+ GURL("http://request0"), &results[0], &callback[0], &request[0],
+ BoundNetLog());
+
EXPECT_EQ(ERR_IO_PENDING, rv);
+ factory->resolvers()[0]->WaitUntilBlocked();
- // Unblock the worker thread so the requests can continue running.
- mock->Unblock();
+ // Start 3 more requests -- they should all be serviced by thread #2
+ // since thread #1 is blocked.
- // Wait for requests 0 and 1 to finish.
+ for (int i = 1; i < kNumRequests; ++i) {
+ rv = resolver.GetProxyForURL(
+ GURL(StringPrintf("http://request%d", i)),
+ &results[i], &callback[i], &request[i], BoundNetLog());
+ EXPECT_EQ(ERR_IO_PENDING, rv);
+ }
- rv = callback0.WaitForResult();
- EXPECT_EQ(0, rv);
- EXPECT_EQ("PROXY request0:80", results0.ToPacString());
+ // Wait for the three requests to complete (they should complete in FIFO
+ // order).
+ for (int i = 1; i < kNumRequests; ++i) {
+ EXPECT_EQ(i - 1, callback[i].WaitForResult());
+ }
- rv = callback1.WaitForResult();
- EXPECT_EQ(1, rv);
- EXPECT_EQ("PROXY request1:80", results1.ToPacString());
+ // Unblock the first thread.
+ factory->resolvers()[0]->Unblock();
+ EXPECT_EQ(0, callback[0].WaitForResult());
- // The SetPacScript callback should never have been completed.
- EXPECT_FALSE(set_pac_script_callback.have_result());
+ // All in all, the first thread should have seen just 1 request. And the
+ // second thread 3 requests.
+ ASSERT_EQ(2u, factory->resolvers().size());
+ EXPECT_EQ(1, factory->resolvers()[0]->request_count());
+ EXPECT_EQ(3, factory->resolvers()[1]->request_count());
}
} // namespace
+
} // namespace net
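The tests above exercise the two cancellation paths in MultiThreadedProxyResolver::CancelRequest(): a job already posted to a worker thread is merely flagged as cancelled (the synchronous resolve still runs to completion, but its user callback is suppressed), while a job still sitting in the FIFO queue is removed outright. In caller terms (in-tree types, sketch only):

ProxyResolver::RequestHandle request;
TestCompletionCallback callback;
ProxyInfo results;
resolver.GetProxyForURL(GURL("http://request/"), &results, &callback,
                        &request, BoundNetLog());
resolver.CancelRequest(request);
// |callback| is never invoked: callback.have_result() stays false whether
// the job was in-flight on a worker thread or still queued.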
diff --git a/net/proxy/proxy_resolver.h b/net/proxy/proxy_resolver.h
index b200206..37b1a3a 100644
--- a/net/proxy/proxy_resolver.h
+++ b/net/proxy/proxy_resolver.h
@@ -74,6 +74,10 @@ class ProxyResolver {
// no-op implementation.
virtual void PurgeMemory() {}
+ // Optional shutdown code to be run before destruction. This is only used
+ // by the multithreaded runner to signal cleanup from the origin thread.
+ virtual void Shutdown() {}
+
private:
// Called to set the PAC script backend to use. If |pac_url| is invalid,
// this is a request to use WPAD (auto detect). |pac_script| may be empty if
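The new Shutdown() hook exists because Executor::Destroy() joins the worker thread from the origin thread; a resolver blocked inside a synchronous call on the worker would otherwise deadlock that join. The CL wires the hook through ProxyResolverV8 to the JS bindings and on to the host resolver bridge (see the diffs below). A sketch of the override pattern, with |bridge_| as a hypothetical blocking dependency:

// Sketch: called on the ORIGIN thread before the worker thread is joined.
virtual void Shutdown() {
  bridge_->Shutdown();  // aborts any synchronous call the worker is stuck in
}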
diff --git a/net/proxy/proxy_resolver_js_bindings.cc b/net/proxy/proxy_resolver_js_bindings.cc
index 3bf1bad..08a7fe7 100644
--- a/net/proxy/proxy_resolver_js_bindings.cc
+++ b/net/proxy/proxy_resolver_js_bindings.cc
@@ -110,8 +110,12 @@ class DefaultJSBindings : public ProxyResolverJSBindings {
LOG(INFO) << "PAC-error: " << "line: " << line_number << ": " << message;
}
+ virtual void Shutdown() {
+ host_resolver_->Shutdown();
+ }
+
private:
- // Helper to execute a syncrhonous DNS resolve, using the per-request
+ // Helper to execute a synchronous DNS resolve, using the per-request
// DNS cache if there is one.
int DnsResolveHelper(const HostResolver::RequestInfo& info,
AddressList* address_list) {
diff --git a/net/proxy/proxy_resolver_js_bindings.h b/net/proxy/proxy_resolver_js_bindings.h
index 834ea73..b5912c4 100644
--- a/net/proxy/proxy_resolver_js_bindings.h
+++ b/net/proxy/proxy_resolver_js_bindings.h
@@ -54,6 +54,9 @@ class ProxyResolverJSBindings {
// if a line number is not applicable to this error.
virtual void OnError(int line_number, const string16& error) = 0;
+ // Called before the thread running the proxy resolver is stopped.
+ virtual void Shutdown() = 0;
+
// Creates a default javascript bindings implementation that will:
// - Send script error messages to LOG(INFO)
// - Send script alert()s to LOG(INFO)
diff --git a/net/proxy/proxy_resolver_v8.cc b/net/proxy/proxy_resolver_v8.cc
index 5860872..4a47b345 100644
--- a/net/proxy/proxy_resolver_v8.cc
+++ b/net/proxy/proxy_resolver_v8.cc
@@ -640,6 +640,10 @@ void ProxyResolverV8::PurgeMemory() {
context_->PurgeMemory();
}
+void ProxyResolverV8::Shutdown() {
+ js_bindings_->Shutdown();
+}
+
int ProxyResolverV8::SetPacScript(const GURL& /*url*/,
const string16& pac_script,
CompletionCallback* /*callback*/) {
diff --git a/net/proxy/proxy_resolver_v8.h b/net/proxy/proxy_resolver_v8.h
index bc4d731..20f31fc 100644
--- a/net/proxy/proxy_resolver_v8.h
+++ b/net/proxy/proxy_resolver_v8.h
@@ -50,6 +50,7 @@ class ProxyResolverV8 : public ProxyResolver {
const BoundNetLog& net_log);
virtual void CancelRequest(RequestHandle request);
virtual void PurgeMemory();
+ virtual void Shutdown();
ProxyResolverJSBindings* js_bindings() const { return js_bindings_.get(); }
diff --git a/net/proxy/proxy_resolver_v8_unittest.cc b/net/proxy/proxy_resolver_v8_unittest.cc
index 2e09e4a..ae664b2 100644
--- a/net/proxy/proxy_resolver_v8_unittest.cc
+++ b/net/proxy/proxy_resolver_v8_unittest.cc
@@ -62,6 +62,8 @@ class MockJSBindings : public ProxyResolverJSBindings {
errors_line_number.push_back(line_number);
}
+ virtual void Shutdown() {}
+
// Mock values to return.
std::string my_ip_address_result;
std::string my_ip_address_ex_result;
diff --git a/net/proxy/proxy_service.cc b/net/proxy/proxy_service.cc
index 26cb1b6..060745b 100644
--- a/net/proxy/proxy_service.cc
+++ b/net/proxy/proxy_service.cc
@@ -15,6 +15,7 @@
#include "net/base/net_errors.h"
#include "net/base/net_util.h"
#include "net/proxy/init_proxy_resolver.h"
+#include "net/proxy/multi_threaded_proxy_resolver.h"
#include "net/proxy/proxy_config_service_fixed.h"
#include "net/proxy/proxy_script_fetcher.h"
#if defined(OS_WIN)
@@ -29,7 +30,6 @@
#include "net/proxy/proxy_resolver.h"
#include "net/proxy/proxy_resolver_js_bindings.h"
#include "net/proxy/proxy_resolver_v8.h"
-#include "net/proxy/single_threaded_proxy_resolver.h"
#include "net/proxy/sync_host_resolver_bridge.h"
#include "net/url_request/url_request_context.h"
@@ -75,6 +75,55 @@ class ProxyResolverNull : public ProxyResolver {
}
};
+// This factory creates V8ProxyResolvers with appropriate javascript bindings.
+class ProxyResolverFactoryForV8 : public ProxyResolverFactory {
+ public:
+ // Both |async_host_resolver| and |host_resolver_loop| must remain valid for
+ // duration of our lifetime.
+ ProxyResolverFactoryForV8(HostResolver* async_host_resolver,
+ MessageLoop* host_resolver_loop)
+ : ProxyResolverFactory(true /*expects_pac_bytes*/),
+ async_host_resolver_(async_host_resolver),
+ host_resolver_loop_(host_resolver_loop) {
+ }
+
+ virtual ProxyResolver* CreateProxyResolver() {
+ // Create a synchronous host resolver wrapper that operates
+ // |async_host_resolver_| on |host_resolver_loop_|.
+ SyncHostResolverBridge* sync_host_resolver =
+ new SyncHostResolverBridge(async_host_resolver_, host_resolver_loop_);
+
+ ProxyResolverJSBindings* js_bindings =
+ ProxyResolverJSBindings::CreateDefault(sync_host_resolver);
+
+ // ProxyResolverV8 takes ownership of |js_bindings|.
+ return new ProxyResolverV8(js_bindings);
+ }
+
+ private:
+ scoped_refptr<HostResolver> async_host_resolver_;
+ MessageLoop* host_resolver_loop_;
+};
+
+// Creates ProxyResolvers using a non-V8 implementation.
+class ProxyResolverFactoryForNonV8 : public ProxyResolverFactory {
+ public:
+ ProxyResolverFactoryForNonV8()
+ : ProxyResolverFactory(false /*expects_pac_bytes*/) {}
+
+ virtual ProxyResolver* CreateProxyResolver() {
+#if defined(OS_WIN)
+ return new ProxyResolverWinHttp();
+#elif defined(OS_MACOSX)
+ return new ProxyResolverMac();
+#else
+ LOG(WARNING) << "PAC support disabled because there is no fallback "
+ "non-V8 implementation";
+ return new ProxyResolverNull();
+#endif
+ }
+};
+
// ProxyService::PacRequest ---------------------------------------------------
class ProxyService::PacRequest
@@ -218,32 +267,22 @@ ProxyService* ProxyService::Create(
URLRequestContext* url_request_context,
NetLog* net_log,
MessageLoop* io_loop) {
- ProxyResolver* proxy_resolver = NULL;
+ ProxyResolverFactory* sync_resolver_factory;
if (use_v8_resolver) {
- // Use the IO thread's host resolver (but since it is not threadsafe,
- // bridge requests from the PAC thread over to the IO thread).
- SyncHostResolverBridge* sync_host_resolver =
- new SyncHostResolverBridge(url_request_context->host_resolver(),
- io_loop);
-
- // Send javascript errors and alerts to LOG(INFO).
- ProxyResolverJSBindings* js_bindings =
- ProxyResolverJSBindings::CreateDefault(sync_host_resolver);
-
- // Wrap the (synchronous) ProxyResolver implementation in a single-threaded
- // asynchronous resolver. This version of SingleThreadedProxyResolver
- // additionally aborts any synchronous host resolves to avoid deadlock
- // during shutdown.
- proxy_resolver =
- new SingleThreadedProxyResolverUsingBridgedHostResolver(
- new ProxyResolverV8(js_bindings),
- sync_host_resolver);
+ sync_resolver_factory =
+ new ProxyResolverFactoryForV8(
+ url_request_context->host_resolver(),
+ io_loop);
} else {
- proxy_resolver =
- new SingleThreadedProxyResolver(CreateNonV8ProxyResolver());
+ sync_resolver_factory = new ProxyResolverFactoryForNonV8();
}
+ const size_t kMaxNumResolverThreads = 1u;
+ ProxyResolver* proxy_resolver =
+ new MultiThreadedProxyResolver(sync_resolver_factory,
+ kMaxNumResolverThreads);
+
ProxyService* proxy_service =
new ProxyService(proxy_config_service, proxy_resolver, net_log);
@@ -559,19 +598,6 @@ ProxyConfigService* ProxyService::CreateSystemProxyConfigService(
#endif
}
-// static
-ProxyResolver* ProxyService::CreateNonV8ProxyResolver() {
-#if defined(OS_WIN)
- return new ProxyResolverWinHttp();
-#elif defined(OS_MACOSX)
- return new ProxyResolverMac();
-#else
- LOG(WARNING) << "PAC support disabled because there is no fallback "
- "non-V8 implementation";
- return new ProxyResolverNull();
-#endif
-}
-
void ProxyService::UpdateConfig(const BoundNetLog& net_log) {
bool is_first_update = !config_has_been_initialized();
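As the commit message stresses, Chrome's policy is unchanged: ProxyService caps the pool at one thread via kMaxNumResolverThreads above. Raising the cap would be a one-line change at that call site (hypothetical, not part of this CL):

// Hypothetical: allow up to 4 PAC threads instead of 1.
const size_t kMaxNumResolverThreads = 4u;
ProxyResolver* proxy_resolver =
    new MultiThreadedProxyResolver(sync_resolver_factory,
                                   kMaxNumResolverThreads);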
diff --git a/net/proxy/proxy_service.h b/net/proxy/proxy_service.h
index 22435e4..5c981f8 100644
--- a/net/proxy/proxy_service.h
+++ b/net/proxy/proxy_service.h
@@ -181,10 +181,6 @@ class ProxyService : public base::RefCountedThreadSafe<ProxyService>,
~ProxyService();
- // Creates a proxy resolver appropriate for this platform that doesn't rely
- // on V8.
- static ProxyResolver* CreateNonV8ProxyResolver();
-
// Identifies the proxy configuration.
ProxyConfig::ID config_id() const { return config_.id(); }
diff --git a/net/proxy/single_threaded_proxy_resolver.cc b/net/proxy/single_threaded_proxy_resolver.cc
deleted file mode 100644
index 2fdc3ac..0000000
--- a/net/proxy/single_threaded_proxy_resolver.cc
+++ /dev/null
@@ -1,353 +0,0 @@
-// Copyright (c) 2010 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "net/proxy/single_threaded_proxy_resolver.h"
-
-#include "base/message_loop.h"
-#include "base/thread.h"
-#include "net/base/capturing_net_log.h"
-#include "net/base/net_errors.h"
-#include "net/proxy/proxy_info.h"
-
-namespace net {
-
-namespace {
-
-class PurgeMemoryTask : public base::RefCountedThreadSafe<PurgeMemoryTask> {
- public:
- explicit PurgeMemoryTask(ProxyResolver* resolver) : resolver_(resolver) {}
- void PurgeMemory() { resolver_->PurgeMemory(); }
- private:
- friend class base::RefCountedThreadSafe<PurgeMemoryTask>;
- ~PurgeMemoryTask() {}
- ProxyResolver* resolver_;
-};
-
-} // namespace
-
-// SingleThreadedProxyResolver::SetPacScriptTask ------------------------------
-
-// Runs on the worker thread to call ProxyResolver::SetPacScript.
-class SingleThreadedProxyResolver::SetPacScriptTask
- : public base::RefCountedThreadSafe<
- SingleThreadedProxyResolver::SetPacScriptTask> {
- public:
- SetPacScriptTask(SingleThreadedProxyResolver* coordinator,
- const GURL& pac_url,
- const string16& pac_script,
- CompletionCallback* callback)
- : coordinator_(coordinator),
- callback_(callback),
- pac_script_(pac_script),
- pac_url_(pac_url),
- origin_loop_(MessageLoop::current()) {
- DCHECK(callback);
- }
-
- // Start the SetPacScript request on the worker thread.
- void Start() {
- coordinator_->thread()->message_loop()->PostTask(
- FROM_HERE, NewRunnableMethod(this, &SetPacScriptTask::DoRequest,
- coordinator_->resolver_.get()));
- }
-
- void Cancel() {
- // Clear these to inform RequestComplete that it should not try to
- // access them.
- coordinator_ = NULL;
- callback_ = NULL;
- }
-
- // Returns true if Cancel() has been called.
- bool was_cancelled() const { return callback_ == NULL; }
-
- private:
- friend class base::RefCountedThreadSafe<
- SingleThreadedProxyResolver::SetPacScriptTask>;
-
- ~SetPacScriptTask() {}
-
- // Runs on the worker thread.
- void DoRequest(ProxyResolver* resolver) {
- int rv = resolver->expects_pac_bytes() ?
- resolver->SetPacScriptByData(pac_script_, NULL) :
- resolver->SetPacScriptByUrl(pac_url_, NULL);
-
- DCHECK_NE(rv, ERR_IO_PENDING);
- origin_loop_->PostTask(FROM_HERE,
- NewRunnableMethod(this, &SetPacScriptTask::RequestComplete, rv));
- }
-
- // Runs the completion callback on the origin thread.
- void RequestComplete(int result_code) {
- // The task may have been cancelled after it was started.
- if (!was_cancelled()) {
- CompletionCallback* callback = callback_;
- coordinator_->RemoveOutstandingSetPacScriptTask(this);
- callback->Run(result_code);
- }
- }
-
- // Must only be used on the "origin" thread.
- SingleThreadedProxyResolver* coordinator_;
- CompletionCallback* callback_;
- string16 pac_script_;
- GURL pac_url_;
-
- // Usable from within DoRequest on the worker thread.
- MessageLoop* origin_loop_;
-};
-
-// SingleThreadedProxyResolver::Job -------------------------------------------
-
-class SingleThreadedProxyResolver::Job
- : public base::RefCountedThreadSafe<SingleThreadedProxyResolver::Job> {
- public:
- // |coordinator| -- the SingleThreadedProxyResolver that owns this job.
- // |url| -- the URL of the query.
- // |results| -- the structure to fill with proxy resolve results.
- Job(SingleThreadedProxyResolver* coordinator,
- const GURL& url,
- ProxyInfo* results,
- CompletionCallback* callback,
- const BoundNetLog& net_log)
- : coordinator_(coordinator),
- callback_(callback),
- results_(results),
- net_log_(net_log),
- url_(url),
- is_started_(false),
- origin_loop_(MessageLoop::current()) {
- DCHECK(callback);
- }
-
- // Start the resolve proxy request on the worker thread.
- void Start() {
- is_started_ = true;
-
- size_t load_log_bound = 100;
-
- coordinator_->thread()->message_loop()->PostTask(
- FROM_HERE, NewRunnableMethod(this, &Job::DoQuery,
- coordinator_->resolver_.get(),
- load_log_bound));
- }
-
- bool is_started() const { return is_started_; }
-
- void Cancel() {
- // Clear these to inform QueryComplete that it should not try to
- // access them.
- coordinator_ = NULL;
- callback_ = NULL;
- results_ = NULL;
- }
-
- // Returns true if Cancel() has been called.
- bool was_cancelled() const { return callback_ == NULL; }
-
- BoundNetLog* net_log() { return &net_log_; }
-
- private:
- friend class base::RefCountedThreadSafe<SingleThreadedProxyResolver::Job>;
-
- ~Job() {}
-
- // Runs on the worker thread.
- void DoQuery(ProxyResolver* resolver, size_t load_log_bound) {
- worker_log_.reset(new CapturingNetLog(load_log_bound));
- BoundNetLog bound_worker_log(NetLog::Source(), worker_log_.get());
-
- int rv = resolver->GetProxyForURL(url_, &results_buf_, NULL, NULL,
- bound_worker_log);
- DCHECK_NE(rv, ERR_IO_PENDING);
-
- origin_loop_->PostTask(FROM_HERE,
- NewRunnableMethod(this, &Job::QueryComplete, rv));
- }
-
- // Runs the completion callback on the origin thread.
- void QueryComplete(int result_code) {
- // Merge the load log that was generated on the worker thread into the
- // main log.
- CapturingBoundNetLog bound_worker_log(NetLog::Source(),
- worker_log_.release());
- bound_worker_log.AppendTo(net_log_);
-
- // The Job may have been cancelled after it was started.
- if (!was_cancelled()) {
- if (result_code >= OK) { // Note: unit-tests use values > 0.
- results_->Use(results_buf_);
- }
- callback_->Run(result_code);
-
- // We check for cancellation once again, in case the callback deleted
- // the owning ProxyService (whose destructor will in turn cancel us).
- if (!was_cancelled())
- coordinator_->RemoveFrontOfJobsQueueAndStartNext(this);
- }
- }
-
- // Must only be used on the "origin" thread.
- SingleThreadedProxyResolver* coordinator_;
- CompletionCallback* callback_;
- ProxyInfo* results_;
- BoundNetLog net_log_;
- GURL url_;
- bool is_started_;
-
- // Usable from within DoQuery on the worker thread.
- ProxyInfo results_buf_;
- MessageLoop* origin_loop_;
-
- // Used to pass the captured events between DoQuery [worker thread] and
- // QueryComplete [origin thread].
- scoped_ptr<CapturingNetLog> worker_log_;
-};
-
-// SingleThreadedProxyResolver ------------------------------------------------
-
-SingleThreadedProxyResolver::SingleThreadedProxyResolver(
- ProxyResolver* resolver)
- : ProxyResolver(resolver->expects_pac_bytes()),
- resolver_(resolver) {
-}
-
-SingleThreadedProxyResolver::~SingleThreadedProxyResolver() {
- // Cancel the in-progress job (if any), and free the rest.
- for (PendingJobsQueue::iterator it = pending_jobs_.begin();
- it != pending_jobs_.end();
- ++it) {
- (*it)->Cancel();
- }
-
- if (outstanding_set_pac_script_task_)
- outstanding_set_pac_script_task_->Cancel();
-
- // Note that |thread_| is destroyed before |resolver_|. This is important
- // since |resolver_| could be running on |thread_|.
-}
-
-int SingleThreadedProxyResolver::GetProxyForURL(const GURL& url,
- ProxyInfo* results,
- CompletionCallback* callback,
- RequestHandle* request,
- const BoundNetLog& net_log) {
- DCHECK(callback);
-
- scoped_refptr<Job> job = new Job(this, url, results, callback, net_log);
- bool is_first_job = pending_jobs_.empty();
- pending_jobs_.push_back(job); // Jobs can never finish synchronously.
-
- if (is_first_job) {
- // If there is nothing already running, start the job now.
- EnsureThreadStarted();
- job->Start();
- } else {
- // Otherwise the job will get started eventually by ProcessPendingJobs().
- job->net_log()->BeginEvent(
- NetLog::TYPE_WAITING_FOR_SINGLE_PROXY_RESOLVER_THREAD, NULL);
- }
-
- // Completion will be notified through |callback|, unless the caller cancels
- // the request using |request|.
- if (request)
- *request = reinterpret_cast<RequestHandle>(job.get());
-
- return ERR_IO_PENDING;
-}
-
-// There are three states of the request we need to handle:
-// (1) Not started (just sitting in the queue).
-// (2) Executing Job::DoQuery in the worker thread.
-// (3) Waiting for Job::QueryComplete to be run on the origin thread.
-void SingleThreadedProxyResolver::CancelRequest(RequestHandle req) {
- DCHECK(req);
-
- Job* job = reinterpret_cast<Job*>(req);
-
- bool is_active_job = job->is_started() && !pending_jobs_.empty() &&
- pending_jobs_.front().get() == job;
-
- job->Cancel();
-
- if (is_active_job) {
- RemoveFrontOfJobsQueueAndStartNext(job);
- return;
- }
-
- // Otherwise just delete the job from the queue.
- PendingJobsQueue::iterator it = std::find(
- pending_jobs_.begin(), pending_jobs_.end(), job);
- DCHECK(it != pending_jobs_.end());
- pending_jobs_.erase(it);
-}
-
-void SingleThreadedProxyResolver::CancelSetPacScript() {
- DCHECK(outstanding_set_pac_script_task_);
- outstanding_set_pac_script_task_->Cancel();
- outstanding_set_pac_script_task_ = NULL;
-}
-
-void SingleThreadedProxyResolver::PurgeMemory() {
- if (thread_.get()) {
- scoped_refptr<PurgeMemoryTask> helper(new PurgeMemoryTask(resolver_.get()));
- thread_->message_loop()->PostTask(FROM_HERE,
- NewRunnableMethod(helper.get(), &PurgeMemoryTask::PurgeMemory));
- }
-}
-
-int SingleThreadedProxyResolver::SetPacScript(
- const GURL& pac_url,
- const string16& pac_script,
- CompletionCallback* callback) {
- EnsureThreadStarted();
- DCHECK(!outstanding_set_pac_script_task_);
-
- SetPacScriptTask* task = new SetPacScriptTask(
- this, pac_url, pac_script, callback);
- outstanding_set_pac_script_task_ = task;
- task->Start();
- return ERR_IO_PENDING;
-}
-
-void SingleThreadedProxyResolver::EnsureThreadStarted() {
- if (!thread_.get()) {
- thread_.reset(new base::Thread("pac-thread"));
- thread_->Start();
- }
-}
-
-void SingleThreadedProxyResolver::ProcessPendingJobs() {
- if (pending_jobs_.empty())
- return;
-
- // Get the next job to process (FIFO).
- Job* job = pending_jobs_.front().get();
- if (job->is_started())
- return;
-
- job->net_log()->EndEvent(
- NetLog::TYPE_WAITING_FOR_SINGLE_PROXY_RESOLVER_THREAD, NULL);
-
- EnsureThreadStarted();
- job->Start();
-}
-
-void SingleThreadedProxyResolver::RemoveFrontOfJobsQueueAndStartNext(
- Job* expected_job) {
- DCHECK_EQ(expected_job, pending_jobs_.front().get());
- pending_jobs_.pop_front();
-
- // Start next work item.
- ProcessPendingJobs();
-}
-
-void SingleThreadedProxyResolver::RemoveOutstandingSetPacScriptTask(
- SetPacScriptTask* task) {
- DCHECK_EQ(outstanding_set_pac_script_task_.get(), task);
- outstanding_set_pac_script_task_ = NULL;
-}
-
-} // namespace net
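
The deleted CancelRequest above enumerates three request states (queued, executing on the worker thread, and waiting for completion on the origin thread) and handles each differently. Below is a minimal, self-contained sketch of that state handling using only the standard library; the Job and Coordinator types here are illustrative stand-ins, not the actual Chromium classes.

// Illustrative sketch only: models the three cancellation states from the
// comment in SingleThreadedProxyResolver::CancelRequest, using standard
// library types instead of Chromium's base/ classes.
#include <algorithm>
#include <cassert>
#include <deque>
#include <iostream>
#include <memory>

struct Job {
  bool started = false;    // True once posted to the worker thread.
  bool cancelled = false;  // Set by Cancel(); completion becomes a no-op.
  void Cancel() { cancelled = true; }
};

class Coordinator {
 public:
  std::shared_ptr<Job> Submit() {
    auto job = std::make_shared<Job>();
    pending_.push_back(job);
    if (pending_.size() == 1)
      job->started = true;  // State (2): running on the worker.
    return job;             // Otherwise state (1): waiting in the queue.
  }

  void CancelRequest(const std::shared_ptr<Job>& job) {
    bool is_active =
        job->started && !pending_.empty() && pending_.front() == job;
    job->Cancel();  // States (2)/(3): completion will see |cancelled|.
    if (is_active) {
      // The worker may still be running it; just advance the queue so the
      // next job can start, like RemoveFrontOfJobsQueueAndStartNext does.
      pending_.pop_front();
      if (!pending_.empty())
        pending_.front()->started = true;
      return;
    }
    // State (1): the job never started, so simply drop it from the queue.
    pending_.erase(std::find(pending_.begin(), pending_.end(), job));
  }

 private:
  std::deque<std::shared_ptr<Job>> pending_;
};

int main() {
  Coordinator c;
  auto a = c.Submit();  // Becomes the active job.
  auto b = c.Submit();  // Queued behind it.
  c.CancelRequest(b);   // State (1): erased without ever starting.
  c.CancelRequest(a);   // State (2)/(3): flagged, queue advances.
  assert(a->cancelled && b->cancelled);
  std::cout << "all cancellation states handled\n";
}
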
diff --git a/net/proxy/single_threaded_proxy_resolver.h b/net/proxy/single_threaded_proxy_resolver.h
deleted file mode 100644
index 1c9582c..0000000
--- a/net/proxy/single_threaded_proxy_resolver.h
+++ /dev/null
@@ -1,94 +0,0 @@
-// Copyright (c) 2009 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef NET_PROXY_SINGLE_THREADED_PROXY_RESOLVER_H_
-#define NET_PROXY_SINGLE_THREADED_PROXY_RESOLVER_H_
-
-#include <deque>
-#include <string>
-
-#include "base/ref_counted.h"
-#include "base/scoped_ptr.h"
-#include "net/proxy/proxy_resolver.h"
-
-namespace base {
-class Thread;
-} // namespace base
-
-namespace net {
-
-// ProxyResolver implementation that wraps a synchronous ProxyResolver, and
-// runs it on a single worker thread. If multiple requests accumulate, they
-// are serviced in FIFO order.
-class SingleThreadedProxyResolver : public ProxyResolver {
- public:
- // |resolver| is a synchronous ProxyResolver implementation. It doesn't
- // have to be thread-safe, since it is run on exactly one thread. The
- // constructor takes ownership of |resolver|.
- explicit SingleThreadedProxyResolver(ProxyResolver* resolver);
-
- virtual ~SingleThreadedProxyResolver();
-
- // ProxyResolver implementation:
- virtual int GetProxyForURL(const GURL& url,
- ProxyInfo* results,
- CompletionCallback* callback,
- RequestHandle* request,
- const BoundNetLog& net_log);
- virtual void CancelRequest(RequestHandle request);
- virtual void CancelSetPacScript();
- virtual void PurgeMemory();
-
- protected:
- // The wrapped (synchronous) ProxyResolver.
- ProxyResolver* resolver() { return resolver_.get(); }
-
- private:
- // Refcounted helper class that bridges between origin thread and worker
- // thread.
- class Job;
- friend class Job;
- class SetPacScriptTask;
- friend class SetPacScriptTask;
- // FIFO queue that contains the in-progress job, and any pending jobs.
- typedef std::deque<scoped_refptr<Job> > PendingJobsQueue;
-
- base::Thread* thread() { return thread_.get(); }
-
- // ProxyResolver implementation:
- virtual int SetPacScript(const GURL& pac_url,
- const string16& pac_script,
- CompletionCallback* callback);
-
- // Starts the worker thread if it isn't already running.
- void EnsureThreadStarted();
-
- // Starts the next job from |pending_jobs_| if possible.
- void ProcessPendingJobs();
-
- // Removes the front entry of the jobs queue. |expected_job| is our
- // expectation of what the front of the job queue is; it is only used by
- // DCHECK for verification purposes.
- void RemoveFrontOfJobsQueueAndStartNext(Job* expected_job);
-
- // Clears |outstanding_set_pac_script_task_|.
- // Called when |task| has just finished.
- void RemoveOutstandingSetPacScriptTask(SetPacScriptTask* task);
-
- // The synchronous resolver implementation.
- scoped_ptr<ProxyResolver> resolver_;
-
- // The thread that |resolver_| runs on.
- // Note that declaration ordering is important here. |thread_| needs to be
- // destroyed *before* |resolver_|, in case |resolver_| is currently
- // executing on |thread_|.
- scoped_ptr<base::Thread> thread_;
-
- PendingJobsQueue pending_jobs_;
- scoped_refptr<SetPacScriptTask> outstanding_set_pac_script_task_;
-};
-
-} // namespace net
-
-#endif // NET_PROXY_SINGLE_THREADED_PROXY_RESOLVER_H_
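
Both the destructor comment in the deleted .cc and the member-ordering comment in this header rely on the C++ guarantee that non-static data members are destroyed in reverse declaration order: because |thread_| is declared after |resolver_|, the thread is stopped and joined before the resolver it may be running on is freed. A tiny sketch of the idiom, with made-up class names for illustration:

#include <iostream>
#include <memory>

struct Resolver {
  ~Resolver() { std::cout << "2. resolver destroyed\n"; }
};

struct WorkerThread {
  // Stands in for base::Thread, whose destructor stops and joins the thread.
  ~WorkerThread() { std::cout << "1. thread joined first\n"; }
};

class Wrapper {
  // Declaration order matters: members are destroyed bottom-up, so
  // |thread_| (declared last) goes away before |resolver_| does.
  std::unique_ptr<Resolver> resolver_ = std::make_unique<Resolver>();
  std::unique_ptr<WorkerThread> thread_ = std::make_unique<WorkerThread>();
};

int main() {
  Wrapper w;  // Destruction prints "1. thread joined first", then "2. ...".
}
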
diff --git a/net/proxy/sync_host_resolver_bridge.cc b/net/proxy/sync_host_resolver_bridge.cc
index caf4549..6c62c7d 100644
--- a/net/proxy/sync_host_resolver_bridge.cc
+++ b/net/proxy/sync_host_resolver_bridge.cc
@@ -188,19 +188,4 @@ void SyncHostResolverBridge::Shutdown() {
core_->Shutdown();
}
-// SingleThreadedProxyResolverUsingBridgedHostResolver -----------------------
-
-SingleThreadedProxyResolverUsingBridgedHostResolver::
-SingleThreadedProxyResolverUsingBridgedHostResolver(
- ProxyResolver* proxy_resolver,
- SyncHostResolverBridge* bridged_host_resolver)
- : SingleThreadedProxyResolver(proxy_resolver),
- bridged_host_resolver_(bridged_host_resolver) {
-}
-
-SingleThreadedProxyResolverUsingBridgedHostResolver::
-~SingleThreadedProxyResolverUsingBridgedHostResolver() {
- bridged_host_resolver_->Shutdown();
-}
-
} // namespace net
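
The subclass removed here existed solely to call the bridge's Shutdown() from its destructor, before the worker thread was torn down. Judging from the hunks that follow (SyncProxyResolver gains a virtual Shutdown() override, and the regression test is renamed ShutdownIsCalledBeforeThreadJoin), the new MultiThreadedProxyResolver appears to invoke Shutdown() on each worker's resolver before joining its thread, which is why a destructor-only subclass is no longer needed. A hedged sketch of that hook pattern follows; the types are abbreviated stand-ins, not the real Chromium signatures.

#include <iostream>
#include <memory>
#include <utility>

// Abbreviated stand-ins for the real interfaces.
struct Bridge {
  void Shutdown() { std::cout << "bridge shut down\n"; }
};

struct Resolver {
  virtual ~Resolver() = default;
  // Default is a no-op; resolvers holding a bridge override it.
  virtual void Shutdown() {}
};

struct BridgedResolver : Resolver {
  explicit BridgedResolver(std::shared_ptr<Bridge> b) : bridge_(std::move(b)) {}
  void Shutdown() override { bridge_->Shutdown(); }  // Breaks the deadlock.
 private:
  std::shared_ptr<Bridge> bridge_;
};

struct MultiThreadedOwner {
  explicit MultiThreadedOwner(std::unique_ptr<Resolver> r)
      : resolver_(std::move(r)) {}
  ~MultiThreadedOwner() {
    resolver_->Shutdown();  // 1. Abort outstanding synchronous resolves.
    JoinWorkerThread();     // 2. Only then join, so the join cannot hang.
  }
 private:
  void JoinWorkerThread() { std::cout << "worker joined\n"; }
  std::unique_ptr<Resolver> resolver_;
};

int main() {
  MultiThreadedOwner owner(
      std::make_unique<BridgedResolver>(std::make_shared<Bridge>()));
}
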
diff --git a/net/proxy/sync_host_resolver_bridge.h b/net/proxy/sync_host_resolver_bridge.h
index 4d25b4d..b02d496 100644
--- a/net/proxy/sync_host_resolver_bridge.h
+++ b/net/proxy/sync_host_resolver_bridge.h
@@ -7,7 +7,6 @@
#include "base/scoped_ptr.h"
#include "net/base/host_resolver.h"
-#include "net/proxy/single_threaded_proxy_resolver.h"
class MessageLoop;
@@ -35,7 +34,7 @@ class SyncHostResolverBridge : public HostResolver {
// The Shutdown() method should be called prior to destruction, from
// |host_resolver_loop_|. It aborts any in-progress synchronous resolves, to
// prevent deadlocks.
- void Shutdown();
+ virtual void Shutdown();
private:
class Core;
@@ -44,22 +43,6 @@ class SyncHostResolverBridge : public HostResolver {
scoped_refptr<Core> core_;
};
-// Subclass of SingleThreadedProxyResolver that additionally calls
-// |bridged_host_resolver_->Shutdown()| during its destructor.
-class SingleThreadedProxyResolverUsingBridgedHostResolver
- : public SingleThreadedProxyResolver {
- public:
- SingleThreadedProxyResolverUsingBridgedHostResolver(
- ProxyResolver* proxy_resolver,
- SyncHostResolverBridge* bridged_host_resolver);
-
- virtual ~SingleThreadedProxyResolverUsingBridgedHostResolver();
-
- private:
- scoped_refptr<SyncHostResolverBridge> bridged_host_resolver_;
-};
-
-
} // namespace net
#endif // NET_PROXY_SYNC_HOST_RESOLVER_BRIDGE_H_
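
The header comment notes that Shutdown() aborts in-progress synchronous resolves to prevent deadlocks: a worker blocked on a synchronous resolve could otherwise never be joined. The sketch below shows the general shape of such an abortable blocking wait built on a condition variable; it is purely illustrative and omits the message-loop plumbing the real Core performs.

// Sketch of the abort-on-shutdown idea behind SyncHostResolverBridge:
// a worker blocks until either a result arrives or Shutdown() aborts it.
#include <condition_variable>
#include <iostream>
#include <mutex>
#include <thread>

class SyncBridge {
 public:
  // Called on the worker thread; blocks until a result or shutdown.
  int ResolveBlocking() {
    std::unique_lock<std::mutex> lock(mu_);
    cv_.wait(lock, [this] { return have_result_ || shutdown_; });
    return shutdown_ ? -1 /* stands in for ERR_ABORTED */ : result_;
  }

  // Called on the origin thread when the async resolve finishes.
  void OnResult(int result) {
    {
      std::lock_guard<std::mutex> lock(mu_);
      result_ = result;
      have_result_ = true;
    }
    cv_.notify_all();
  }

  // Called before joining the worker; wakes any blocked resolve so the
  // join cannot deadlock on a request that will never complete.
  void Shutdown() {
    {
      std::lock_guard<std::mutex> lock(mu_);
      shutdown_ = true;
    }
    cv_.notify_all();
  }

 private:
  std::mutex mu_;
  std::condition_variable cv_;
  bool have_result_ = false;
  bool shutdown_ = false;
  int result_ = 0;
};

int main() {
  SyncBridge bridge;
  std::thread worker([&] {
    std::cout << "resolve returned " << bridge.ResolveBlocking() << "\n";
  });
  bridge.Shutdown();  // No result will ever arrive; the worker still wakes.
  worker.join();
}
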
diff --git a/net/proxy/sync_host_resolver_bridge_unittest.cc b/net/proxy/sync_host_resolver_bridge_unittest.cc
index 62c662a..5f02213 100644
--- a/net/proxy/sync_host_resolver_bridge_unittest.cc
+++ b/net/proxy/sync_host_resolver_bridge_unittest.cc
@@ -9,10 +9,14 @@
#include "net/base/address_list.h"
#include "net/base/net_errors.h"
#include "net/base/net_log.h"
+#include "net/proxy/multi_threaded_proxy_resolver.h"
#include "net/base/test_completion_callback.h"
#include "net/proxy/proxy_info.h"
#include "testing/gtest/include/gtest/gtest.h"
+// TODO(eroman): This test should be moved into
+// multi_threaded_proxy_resolver_unittest.cc.
+
namespace net {
namespace {
@@ -75,7 +79,7 @@ class BlockableHostResolver : public HostResolver {
// on |host_resolver| in response to GetProxyForURL().
class SyncProxyResolver : public ProxyResolver {
public:
- explicit SyncProxyResolver(HostResolver* host_resolver)
+ explicit SyncProxyResolver(SyncHostResolverBridge* host_resolver)
: ProxyResolver(false), host_resolver_(host_resolver) {}
virtual int GetProxyForURL(const GURL& url,
@@ -101,15 +105,33 @@ class SyncProxyResolver : public ProxyResolver {
NOTREACHED();
}
+ virtual void Shutdown() {
+ host_resolver_->Shutdown();
+ }
+
private:
virtual int SetPacScript(const GURL& pac_url,
const string16& pac_script,
CompletionCallback* callback) {
- NOTREACHED();
return OK;
}
- scoped_refptr<HostResolver> host_resolver_;
+ scoped_refptr<SyncHostResolverBridge> host_resolver_;
+};
+
+class SyncProxyResolverFactory : public ProxyResolverFactory {
+ public:
+ explicit SyncProxyResolverFactory(SyncHostResolverBridge* sync_host_resolver)
+ : ProxyResolverFactory(false),
+ sync_host_resolver_(sync_host_resolver) {
+ }
+
+ virtual ProxyResolver* CreateProxyResolver() {
+ return new SyncProxyResolver(sync_host_resolver_);
+ }
+
+ private:
+ scoped_refptr<SyncHostResolverBridge> sync_host_resolver_;
};
// This helper thread is used to create the circumstances for the deadlock.
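
The factory indirection introduced above is what lets MultiThreadedProxyResolver scale past a single thread: a synchronous resolver need not be thread-safe, so each worker thread gets its own instance from the factory instead of sharing one (here the factory is constructed with 1u, preserving the old single-threaded behavior). A minimal sketch of the per-thread-instance pattern, with simplified types rather than the real ProxyResolverFactory signature:

#include <cstddef>
#include <iostream>
#include <memory>
#include <vector>

struct Resolver {
  virtual ~Resolver() = default;
  virtual int GetProxyForURL() = 0;  // Synchronous, single-thread-only.
};

struct Factory {
  virtual ~Factory() = default;
  virtual std::unique_ptr<Resolver> Create() = 0;
};

struct SyncResolver : Resolver {
  int GetProxyForURL() override { return 0; }
};

struct SyncResolverFactory : Factory {
  std::unique_ptr<Resolver> Create() override {
    return std::make_unique<SyncResolver>();
  }
};

int main() {
  // Each worker owns a private resolver, so no cross-thread sharing occurs.
  SyncResolverFactory factory;
  const std::size_t max_threads = 3;  // Analogous to the constructor's cap.
  std::vector<std::unique_ptr<Resolver>> per_thread_resolvers;
  for (std::size_t i = 0; i < max_threads; ++i)
    per_thread_resolvers.push_back(factory.Create());
  std::cout << per_thread_resolvers.size() << " resolvers for "
            << max_threads << " worker threads\n";
}
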
@@ -137,9 +159,14 @@ class IOThread : public base::Thread {
new SyncHostResolverBridge(async_resolver_, message_loop());
proxy_resolver_.reset(
- new SingleThreadedProxyResolverUsingBridgedHostResolver(
- new SyncProxyResolver(sync_resolver),
- sync_resolver));
+ new MultiThreadedProxyResolver(
+ new SyncProxyResolverFactory(sync_resolver),
+ 1u));
+
+ // Initialize the resolver.
+ TestCompletionCallback callback;
+ proxy_resolver_->SetPacScriptByUrl(GURL(), &callback);
+ EXPECT_EQ(OK, callback.WaitForResult());
// Start an asynchronous request to the proxy resolver
// (note that it will never complete).
@@ -178,7 +205,7 @@ class IOThread : public base::Thread {
// Test that a deadlock does not happen during shutdown when a host resolve
// is outstanding on the SyncHostResolverBridge.
// This is a regression test for http://crbug.com/41244.
-TEST(SingleThreadedProxyResolverWithBridgedHostResolverTest, ShutdownDeadlock) {
+TEST(MultiThreadedProxyResolverTest, ShutdownIsCalledBeforeThreadJoin) {
IOThread io_thread;
base::Thread::Options options;
options.message_loop_type = MessageLoop::TYPE_IO;