author     mbelshe@chromium.org <mbelshe@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>  2010-03-14 08:37:32 +0000
committer  mbelshe@chromium.org <mbelshe@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>  2010-03-14 08:37:32 +0000
commit     6b624c635e92dd4b5fe215ef764fd6f12dcbd0b7 (patch)
tree       61b4eb06ecdc407f6932b8178f347ae344455315 /net
parent     caa55f3e114021dccc1935ed6fbac6247dca3eb7 (diff)
When the connect for a new socket group takes too long, issue a single
backup socket request to retry the connect. This reduces latency in the
presence of packet loss.
BUG=36629
TEST=TCPClientSocketPoolTest.BackupSocket*
Review URL: http://codereview.chromium.org/842004
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@41543 0039d316-1c4b-4281-b951-d872f2087c98
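The core of the change is a second connect attempt per socket group, armed on a short timer and raced against the original. A minimal standalone sketch of that idea, assuming nothing from Chromium (TryConnect, ConnectWithBackup, and the latencies below are illustrative, not the pool's API):

// Sketch only: race a delayed "backup" connect against a slow primary one.
// None of these names are Chromium APIs.
#include <chrono>
#include <future>
#include <iostream>
#include <string>
#include <thread>

using namespace std::chrono_literals;

// Stand-in for a connect attempt that takes |latency| to complete.
std::string TryConnect(const std::string& label,
                       std::chrono::milliseconds latency) {
  std::this_thread::sleep_for(latency);
  return label;
}

// Start the primary connect; if it has not finished within |backup_delay|,
// issue a single backup attempt and return whichever finishes first.
std::string ConnectWithBackup(std::chrono::milliseconds backup_delay) {
  auto primary = std::async(std::launch::async, TryConnect, "primary", 3000ms);
  if (primary.wait_for(backup_delay) == std::future_status::ready)
    return primary.get();  // Fast path: no backup needed.

  auto backup = std::async(std::launch::async, TryConnect, "backup", 20ms);
  while (true) {  // Poll both attempts; the first one ready wins.
    if (backup.wait_for(5ms) == std::future_status::ready)
      return backup.get();
    if (primary.wait_for(5ms) == std::future_status::ready)
      return primary.get();
  }
}

int main() {
  // 250 ms mirrors the kMaxConnectRetryIntervalMs constant added by this patch.
  std::cout << "connected via: " << ConnectWithBackup(250ms) << "\n";
  // The losing attempt's future blocks in its destructor, so the program may
  // take a few seconds to finish.
  return 0;
}

In the actual patch the backup job is only armed when the group has no other sockets, and it is torn down as soon as any connect for the group completes.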
Diffstat (limited to 'net')
-rw-r--r--  net/base/load_log_event_type_list.h             |   6
-rw-r--r--  net/socket/client_socket_pool.h                 |   3
-rw-r--r--  net/socket/client_socket_pool_base.cc           |  74
-rw-r--r--  net/socket/client_socket_pool_base.h            |  46
-rw-r--r--  net/socket/client_socket_pool_base_unittest.cc  |  28
-rw-r--r--  net/socket/tcp_client_socket_pool_unittest.cc   | 145
6 files changed, 283 insertions, 19 deletions
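Within the pool itself, the backup is driven by one cancelable delayed task per group: StartBackupSocketTimer() posts it via ScopedRunnableMethodFactory and PostDelayedTask, and CleanupBackupJob() cancels it as soon as any connect for the group completes. A rough standalone analogue of that arm/cancel pattern, using an assumed CancelableDelayedTask helper that is not a Chromium class:

// Sketch only: a one-shot delayed task that can be canceled before it fires,
// analogous to the backup_task armed by StartBackupSocketTimer().
#include <chrono>
#include <condition_variable>
#include <functional>
#include <iostream>
#include <mutex>
#include <thread>

class CancelableDelayedTask {
 public:
  CancelableDelayedTask(std::chrono::milliseconds delay,
                        std::function<void()> fn)
      : worker_([this, delay, fn] {
          std::unique_lock<std::mutex> lock(mu_);
          // Run |fn| after |delay| unless Cancel() wakes us up first.
          if (!cv_.wait_for(lock, delay, [this] { return canceled_; }))
            fn();
        }) {}

  void Cancel() {
    {
      std::lock_guard<std::mutex> lock(mu_);
      canceled_ = true;
    }
    cv_.notify_one();
  }

  ~CancelableDelayedTask() {
    Cancel();
    worker_.join();
  }

 private:
  std::mutex mu_;
  std::condition_variable cv_;
  bool canceled_ = false;
  std::thread worker_;  // Declared last so mu_/cv_ are ready before it starts.
};

int main() {
  CancelableDelayedTask backup_timer(std::chrono::milliseconds(250), [] {
    std::cout << "timer fired: would start the backup connect\n";
  });

  // Pretend the primary connect completed quickly; cancel the pending timer,
  // much like OnConnectJobComplete() calls group.CleanupBackupJob().
  std::this_thread::sleep_for(std::chrono::milliseconds(50));
  backup_timer.Cancel();
  return 0;
}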
diff --git a/net/base/load_log_event_type_list.h b/net/base/load_log_event_type_list.h
index 99d1e63..be43760 100644
--- a/net/base/load_log_event_type_list.h
+++ b/net/base/load_log_event_type_list.h
@@ -118,6 +118,12 @@ EVENT_TYPE(SOCKET_POOL_STALLED_MAX_SOCKETS)
 // The request stalled because there are too many sockets in the group.
 EVENT_TYPE(SOCKET_POOL_STALLED_MAX_SOCKETS_PER_GROUP)
 
+// A backup socket is created due to a slow connect.
+EVENT_TYPE(SOCKET_BACKUP_CREATED)
+
+// The backup socket timer is extended due to a slow connect.
+EVENT_TYPE(SOCKET_BACKUP_TIMER_EXTENDED)
+
 // ------------------------------------------------------------------------
 // URLRequest
 // ------------------------------------------------------------------------
diff --git a/net/socket/client_socket_pool.h b/net/socket/client_socket_pool.h
index 4bd1a58..718b1f8 100644
--- a/net/socket/client_socket_pool.h
+++ b/net/socket/client_socket_pool.h
@@ -82,6 +82,9 @@ class ClientSocketPool : public base::RefCounted<ClientSocketPool> {
   virtual LoadState GetLoadState(const std::string& group_name,
                                  const ClientSocketHandle* handle) const = 0;
 
+  // Returns the maximum amount of time to wait before retrying a connect.
+  static const int kMaxConnectRetryIntervalMs = 250;
+
  protected:
   ClientSocketPool() {}
   virtual ~ClientSocketPool() {}
diff --git a/net/socket/client_socket_pool_base.cc b/net/socket/client_socket_pool_base.cc
index 52b31a6..65823a5 100644
--- a/net/socket/client_socket_pool_base.cc
+++ b/net/socket/client_socket_pool_base.cc
@@ -7,6 +7,7 @@
 #include "base/compiler_specific.h"
 #include "base/format_macros.h"
 #include "base/message_loop.h"
+#include "base/stats_counters.h"
 #include "base/stl_util-inl.h"
 #include "base/string_util.h"
 #include "base/time.h"
@@ -118,7 +119,8 @@ ClientSocketPoolBaseHelper::ClientSocketPoolBaseHelper(
       used_idle_socket_timeout_(used_idle_socket_timeout),
       may_have_stalled_group_(false),
       connect_job_factory_(connect_job_factory),
-      network_change_notifier_(network_change_notifier) {
+      network_change_notifier_(network_change_notifier),
+      ALLOW_THIS_IN_INITIALIZER_LIST(method_factory_(this)) {
   DCHECK_LE(0, max_sockets_per_group);
   DCHECK_LE(max_sockets_per_group, max_sockets);
 
@@ -133,7 +135,7 @@ ClientSocketPoolBaseHelper::~ClientSocketPoolBaseHelper() {
   // sockets or pending requests. They should have all been cleaned up prior
   // to the manager being destroyed.
   CloseIdleSockets();
-  DCHECK(group_map_.empty());
+  CHECK(group_map_.empty());
   DCHECK_EQ(0, connecting_socket_count_);
 
   if (network_change_notifier_)
@@ -243,6 +245,17 @@ int ClientSocketPoolBaseHelper::RequestSocketInternal(
     HandOutSocket(connect_job->ReleaseSocket(), false /* not reused */,
                   handle, base::TimeDelta(), &group, request->load_log());
   } else if (rv == ERR_IO_PENDING) {
+    // If we don't have any sockets in this group, set a timer for potentially
+    // creating a new one. If the SYN is lost, this backup socket may complete
+    // before the slow socket, improving end user latency.
+    if (group.IsEmpty() && !group.backup_job) {
+      group.backup_job = connect_job_factory_->NewConnectJob(group_name,
+                                                             *request,
+                                                             this,
+                                                             job_load_log);
+      StartBackupSocketTimer(group_name);
+    }
+
     connecting_socket_count_++;
 
     ConnectJob* job = connect_job.release();
@@ -254,6 +267,54 @@ int ClientSocketPoolBaseHelper::RequestSocketInternal(
   return rv;
 }
 
+void ClientSocketPoolBaseHelper::StartBackupSocketTimer(
+    const std::string& group_name) {
+  CHECK(ContainsKey(group_map_, group_name));
+  Group& group = group_map_[group_name];
+
+  // Only allow one timer pending to create a backup socket.
+  if (group.backup_task)
+    return;
+
+  group.backup_task = method_factory_.NewRunnableMethod(
+      &ClientSocketPoolBaseHelper::OnBackupSocketTimerFired, group_name);
+  MessageLoop::current()->PostDelayedTask(FROM_HERE, group.backup_task,
+                                          ConnectRetryIntervalMs());
+}
+
+void ClientSocketPoolBaseHelper::OnBackupSocketTimerFired(
+    const std::string& group_name) {
+  CHECK(ContainsKey(group_map_, group_name));
+
+  Group& group = group_map_[group_name];
+
+  CHECK(group.backup_task);
+  group.backup_task = NULL;
+
+  CHECK(group.backup_job);
+
+  // If our backup job is waiting on DNS, just reset the timer.
+  CHECK(group.jobs.size());
+  if ((*group.jobs.begin())->GetLoadState() == LOAD_STATE_RESOLVING_HOST) {
+    LoadLog::AddEvent(group.backup_job->load_log(),
+                      LoadLog::TYPE_SOCKET_BACKUP_TIMER_EXTENDED);
+    StartBackupSocketTimer(group_name);
+    return;
+  }
+
+  LoadLog::AddEvent(group.backup_job->load_log(),
+                    LoadLog::TYPE_SOCKET_BACKUP_CREATED);
+  SIMPLE_STATS_COUNTER("socket.backup_created");
+  int rv = group.backup_job->Connect();
+  if (rv == ERR_IO_PENDING) {
+    connecting_socket_count_++;
+    group.jobs.insert(group.backup_job);
+    group.backup_job = NULL;
+  } else {
+    OnConnectJobComplete(rv, group.backup_job);
+  }
+}
+
 void ClientSocketPoolBaseHelper::CancelRequest(
     const std::string& group_name, const ClientSocketHandle* handle) {
   CHECK(ContainsKey(group_map_, group_name));
@@ -486,6 +547,10 @@ void ClientSocketPoolBaseHelper::OnConnectJobComplete(
   CHECK(group_it != group_map_.end());
   Group& group = group_it->second;
 
+  // We've had a connect on the socket; discard any pending backup job
+  // for this group and kill the pending task.
+  group.CleanupBackupJob();
+
   scoped_ptr<ClientSocket> socket(job->ReleaseSocket());
 
   scoped_refptr<LoadLog> job_load_log(job->load_log());
@@ -632,6 +697,11 @@ void ClientSocketPoolBaseHelper::CancelAllConnectJobs() {
     connecting_socket_count_ -= group.jobs.size();
     STLDeleteElements(&group.jobs);
 
+    if (group.backup_task) {
+      group.backup_task->Cancel();
+      group.backup_task = NULL;
+    }
+
     // Delete group if no longer needed.
     if (group.IsEmpty()) {
       group_map_.erase(i++);
diff --git a/net/socket/client_socket_pool_base.h b/net/socket/client_socket_pool_base.h
index 3f400da..82068c4 100644
--- a/net/socket/client_socket_pool_base.h
+++ b/net/socket/client_socket_pool_base.h
@@ -28,8 +28,10 @@
 #include <string>
 
 #include "base/basictypes.h"
+#include "base/compiler_specific.h"
 #include "base/ref_counted.h"
 #include "base/scoped_ptr.h"
+#include "base/task.h"
 #include "base/time.h"
 #include "base/timer.h"
 #include "net/base/address_list.h"
@@ -197,6 +199,12 @@ class ClientSocketPoolBaseHelper
   LoadState GetLoadState(const std::string& group_name,
                          const ClientSocketHandle* handle) const;
 
+  int ConnectRetryIntervalMs() const {
+    // TODO(mbelshe): Make this tuned dynamically based on measured RTT.
+    // For now, just use the max retry interval.
+    return ClientSocketPool::kMaxConnectRetryIntervalMs;
+  }
+
   // ConnectJob::Delegate methods:
   virtual void OnConnectJobComplete(int result, ConnectJob* job);
 
@@ -246,10 +254,19 @@ class ClientSocketPoolBaseHelper
   // this number of sockets held by clients, some of them may be released soon,
   // since ReleaseSocket() was called of them, but the DoReleaseSocket() task
   // has not run yet for them. |num_releasing_sockets| tracks these values,
-  // which is useful for not starting up new ConnectJobs when sockets may become
-  // available really soon.
+  // which is useful for not starting up new ConnectJobs when sockets may
+  // become available really soon.
   struct Group {
-    Group() : active_socket_count(0), num_releasing_sockets(0) {}
+    Group()
+        : active_socket_count(0),
+          num_releasing_sockets(0),
+          backup_job(NULL),
+          backup_task(NULL) {
+    }
+
+    ~Group() {
+      CleanupBackupJob();
+    }
 
     bool IsEmpty() const {
       return active_socket_count == 0 && idle_sockets.empty() && jobs.empty() &&
@@ -269,12 +286,26 @@ class ClientSocketPoolBaseHelper
       return pending_requests.front()->priority();
     }
 
+    void CleanupBackupJob() {
+      if (backup_job) {
+        delete backup_job;
+        backup_job = NULL;
+      }
+      if (backup_task) {
+        backup_task->Cancel();
+        backup_task = NULL;
+      }
+    }
+
     std::deque<IdleSocket> idle_sockets;
     std::set<const ConnectJob*> jobs;
     RequestQueue pending_requests;
     int active_socket_count;  // number of active sockets used by clients
     // Number of sockets being released within one loop through the MessageLoop.
    int num_releasing_sockets;
+    // A backup job in case the connect for this group takes too long.
+    ConnectJob* backup_job;
+    CancelableTask* backup_task;
   };
 
   typedef std::map<std::string, Group> GroupMap;
@@ -345,6 +376,12 @@ class ClientSocketPoolBaseHelper
   int RequestSocketInternal(const std::string& group_name,
                             const Request* request);
 
+  // Set a timer to create a backup socket if it takes too long to create one.
+  void StartBackupSocketTimer(const std::string& group_name);
+
+  // Called when the backup socket timer fires.
+  void OnBackupSocketTimerFired(const std::string& group_name);
+
   GroupMap group_map_;
 
   // Timer used to periodically prune idle sockets that timed out or can't be
@@ -389,6 +426,9 @@ class ClientSocketPoolBaseHelper
   const scoped_ptr<ConnectJobFactory> connect_job_factory_;
 
   NetworkChangeNotifier* const network_change_notifier_;
+
+  // A factory to pin the backup_job tasks.
+  ScopedRunnableMethodFactory<ClientSocketPoolBaseHelper> method_factory_;
 };
 
 }  // namespace internal
diff --git a/net/socket/client_socket_pool_base_unittest.cc b/net/socket/client_socket_pool_base_unittest.cc
index 9fe7684..8ad24cf 100644
--- a/net/socket/client_socket_pool_base_unittest.cc
+++ b/net/socket/client_socket_pool_base_unittest.cc
@@ -1120,14 +1120,18 @@ TEST_F(ClientSocketPoolBaseTest, BasicAsynchronous) {
   EXPECT_TRUE(req.handle()->socket());
   req.handle()->Reset();
 
-  EXPECT_EQ(4u, log->entries().size());
+  EXPECT_EQ(6u, log->entries().size());
   EXPECT_TRUE(LogContainsBeginEvent(*log, 0, LoadLog::TYPE_SOCKET_POOL));
   EXPECT_TRUE(LogContainsBeginEvent(
       *log, 1, LoadLog::TYPE_SOCKET_POOL_CONNECT_JOB));
   EXPECT_TRUE(LogContainsEndEvent(
       *log, 2, LoadLog::TYPE_SOCKET_POOL_CONNECT_JOB));
+  EXPECT_TRUE(LogContainsEvent(
+      *log, 3, LoadLog::TYPE_CANCELLED, LoadLog::PHASE_NONE));
+  EXPECT_TRUE(LogContainsEndEvent(
+      *log, 4, LoadLog::TYPE_SOCKET_POOL_CONNECT_JOB));
   EXPECT_TRUE(LogContainsEndEvent(
-      *log, 3, LoadLog::TYPE_SOCKET_POOL));
+      *log, 5, LoadLog::TYPE_SOCKET_POOL));
 }
 
 TEST_F(ClientSocketPoolBaseTest,
@@ -1143,13 +1147,18 @@ TEST_F(ClientSocketPoolBaseTest,
   EXPECT_EQ(LOAD_STATE_CONNECTING, pool_->GetLoadState("a", req.handle()));
   EXPECT_EQ(ERR_CONNECTION_FAILED, req.WaitForResult());
 
-  EXPECT_EQ(4u, log->entries().size());
+  EXPECT_EQ(6u, log->entries().size());
   EXPECT_TRUE(LogContainsBeginEvent(*log, 0, LoadLog::TYPE_SOCKET_POOL));
   EXPECT_TRUE(LogContainsBeginEvent(
       *log, 1, LoadLog::TYPE_SOCKET_POOL_CONNECT_JOB));
   EXPECT_TRUE(LogContainsEndEvent(
       *log, 2, LoadLog::TYPE_SOCKET_POOL_CONNECT_JOB));
-  EXPECT_TRUE(LogContainsEndEvent(*log, 3, LoadLog::TYPE_SOCKET_POOL));
+  EXPECT_TRUE(LogContainsEvent(
+      *log, 3, LoadLog::TYPE_CANCELLED, LoadLog::PHASE_NONE));
+  EXPECT_TRUE(LogContainsEndEvent(
+      *log, 4, LoadLog::TYPE_SOCKET_POOL_CONNECT_JOB));
+  EXPECT_TRUE(LogContainsEndEvent(
+      *log, 5, LoadLog::TYPE_SOCKET_POOL));
 }
 
 TEST_F(ClientSocketPoolBaseTest, TwoRequestsCancelOne) {
@@ -1184,13 +1193,18 @@ TEST_F(ClientSocketPoolBaseTest, TwoRequestsCancelOne) {
   req2.handle()->Reset();
 
   // Now request 2 has actually finished.
-  EXPECT_EQ(4u, log2->entries().size());
+  EXPECT_EQ(6u, log2->entries().size());
   EXPECT_TRUE(LogContainsBeginEvent(*log2, 0, LoadLog::TYPE_SOCKET_POOL));
   EXPECT_TRUE(LogContainsBeginEvent(
       *log2, 1, LoadLog::TYPE_SOCKET_POOL_CONNECT_JOB));
   EXPECT_TRUE(LogContainsEndEvent(
       *log2, 2, LoadLog::TYPE_SOCKET_POOL_CONNECT_JOB));
-  EXPECT_TRUE(LogContainsEndEvent(*log2, 3, LoadLog::TYPE_SOCKET_POOL));
+  EXPECT_TRUE(LogContainsEvent(
+      *log2, 3, LoadLog::TYPE_CANCELLED, LoadLog::PHASE_NONE));
+  EXPECT_TRUE(LogContainsEndEvent(
+      *log2, 4, LoadLog::TYPE_SOCKET_POOL_CONNECT_JOB));
+  EXPECT_TRUE(LogContainsEndEvent(
+      *log2, 5, LoadLog::TYPE_SOCKET_POOL));
 }
 
 
@@ -1484,7 +1498,7 @@ TEST_F(ClientSocketPoolBaseTest, ReleasedSocketReleasesToo) {
   EXPECT_EQ(OK, InitHandle(
       &handle, "a", kDefaultPriority, &callback, pool_.get(), NULL));
   handle.Reset();
-  
+
   // Before the DoReleaseSocket() task has run, start up a
   // TestReleasingSocketRequest. This one will be ERR_IO_PENDING since
   // num_releasing_sockets > 0 and there was no idle socket to use yet.
diff --git a/net/socket/tcp_client_socket_pool_unittest.cc b/net/socket/tcp_client_socket_pool_unittest.cc
index c9d80f1..d5576f0 100644
--- a/net/socket/tcp_client_socket_pool_unittest.cc
+++ b/net/socket/tcp_client_socket_pool_unittest.cc
@@ -100,17 +100,23 @@ class MockFailingClientSocket : public ClientSocket {
 
 class MockPendingClientSocket : public ClientSocket {
  public:
-  MockPendingClientSocket(bool should_connect)
+  // |should_connect| indicates whether the socket should successfully complete
+  // or fail.
+  // |should_stall| indicates that this socket should never connect.
+  // |delay_ms| is the delay, in milliseconds, before simulating a connect.
+  MockPendingClientSocket(bool should_connect, bool should_stall, int delay_ms)
       : method_factory_(ALLOW_THIS_IN_INITIALIZER_LIST(this)),
         should_connect_(should_connect),
+        should_stall_(should_stall),
+        delay_ms_(delay_ms),
        is_connected_(false) {}
 
   // ClientSocket methods:
   virtual int Connect(CompletionCallback* callback, LoadLog* /* load_log */) {
-    MessageLoop::current()->PostTask(
+    MessageLoop::current()->PostDelayedTask(
        FROM_HERE,
        method_factory_.NewRunnableMethod(
-            &MockPendingClientSocket::DoCallback, callback));
+            &MockPendingClientSocket::DoCallback, callback), delay_ms_);
     return ERR_IO_PENDING;
   }
 
@@ -141,6 +147,9 @@ class MockPendingClientSocket : public ClientSocket {
 
  private:
   void DoCallback(CompletionCallback* callback) {
+    if (should_stall_)
+      return;
+
     if (should_connect_) {
       is_connected_ = true;
       callback->Run(OK);
@@ -152,6 +161,8 @@ class MockPendingClientSocket : public ClientSocket {
 
   ScopedRunnableMethodFactory<MockPendingClientSocket> method_factory_;
   bool should_connect_;
+  bool should_stall_;
+  int delay_ms_;
   bool is_connected_;
 };
 
@@ -162,22 +173,37 @@ class MockClientSocketFactory : public ClientSocketFactory {
     MOCK_FAILING_CLIENT_SOCKET,
     MOCK_PENDING_CLIENT_SOCKET,
     MOCK_PENDING_FAILING_CLIENT_SOCKET,
+    // A delayed socket will pause before connecting through the message loop.
+    MOCK_DELAYED_CLIENT_SOCKET,
+    // A stalled socket that never connects at all.
+    MOCK_STALLED_CLIENT_SOCKET,
   };
 
   MockClientSocketFactory()
-      : allocation_count_(0), client_socket_type_(MOCK_CLIENT_SOCKET) {}
+      : allocation_count_(0), client_socket_type_(MOCK_CLIENT_SOCKET),
+        client_socket_types_(NULL), client_socket_index_(0) {}
 
   virtual ClientSocket* CreateTCPClientSocket(const AddressList& addresses) {
     allocation_count_++;
-    switch (client_socket_type_) {
+
+    ClientSocketType type = client_socket_type_;
+    if (client_socket_types_)
+      type = client_socket_types_[client_socket_index_++];
+
+    switch (type) {
       case MOCK_CLIENT_SOCKET:
        return new MockClientSocket();
       case MOCK_FAILING_CLIENT_SOCKET:
        return new MockFailingClientSocket();
       case MOCK_PENDING_CLIENT_SOCKET:
-        return new MockPendingClientSocket(true);
+        return new MockPendingClientSocket(true, false, 0);
       case MOCK_PENDING_FAILING_CLIENT_SOCKET:
-        return new MockPendingClientSocket(false);
+        return new MockPendingClientSocket(false, false, 0);
+      case MOCK_DELAYED_CLIENT_SOCKET:
+        return new MockPendingClientSocket(true, false,
+            ClientSocketPool::kMaxConnectRetryIntervalMs);
+      case MOCK_STALLED_CLIENT_SOCKET:
+        return new MockPendingClientSocket(true, true, 0);
       default:
        NOTREACHED();
        return new MockClientSocket();
@@ -194,13 +220,21 @@ class MockClientSocketFactory : public ClientSocketFactory {
 
   int allocation_count() const { return allocation_count_; }
 
+  // Set the default ClientSocketType.
   void set_client_socket_type(ClientSocketType type) {
     client_socket_type_ = type;
   }
 
+  // Set a list of ClientSocketTypes to be used.
+  void set_client_socket_types(ClientSocketType* type_list) {
+    client_socket_types_ = type_list;
+  }
+
  private:
   int allocation_count_;
   ClientSocketType client_socket_type_;
+  ClientSocketType* client_socket_types_;
+  int client_socket_index_;
 };
 
 class TCPClientSocketPoolTest : public ClientSocketPoolTest {
@@ -626,6 +660,103 @@ TEST_F(TCPClientSocketPoolTest, ResetIdleSocketsOnIPAddressChange) {
   EXPECT_EQ(0, pool_->IdleSocketCount());
 }
 
+TEST_F(TCPClientSocketPoolTest, BackupSocketConnect) {
+  // Case 1 tests the first socket stalling, and the backup connecting.
+  MockClientSocketFactory::ClientSocketType case1_types[] = {
+    // The first socket will not connect.
+    MockClientSocketFactory::MOCK_STALLED_CLIENT_SOCKET,
+    // The second socket will connect more quickly.
+    MockClientSocketFactory::MOCK_CLIENT_SOCKET
+  };
+
+  // Case 2 tests the first socket being slow, so that we start the
+  // second connect, but the second connect stalls, and we still
+  // complete the first.
+  MockClientSocketFactory::ClientSocketType case2_types[] = {
+    // The first socket will connect, although delayed.
+    MockClientSocketFactory::MOCK_DELAYED_CLIENT_SOCKET,
+    // The second socket will not connect.
+    MockClientSocketFactory::MOCK_STALLED_CLIENT_SOCKET
+  };
+
+  MockClientSocketFactory::ClientSocketType* cases[2] = {
+    case1_types,
+    case2_types
+  };
+
+  for (size_t index = 0; index < arraysize(cases); ++index) {
+    client_socket_factory_.set_client_socket_types(cases[index]);
+
+    EXPECT_EQ(0, pool_->IdleSocketCount());
+
+    TestCompletionCallback callback;
+    ClientSocketHandle handle;
+    TCPSocketParams dest("www.google.com", 80, LOW, GURL(), false);
+    int rv = handle.Init("b", dest, LOW, &callback, pool_.get(), NULL);
+    EXPECT_EQ(ERR_IO_PENDING, rv);
+    EXPECT_FALSE(handle.is_initialized());
+    EXPECT_FALSE(handle.socket());
+
+    // Create the first socket, set the timer.
+    MessageLoop::current()->RunAllPending();
+
+    // Wait for the backup socket timer to fire.
+    PlatformThread::Sleep(ClientSocketPool::kMaxConnectRetryIntervalMs);
+
+    // Let the appropriate socket connect.
+    MessageLoop::current()->RunAllPending();
+
+    EXPECT_EQ(OK, callback.WaitForResult());
+    EXPECT_TRUE(handle.is_initialized());
+    EXPECT_TRUE(handle.socket());
+
+    // One socket is stalled, the other is active.
+    EXPECT_EQ(0, pool_->IdleSocketCount());
+    handle.Reset();
+  }
+}
+
+// Test the case where a socket took long enough to start the creation
+// of the backup socket, but then we cancelled the request after that.
+TEST_F(TCPClientSocketPoolTest, BackupSocketCancel) {
+  client_socket_factory_.set_client_socket_type(
+      MockClientSocketFactory::MOCK_STALLED_CLIENT_SOCKET);
+
+  enum { CANCEL_BEFORE_WAIT, CANCEL_AFTER_WAIT };
+
+  for (int index = CANCEL_BEFORE_WAIT; index <= CANCEL_AFTER_WAIT; ++index) {
+    EXPECT_EQ(0, pool_->IdleSocketCount());
+
+    TestCompletionCallback callback;
+    ClientSocketHandle handle;
+    TCPSocketParams dest("www.google.com", 80, LOW, GURL(), false);
+    int rv = handle.Init("c", dest, LOW, &callback, pool_.get(), NULL);
+    EXPECT_EQ(ERR_IO_PENDING, rv);
+    EXPECT_FALSE(handle.is_initialized());
+    EXPECT_FALSE(handle.socket());
+
+    // Create the first socket, set the timer.
+    MessageLoop::current()->RunAllPending();
+
+    if (index == CANCEL_AFTER_WAIT) {
+      // Wait for the backup socket timer to fire.
+      PlatformThread::Sleep(ClientSocketPool::kMaxConnectRetryIntervalMs);
+    }
+
+    // Let the appropriate socket connect.
+    MessageLoop::current()->RunAllPending();
+
+    handle.Reset();
+
+    EXPECT_FALSE(callback.have_result());
+    EXPECT_FALSE(handle.is_initialized());
+    EXPECT_FALSE(handle.socket());
+
+    // One socket is stalled, the other is active.
+    EXPECT_EQ(0, pool_->IdleSocketCount());
+  }
+}
+
 } // namespace
 
 } // namespace net
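ConnectRetryIntervalMs() is pinned at kMaxConnectRetryIntervalMs for now; the TODO in the header notes it could later be tuned from measured RTT. One plausible shape for that tuning, purely as an illustration (EstimateRetryIntervalMs, the multiplier, and the 50 ms floor are assumptions, not Chromium code):

// Sketch only: derive the backup-connect delay from a smoothed RTT sample,
// clamped to the 250 ms ceiling this patch introduces.
#include <algorithm>
#include <cstdio>

constexpr int kMaxConnectRetryIntervalMs = 250;  // mirrors ClientSocketPool
constexpr int kMinConnectRetryIntervalMs = 50;   // assumed lower bound

// |srtt_ms| is a smoothed round-trip time estimate; |multiplier| is how many
// RTTs to wait before treating the pending connect as likely lost.
int EstimateRetryIntervalMs(int srtt_ms, int multiplier) {
  return std::clamp(srtt_ms * multiplier,
                    kMinConnectRetryIntervalMs,
                    kMaxConnectRetryIntervalMs);
}

int main() {
  std::printf("srtt=10ms  -> backup after %d ms\n", EstimateRetryIntervalMs(10, 4));
  std::printf("srtt=120ms -> backup after %d ms\n", EstimateRetryIntervalMs(120, 4));
  return 0;
}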