author | willchan@chromium.org <willchan@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98> | 2010-06-10 21:30:54 +0000 |
---|---|---|
committer | willchan@chromium.org <willchan@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98> | 2010-06-10 21:30:54 +0000 |
commit | 43a21b827aef4bb36eaa4f555696e7d64abc1885 (patch) | |
tree | 69d1b40344998b2c84fba498db81c75ee228b090 /net | |
parent | a804fa4829632d13c40296b06864b99b00b3d844 (diff) | |
Reland my close on idle socket change (r43882+r44150).
I reverted it the first time because it was suspected of causing a hang on the IO thread; a different CL turned out to be the cause.
When I relanded it previously, the fix for that IO thread hang had changed an assertion, which broke this change. I've now deleted that assertion.
BUG=32817
Review URL: http://codereview.chromium.org/2716004
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@49444 0039d316-1c4b-4281-b951-d872f2087c98
Diffstat (limited to 'net')
-rw-r--r-- | net/socket/client_socket_pool_base.cc | 73 |
-rw-r--r-- | net/socket/client_socket_pool_base.h | 21 |
-rw-r--r-- | net/socket/client_socket_pool_base_unittest.cc | 117 |
-rw-r--r-- | net/socket/tcp_client_socket_pool.cc | 2 |
4 files changed, 169 insertions, 44 deletions
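For orientation before the full diff: the change reorders RequestSocketInternal so that idle-socket reuse is attempted first, the per-group limit is checked next, and only then the global limit; when the pool is at the global limit but idle sockets exist, one idle socket is closed (the new CloseOneIdleSocket()) instead of stalling the request, and ReachedMaxSocketsLimit() now counts idle sockets. Below is a minimal, self-contained sketch of that decision order, not the actual Chromium classes; PoolSketch, Group, Request(), Release(), and the integer counters are invented here for illustration and only stand in for the pool's handed-out/connecting/idle bookkeeping.

```cpp
// Sketch of the decision order this CL introduces (not Chromium code).
#include <iostream>
#include <map>
#include <string>

struct Group {
  int idle = 0;    // idle (reusable) sockets in this group
  int active = 0;  // handed-out + connecting sockets in this group
};

class PoolSketch {
 public:
  PoolSketch(int max_sockets, int max_per_group)
      : max_sockets_(max_sockets), max_per_group_(max_per_group) {}

  // Returns "OK ..." if the request can proceed now, "PENDING ..." if it stalls.
  std::string Request(const std::string& group_name) {
    Group& group = groups_[group_name];

    // 1. Try to reuse an idle socket from the same group first (the CL moves
    //    this ahead of the limit checks).
    if (group.idle > 0) {
      --group.idle;
      --total_idle_;
      ++group.active;
      return "OK (reused idle socket)";
    }

    // 2. Per-group limit: the request stalls (STALLED_MAX_SOCKETS_PER_GROUP).
    if (group.active >= max_per_group_)
      return "PENDING (group limit)";

    // 3. Global limit: idle sockets now count toward it, and one is closed
    //    instead of stalling the request whenever any exist.
    if (TotalSockets() >= max_sockets_) {
      if (total_idle_ > 0)
        CloseOneIdleSocket();
      else
        return "PENDING (global limit)";  // the may_have_stalled_group_ case
    }

    ++group.active;  // start a new connect job
    return "OK (new socket)";
  }

  // Return a socket to the pool; keep_alive leaves it idle for reuse.
  void Release(const std::string& group_name, bool keep_alive) {
    Group& group = groups_[group_name];
    --group.active;
    if (keep_alive) {
      ++group.idle;
      ++total_idle_;
    }
  }

 private:
  int TotalSockets() const {
    int total = total_idle_;
    for (const auto& entry : groups_)
      total += entry.second.active;
    return total;
  }

  // Close the first idle socket found, like the new CloseOneIdleSocket().
  void CloseOneIdleSocket() {
    for (auto& entry : groups_) {
      if (entry.second.idle > 0) {
        --entry.second.idle;
        --total_idle_;
        return;
      }
    }
  }

  std::map<std::string, Group> groups_;
  int total_idle_ = 0;
  const int max_sockets_;
  const int max_per_group_;
};

int main() {
  PoolSketch pool(/*max_sockets=*/2, /*max_per_group=*/2);
  std::cout << pool.Request("a") << "\n";   // OK (new socket)
  std::cout << pool.Request("a") << "\n";   // OK (new socket)
  pool.Release("a", /*keep_alive=*/true);   // leaves one idle socket in "a"
  // Still at the global limit (1 active + 1 idle), but instead of stalling,
  // the idle socket is closed so group "b" is not starved.
  std::cout << pool.Request("b") << "\n";   // OK (new socket)
}
```

The design point mirrored here is that an idle socket held by any group now counts against the global limit and is closed in preference to stalling a request from another group, which is what the CloseIdleSocketAtSocketLimit tests in the diff below exercise.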
diff --git a/net/socket/client_socket_pool_base.cc b/net/socket/client_socket_pool_base.cc
index 80857e5..1ff2789 100644
--- a/net/socket/client_socket_pool_base.cc
+++ b/net/socket/client_socket_pool_base.cc
@@ -212,23 +212,6 @@ int ClientSocketPoolBaseHelper::RequestSocketInternal(
   CHECK(handle);
   Group& group = group_map_[group_name];
 
-  // Can we make another active socket now?
-  if (ReachedMaxSocketsLimit() ||
-      !group.HasAvailableSocketSlot(max_sockets_per_group_)) {
-    if (ReachedMaxSocketsLimit()) {
-      // We could check if we really have a stalled group here, but it requires
-      // a scan of all groups, so just flip a flag here, and do the check later.
-      may_have_stalled_group_ = true;
-
-      request->net_log().AddEvent(NetLog::TYPE_SOCKET_POOL_STALLED_MAX_SOCKETS,
-                                  NULL);
-    } else {
-      request->net_log().AddEvent(
-          NetLog::TYPE_SOCKET_POOL_STALLED_MAX_SOCKETS_PER_GROUP, NULL);
-    }
-    return ERR_IO_PENDING;
-  }
-
   // Try to reuse a socket.
   while (!group.idle_sockets.empty()) {
     IdleSocket idle_socket = group.idle_sockets.back();
@@ -246,6 +229,26 @@ int ClientSocketPoolBaseHelper::RequestSocketInternal(
     delete idle_socket.socket;
   }
 
+  // Can we make another active socket now?
+  if (!group.HasAvailableSocketSlot(max_sockets_per_group_)) {
+    request->net_log().AddEvent(
+        NetLog::TYPE_SOCKET_POOL_STALLED_MAX_SOCKETS_PER_GROUP, NULL);
+    return ERR_IO_PENDING;
+  }
+
+  if (ReachedMaxSocketsLimit()) {
+    if (idle_socket_count() > 0) {
+      CloseOneIdleSocket();
+    } else {
+      // We could check if we really have a stalled group here, but it requires
+      // a scan of all groups, so just flip a flag here, and do the check later.
+      may_have_stalled_group_ = true;
+      request->net_log().AddEvent(
+          NetLog::TYPE_SOCKET_POOL_STALLED_MAX_SOCKETS, NULL);
+      return ERR_IO_PENDING;
+    }
+  }
+
   // See if we already have enough connect jobs or sockets that will be released
   // soon.
   if (group.HasReleasingSockets()) {
@@ -532,10 +535,14 @@ void ClientSocketPoolBaseHelper::DoReleaseSocket(const std::string& group_name,
   int stalled_group_count = FindTopStalledGroup(&top_group, &top_group_name);
   if (stalled_group_count >= 1) {
     if (ReachedMaxSocketsLimit()) {
-      // We can't activate more sockets since we're already at our global
-      // limit.
-      may_have_stalled_group_ = true;
-      return;
+      if (idle_socket_count() > 0) {
+        CloseOneIdleSocket();
+      } else {
+        // We can't activate more sockets since we're already at our global
+        // limit.
+        may_have_stalled_group_ = true;
+        return;
+      }
     }
 
     ProcessPendingRequest(top_group_name, top_group);
@@ -768,7 +775,8 @@ void ClientSocketPoolBaseHelper::CancelAllConnectJobs() {
 
 bool ClientSocketPoolBaseHelper::ReachedMaxSocketsLimit() const {
   // Each connecting socket will eventually connect and be handed out.
-  int total = handed_out_socket_count_ + connecting_socket_count_;
+  int total = handed_out_socket_count_ + connecting_socket_count_ +
+      idle_socket_count();
   DCHECK_LE(total, max_sockets_);
   if (total < max_sockets_)
     return false;
@@ -776,6 +784,27 @@ bool ClientSocketPoolBaseHelper::ReachedMaxSocketsLimit() const {
   return true;
 }
 
+void ClientSocketPoolBaseHelper::CloseOneIdleSocket() {
+  CHECK_GT(idle_socket_count(), 0);
+
+  for (GroupMap::iterator i = group_map_.begin(); i != group_map_.end(); ++i) {
+    Group& group = i->second;
+
+    if (!group.idle_sockets.empty()) {
+      std::deque<IdleSocket>::iterator j = group.idle_sockets.begin();
+      delete j->socket;
+      group.idle_sockets.erase(j);
+      DecrementIdleCount();
+      if (group.IsEmpty())
+        group_map_.erase(i);
+
+      return;
+    }
+  }
+
+  LOG(DFATAL) << "No idle socket found to close!.";
+}
+
 }  // namespace internal
 
 }  // namespace net
diff --git a/net/socket/client_socket_pool_base.h b/net/socket/client_socket_pool_base.h
index a037b0e..3e1de22 100644
--- a/net/socket/client_socket_pool_base.h
+++ b/net/socket/client_socket_pool_base.h
@@ -237,7 +237,7 @@ class ClientSocketPoolBaseHelper
     return connect_job_factory_->ConnectionTimeout();
   }
 
-  void enable_backup_jobs() { backup_jobs_enabled_ = true; }
+  void EnableBackupJobs() { backup_jobs_enabled_ = true; }
 
  private:
   friend class base::RefCounted<ClientSocketPoolBaseHelper>;
@@ -403,6 +403,13 @@ class ClientSocketPoolBaseHelper
   // Called when the backup socket timer fires.
   void OnBackupSocketTimerFired(const std::string& group_name);
 
+  // Closes one idle socket. Picks the first one encountered.
+  // TODO(willchan): Consider a better algorithm for doing this. Perhaps we
+  // should keep an ordered list of idle sockets, and close them in order.
+  // Requires maintaining more state. It's not clear if it's worth it since
+  // I'm not sure if we hit this situation often.
+  void CloseOneIdleSocket();
+
   GroupMap group_map_;
 
   // Timer used to periodically prune idle sockets that timed out or can't be
@@ -442,9 +449,13 @@ class ClientSocketPoolBaseHelper
   // |max_sockets_per_group_| limit. So choosing the next request involves
   // selecting the highest priority request across *all* groups.
   //
-  // Since reaching the maximum number of sockets is an edge case, we make note
-  // of when it happens, and thus avoid doing the slower "scan all groups"
-  // in the common case.
+  // |may_have_stalled_group_| is not conclusive, since when we cancel pending
+  // requests, we may reach the situation where we have the maximum number of
+  // sockets, but no request is stalled because of the global socket limit
+  // (although some requests may be blocked on the socket per group limit).
+  // We don't strictly maintain |may_have_stalled_group_|, since that would
+  // require a linear search through all groups in |group_map_| to see if one
+  // of them is stalled.
   bool may_have_stalled_group_;
 
   const scoped_ptr<ConnectJobFactory> connect_job_factory_;
@@ -594,7 +605,7 @@ class ClientSocketPoolBase {
     return histograms_;
   }
 
-  void enable_backup_jobs() { helper_->enable_backup_jobs(); }
+  void EnableBackupJobs() { helper_->EnableBackupJobs(); }
 
   void Flush() { helper_->Flush(); }
 
diff --git a/net/socket/client_socket_pool_base_unittest.cc b/net/socket/client_socket_pool_base_unittest.cc
index 4b10cdb..8c0e6ea 100644
--- a/net/socket/client_socket_pool_base_unittest.cc
+++ b/net/socket/client_socket_pool_base_unittest.cc
@@ -9,6 +9,7 @@
 #include "base/message_loop.h"
 #include "base/platform_thread.h"
 #include "base/scoped_vector.h"
+#include "base/string_util.h"
 #include "net/base/net_log.h"
 #include "net/base/net_log_unittest.h"
 #include "net/base/net_errors.h"
@@ -350,6 +351,8 @@ class TestClientSocketPool : public ClientSocketPool {
 
   void CleanupTimedOutIdleSockets() { base_.CleanupIdleSockets(false); }
 
+  void EnableBackupJobs() { base_.EnableBackupJobs(); }
+
  private:
   ~TestClientSocketPool() {}
 
@@ -595,7 +598,7 @@ TEST_F(ClientSocketPoolBaseTest, TotalLimit) {
   EXPECT_EQ(ERR_IO_PENDING, StartRequest("f", kDefaultPriority));
   EXPECT_EQ(ERR_IO_PENDING, StartRequest("g", kDefaultPriority));
 
-  ReleaseAllConnections(KEEP_ALIVE);
+  ReleaseAllConnections(NO_KEEP_ALIVE);
 
   EXPECT_EQ(static_cast<int>(requests_.size()),
             client_socket_factory_.allocation_count());
@@ -631,7 +634,7 @@ TEST_F(ClientSocketPoolBaseTest, TotalLimitReachedNewGroup) {
   // Now create a new group and verify that we don't starve it.
   EXPECT_EQ(ERR_IO_PENDING, StartRequest("c", kDefaultPriority));
 
-  ReleaseAllConnections(KEEP_ALIVE);
+  ReleaseAllConnections(NO_KEEP_ALIVE);
 
   EXPECT_EQ(static_cast<int>(requests_.size()),
             client_socket_factory_.allocation_count());
@@ -662,11 +665,8 @@ TEST_F(ClientSocketPoolBaseTest, TotalLimitRespectsPriority) {
   EXPECT_EQ(ERR_IO_PENDING, StartRequest("a", MEDIUM));
   EXPECT_EQ(ERR_IO_PENDING, StartRequest("b", HIGHEST));
 
-  ReleaseAllConnections(KEEP_ALIVE);
+  ReleaseAllConnections(NO_KEEP_ALIVE);
 
-  // We're re-using one socket for group "a", and one for "b".
-  EXPECT_EQ(static_cast<int>(requests_.size()) - 2,
-            client_socket_factory_.allocation_count());
   EXPECT_EQ(requests_.size() - kDefaultMaxSockets, completion_count_);
 
   // First 4 requests don't have to wait, and finish in order.
@@ -700,10 +700,9 @@ TEST_F(ClientSocketPoolBaseTest, TotalLimitRespectsGroupLimit) {
   EXPECT_EQ(ERR_IO_PENDING, StartRequest("a", LOW));
   EXPECT_EQ(ERR_IO_PENDING, StartRequest("b", HIGHEST));
 
-  ReleaseAllConnections(KEEP_ALIVE);
+  ReleaseAllConnections(NO_KEEP_ALIVE);
 
-  // We're re-using one socket for group "a", and one for "b".
-  EXPECT_EQ(static_cast<int>(requests_.size()) - 2,
+  EXPECT_EQ(static_cast<int>(requests_.size()),
             client_socket_factory_.allocation_count());
   EXPECT_EQ(requests_.size() - kDefaultMaxSockets, completion_count_);
 
@@ -748,7 +747,7 @@ TEST_F(ClientSocketPoolBaseTest, TotalLimitCountsConnectingSockets) {
   connect_job_factory_->set_job_type(TestConnectJob::kMockJob);
   EXPECT_EQ(ERR_IO_PENDING, StartRequest("e", kDefaultPriority));
 
-  ReleaseAllConnections(KEEP_ALIVE);
+  ReleaseAllConnections(NO_KEEP_ALIVE);
 
   EXPECT_EQ(static_cast<int>(requests_.size()),
             client_socket_factory_.allocation_count());
@@ -789,7 +788,7 @@ TEST_F(ClientSocketPoolBaseTest, MayHaveStalledGroupReset) {
   // After releasing first connection for "a", we're still at the
   // maximum sockets limit, but every group's pending queue is empty,
   // so we reset the flag.
-  EXPECT_TRUE(ReleaseOneConnection(KEEP_ALIVE));
+  EXPECT_TRUE(ReleaseOneConnection(NO_KEEP_ALIVE));
   EXPECT_FALSE(pool_->base()->may_have_stalled_group());
 
   // Requesting additional socket while at the total limit should
@@ -804,14 +803,14 @@ TEST_F(ClientSocketPoolBaseTest, MayHaveStalledGroupReset) {
   // We're at the maximum socket limit, and still have one request pending
   // for "d". Flag should be "on".
-  EXPECT_TRUE(ReleaseOneConnection(KEEP_ALIVE));
+  EXPECT_TRUE(ReleaseOneConnection(NO_KEEP_ALIVE));
   EXPECT_TRUE(pool_->base()->may_have_stalled_group());
 
   // Now every group's pending queue should be empty again.
-  EXPECT_TRUE(ReleaseOneConnection(KEEP_ALIVE));
+  EXPECT_TRUE(ReleaseOneConnection(NO_KEEP_ALIVE));
   EXPECT_FALSE(pool_->base()->may_have_stalled_group());
 
-  ReleaseAllConnections(KEEP_ALIVE);
+  ReleaseAllConnections(NO_KEEP_ALIVE);
   EXPECT_FALSE(pool_->base()->may_have_stalled_group());
 }
 
@@ -867,6 +866,93 @@ TEST_F(ClientSocketPoolBaseTest, StallAndThenCancelAndTriggerAvailableSocket) {
     handles[i].Reset();
 }
 
+TEST_F(ClientSocketPoolBaseTest, CloseIdleSocketAtSocketLimit) {
+  CreatePool(kDefaultMaxSockets, kDefaultMaxSocketsPerGroup);
+  connect_job_factory_->set_job_type(TestConnectJob::kMockJob);
+
+  for (int i = 0; i < kDefaultMaxSockets; ++i) {
+    ClientSocketHandle handle;
+    TestCompletionCallback callback;
+    EXPECT_EQ(OK,
+              InitHandle(&handle, IntToString(i), kDefaultPriority, &callback,
+                         pool_, BoundNetLog()));
+  }
+
+  // Stall a group
+  ClientSocketHandle handle;
+  TestCompletionCallback callback;
+  EXPECT_EQ(ERR_IO_PENDING,
+            InitHandle(&handle, "foo", kDefaultPriority, &callback, pool_,
+                       BoundNetLog()));
+
+  // Cancel the stalled request.
+  handle.Reset();
+
+  // Flush all the DoReleaseSocket tasks.
+  MessageLoop::current()->RunAllPending();
+
+  EXPECT_EQ(kDefaultMaxSockets, client_socket_factory_.allocation_count());
+  EXPECT_EQ(kDefaultMaxSockets, pool_->IdleSocketCount());
+
+  for (int i = 0; i < kDefaultMaxSockets; ++i) {
+    ClientSocketHandle handle;
+    TestCompletionCallback callback;
+    EXPECT_EQ(OK,
+              InitHandle(&handle, StringPrintf("Take 2: %d", i),
+                         kDefaultPriority, &callback, pool_, BoundNetLog()));
+  }
+
+  EXPECT_EQ(2 * kDefaultMaxSockets, client_socket_factory_.allocation_count());
+  EXPECT_EQ(0, pool_->IdleSocketCount());
+
+  // Before the next round of DoReleaseSocket tasks run, we will hit the
+  // socket limit.
+
+  EXPECT_EQ(ERR_IO_PENDING,
+            InitHandle(&handle, "foo", kDefaultPriority, &callback, pool_,
+                       BoundNetLog()));
+
+  // But if we wait for it, the released idle sockets will be closed in
+  // preference of the waiting request.
+
+  EXPECT_EQ(OK, callback.WaitForResult());
+}
+
+// Regression test for http://crbug.com/40952.
+TEST_F(ClientSocketPoolBaseTest, CloseIdleSocketAtSocketLimitDeleteGroup) {
+  CreatePool(kDefaultMaxSockets, kDefaultMaxSocketsPerGroup);
+  pool_->EnableBackupJobs();
+  connect_job_factory_->set_job_type(TestConnectJob::kMockJob);
+
+  for (int i = 0; i < kDefaultMaxSockets; ++i) {
+    ClientSocketHandle handle;
+    TestCompletionCallback callback;
+    EXPECT_EQ(OK,
+              InitHandle(&handle, IntToString(i), kDefaultPriority, &callback,
+                         pool_, BoundNetLog()));
+  }
+
+  // Flush all the DoReleaseSocket tasks.
+  MessageLoop::current()->RunAllPending();
+
+  // Stall a group. Set a pending job so it'll trigger a backup job if we don't
+  // reuse a socket.
+  connect_job_factory_->set_job_type(TestConnectJob::kMockPendingJob);
+  ClientSocketHandle handle;
+  TestCompletionCallback callback;
+
+  // "0" is special here, since it should be the first entry in the sorted map,
+  // which is the one which we would close an idle socket for. We shouldn't
+  // close an idle socket though, since we should reuse the idle socket.
+  EXPECT_EQ(
+      OK,
+      InitHandle(
+          &handle, "0", kDefaultPriority, &callback, pool_, BoundNetLog()));
+
+  EXPECT_EQ(kDefaultMaxSockets, client_socket_factory_.allocation_count());
+  EXPECT_EQ(kDefaultMaxSockets - 1, pool_->IdleSocketCount());
+}
+
 TEST_F(ClientSocketPoolBaseTest, PendingRequests) {
   CreatePool(kDefaultMaxSockets, kDefaultMaxSocketsPerGroup);
 
@@ -1193,9 +1279,8 @@ TEST_F(ClientSocketPoolBaseTest, GroupWithPendingRequestsIsNotEmpty) {
   // Closing idle sockets should not get us into trouble, but in the bug
   // we were hitting a CHECK here.
-  EXPECT_EQ(2, pool_->IdleSocketCountInGroup("a"));
-  pool_->CloseIdleSockets();
   EXPECT_EQ(0, pool_->IdleSocketCountInGroup("a"));
+  pool_->CloseIdleSockets();
 }
 
 TEST_F(ClientSocketPoolBaseTest, BasicAsynchronous) {
diff --git a/net/socket/tcp_client_socket_pool.cc b/net/socket/tcp_client_socket_pool.cc
index c7eac14..b798bc4 100644
--- a/net/socket/tcp_client_socket_pool.cc
+++ b/net/socket/tcp_client_socket_pool.cc
@@ -186,7 +186,7 @@ TCPClientSocketPool::TCPClientSocketPool(
           new TCPConnectJobFactory(client_socket_factory, host_resolver, net_log),
           network_change_notifier) {
-  base_.enable_backup_jobs();
+  base_.EnableBackupJobs();
 }
 
 TCPClientSocketPool::~TCPClientSocketPool() {}