author     willchan@chromium.org <willchan@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>  2010-04-13 22:02:56 +0000
committer  willchan@chromium.org <willchan@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>  2010-04-13 22:02:56 +0000
commit     42df4e8eba0ccd4dc81875482fe0786228b38702 (patch)
tree       41912df99a194ebd43439bef5b1d17e240ab9ffd /net
parent     148e47de6904b89adf7239e1f3bdb831cddcbf4c (diff)
Revert r44150 and r43882.
r44150 is a fix for r43882. r43882 is a bugfix that includes idle sockets in the socket limit and closes them when we hit the limit. Including the idle sockets in the socket limit check makes us hit the socket limit more often, which exposed some bugs in the existing code. These two patches may also have introduced some separate crashes of their own (it's unclear from the stack traces). I'm reverting them for now to mitigate the crashes.
BUG=32817,41228
Review URL: http://codereview.chromium.org/1657001
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@44402 0039d316-1c4b-4281-b951-d872f2087c98
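For orientation, a minimal standalone sketch of the check at the heart of this revert follows. It is not the Chromium pool code itself: the PoolLimitSketch struct, the two method names, the sample counts, and main() are invented for illustration; only the member names (handed_out_socket_count_, connecting_socket_count_, idle_socket_count_, max_sockets_) mirror those in client_socket_pool_base.cc. The reverted r43882 behavior counted idle sockets toward the global limit (and closed one when the limit was hit); the restored behavior counts only handed-out and connecting sockets.

// Minimal sketch (not the actual Chromium pool code); see the note above.
#include <cassert>

struct PoolLimitSketch {
  int handed_out_socket_count_ = 0;
  int connecting_socket_count_ = 0;
  int idle_socket_count_ = 0;
  int max_sockets_ = 256;

  // Restored behavior: idle sockets do not count toward the global limit, so
  // the limit is reached less often and no idle socket is closed on behalf of
  // a stalled request.
  bool ReachedMaxSocketsLimitRestored() const {
    int total = handed_out_socket_count_ + connecting_socket_count_;
    assert(total <= max_sockets_);
    return total == max_sockets_;
  }

  // Reverted r43882 behavior: idle sockets count toward the limit; the
  // request path then closed one idle socket whenever the limit was hit while
  // idle sockets were available.
  bool ReachedMaxSocketsLimitReverted() const {
    int total = handed_out_socket_count_ + connecting_socket_count_ +
                idle_socket_count_;
    assert(total <= max_sockets_);
    return total == max_sockets_;
  }
};

int main() {
  PoolLimitSketch pool;
  pool.handed_out_socket_count_ = 240;  // sockets handed out to requests
  pool.connecting_socket_count_ = 6;    // sockets still connecting
  pool.idle_socket_count_ = 10;         // kept-alive idle sockets
  // Counting idle sockets, the pool is already at its limit of 256; without
  // them, ten slots remain and new requests need not stall.
  assert(pool.ReachedMaxSocketsLimitReverted());
  assert(!pool.ReachedMaxSocketsLimitRestored());
  return 0;
}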
Diffstat (limited to 'net')
-rw-r--r--  net/socket/client_socket_pool_base.cc            |  62
-rw-r--r--  net/socket/client_socket_pool_base.h              |  21
-rw-r--r--  net/socket/client_socket_pool_base_unittest.cc    | 115
-rw-r--r--  net/socket/tcp_client_socket_pool.cc              |   2
4 files changed, 39 insertions, 161 deletions
diff --git a/net/socket/client_socket_pool_base.cc b/net/socket/client_socket_pool_base.cc
index c635a20..7389532 100644
--- a/net/socket/client_socket_pool_base.cc
+++ b/net/socket/client_socket_pool_base.cc
@@ -182,6 +182,21 @@ int ClientSocketPoolBaseHelper::RequestSocketInternal(
   CHECK(handle);
   Group& group = group_map_[group_name];
 
+  // Can we make another active socket now?
+  if (ReachedMaxSocketsLimit() ||
+      !group.HasAvailableSocketSlot(max_sockets_per_group_)) {
+    if (ReachedMaxSocketsLimit()) {
+      // We could check if we really have a stalled group here, but it requires
+      // a scan of all groups, so just flip a flag here, and do the check later.
+      may_have_stalled_group_ = true;
+
+      request->net_log().AddEvent(NetLog::TYPE_SOCKET_POOL_STALLED_MAX_SOCKETS);
+    } else {
+      request->net_log().AddEvent(NetLog::TYPE_SOCKET_POOL_STALLED_MAX_SOCKETS_PER_GROUP);
+    }
+    return ERR_IO_PENDING;
+  }
+
   // Try to reuse a socket.
   while (!group.idle_sockets.empty()) {
     IdleSocket idle_socket = group.idle_sockets.back();
@@ -199,25 +214,6 @@ int ClientSocketPoolBaseHelper::RequestSocketInternal(
     delete idle_socket.socket;
   }
 
-  // Can we make another active socket now?
-  if (!group.HasAvailableSocketSlot(max_sockets_per_group_)) {
-    request->net_log().AddEvent(
-        NetLog::TYPE_SOCKET_POOL_STALLED_MAX_SOCKETS_PER_GROUP);
-    return ERR_IO_PENDING;
-  }
-
-  if (ReachedMaxSocketsLimit()) {
-    if (idle_socket_count() > 0) {
-      CloseOneIdleSocket();
-    } else {
-      // We could check if we really have a stalled group here, but it requires
-      // a scan of all groups, so just flip a flag here, and do the check later.
-      may_have_stalled_group_ = true;
-      request->net_log().AddEvent(NetLog::TYPE_SOCKET_POOL_STALLED_MAX_SOCKETS);
-      return ERR_IO_PENDING;
-    }
-  }
-
   // See if we already have enough connect jobs or sockets that will be released
   // soon.
   if (group.HasReleasingSockets()) {
@@ -615,10 +611,8 @@ void ClientSocketPoolBaseHelper::OnAvailableSocketSlot(
     int stalled_group_count = FindTopStalledGroup(&top_group, &top_group_name);
     if (stalled_group_count <= 1)
       may_have_stalled_group_ = false;
-    if (stalled_group_count >= 1) {
-      CHECK_GE(1, idle_socket_count());
+    if (stalled_group_count >= 1)
       ProcessPendingRequest(top_group_name, top_group);
-    }
   } else if (!group->pending_requests.empty()) {
     ProcessPendingRequest(group_name, group);
     // |group| may no longer be valid after this point. Be careful not to
@@ -707,33 +701,11 @@ void ClientSocketPoolBaseHelper::CancelAllConnectJobs() {
 
 bool ClientSocketPoolBaseHelper::ReachedMaxSocketsLimit() const {
   // Each connecting socket will eventually connect and be handed out.
-  int total = handed_out_socket_count_ + connecting_socket_count_ +
-      idle_socket_count();
+  int total = handed_out_socket_count_ + connecting_socket_count_;
   DCHECK_LE(total, max_sockets_);
   return total == max_sockets_;
 }
 
-void ClientSocketPoolBaseHelper::CloseOneIdleSocket() {
-  CHECK_GT(idle_socket_count(), 0);
-
-  for (GroupMap::iterator i = group_map_.begin(); i != group_map_.end(); ++i) {
-    Group& group = i->second;
-
-    if (!group.idle_sockets.empty()) {
-      std::deque<IdleSocket>::iterator j = group.idle_sockets.begin();
-      delete j->socket;
-      group.idle_sockets.erase(j);
-      DecrementIdleCount();
-      if (group.IsEmpty())
-        group_map_.erase(i);
-
-      return;
-    }
-  }
-
-  NOTREACHED();
-}
-
 }  // namespace internal
 
 }  // namespace net
diff --git a/net/socket/client_socket_pool_base.h b/net/socket/client_socket_pool_base.h
index b740067..75572cf 100644
--- a/net/socket/client_socket_pool_base.h
+++ b/net/socket/client_socket_pool_base.h
@@ -223,7 +223,7 @@ class ClientSocketPoolBaseHelper
     return connect_job_factory_->ConnectionTimeout();
   }
 
-  void EnableBackupJobs() { backup_jobs_enabled_ = true; };
+  void enable_backup_jobs() { backup_jobs_enabled_ = true; };
 
  private:
   friend class base::RefCounted<ClientSocketPoolBaseHelper>;
@@ -385,13 +385,6 @@ class ClientSocketPoolBaseHelper
   // Called when the backup socket timer fires.
   void OnBackupSocketTimerFired(const std::string& group_name);
 
-  // Closes one idle socket. Picks the first one encountered.
-  // TODO(willchan): Consider a better algorithm for doing this. Perhaps we
-  // should keep an ordered list of idle sockets, and close them in order.
-  // Requires maintaining more state. It's not clear if it's worth it since
-  // I'm not sure if we hit this situation often.
-  void CloseOneIdleSocket();
-
   GroupMap group_map_;
 
   // Timer used to periodically prune idle sockets that timed out or can't be
@@ -428,13 +421,9 @@ class ClientSocketPoolBaseHelper
   // |max_sockets_per_group_| limit. So choosing the next request involves
   // selecting the highest priority request across *all* groups.
   //
-  // |may_have_stalled_group_| is not conclusive, since when we cancel pending
-  // requests, we may reach the situation where we have the maximum number of
-  // sockets, but not request is stalled because of the global socket limit
-  // (although some requests may be blocked on the socket per group limit).
-  // We don't strictly maintain |may_have_stalled_group_|, since that would
-  // require a linear search through all groups in |group_map_| to see if one
-  // of them is stalled.
+  // Since reaching the maximum number of sockets is an edge case, we make note
+  // of when it happens, and thus avoid doing the slower "scan all groups"
+  // in the common case.
   bool may_have_stalled_group_;
 
   const scoped_ptr<ConnectJobFactory> connect_job_factory_;
@@ -578,7 +567,7 @@ class ClientSocketPoolBase {
 
   const std::string& name() const { return name_; }
 
-  void EnableBackupJobs() { helper_->EnableBackupJobs(); };
+  void enable_backup_jobs() { helper_->enable_backup_jobs(); };
 
  private:
   // This adaptor class exists to bridge the
diff --git a/net/socket/client_socket_pool_base_unittest.cc b/net/socket/client_socket_pool_base_unittest.cc
index bf5a30f..a6ee466 100644
--- a/net/socket/client_socket_pool_base_unittest.cc
+++ b/net/socket/client_socket_pool_base_unittest.cc
@@ -9,7 +9,6 @@
 #include "base/message_loop.h"
 #include "base/platform_thread.h"
 #include "base/scoped_vector.h"
-#include "base/string_util.h"
 #include "net/base/net_log.h"
 #include "net/base/net_log_unittest.h"
 #include "net/base/net_errors.h"
@@ -337,8 +336,6 @@ class TestClientSocketPool : public ClientSocketPool {
 
   void CleanupTimedOutIdleSockets() { base_.CleanupIdleSockets(false); }
 
-  void EnableBackupJobs() { base_.EnableBackupJobs(); }
-
  private:
   ~TestClientSocketPool() {}
 
@@ -575,7 +572,7 @@ TEST_F(ClientSocketPoolBaseTest, TotalLimit) {
   EXPECT_EQ(ERR_IO_PENDING, StartRequest("f", kDefaultPriority));
   EXPECT_EQ(ERR_IO_PENDING, StartRequest("g", kDefaultPriority));
 
-  ReleaseAllConnections(NO_KEEP_ALIVE);
+  ReleaseAllConnections(KEEP_ALIVE);
 
   EXPECT_EQ(static_cast<int>(requests_.size()),
             client_socket_factory_.allocation_count());
@@ -611,7 +608,7 @@ TEST_F(ClientSocketPoolBaseTest, TotalLimitReachedNewGroup) {
   // Now create a new group and verify that we don't starve it.
   EXPECT_EQ(ERR_IO_PENDING, StartRequest("c", kDefaultPriority));
 
-  ReleaseAllConnections(NO_KEEP_ALIVE);
+  ReleaseAllConnections(KEEP_ALIVE);
 
   EXPECT_EQ(static_cast<int>(requests_.size()),
             client_socket_factory_.allocation_count());
@@ -642,8 +639,11 @@ TEST_F(ClientSocketPoolBaseTest, TotalLimitRespectsPriority) {
   EXPECT_EQ(ERR_IO_PENDING, StartRequest("a", MEDIUM));
   EXPECT_EQ(ERR_IO_PENDING, StartRequest("b", HIGHEST));
 
-  ReleaseAllConnections(NO_KEEP_ALIVE);
+  ReleaseAllConnections(KEEP_ALIVE);
 
+  // We're re-using one socket for group "a", and one for "b".
+  EXPECT_EQ(static_cast<int>(requests_.size()) - 2,
+            client_socket_factory_.allocation_count());
   EXPECT_EQ(requests_.size() - kDefaultMaxSockets, completion_count_);
 
   // First 4 requests don't have to wait, and finish in order.
@@ -677,9 +677,10 @@ TEST_F(ClientSocketPoolBaseTest, TotalLimitRespectsGroupLimit) {
   EXPECT_EQ(ERR_IO_PENDING, StartRequest("a", LOW));
   EXPECT_EQ(ERR_IO_PENDING, StartRequest("b", HIGHEST));
 
-  ReleaseAllConnections(NO_KEEP_ALIVE);
+  ReleaseAllConnections(KEEP_ALIVE);
 
-  EXPECT_EQ(static_cast<int>(requests_.size()),
+  // We're re-using one socket for group "a", and one for "b".
+  EXPECT_EQ(static_cast<int>(requests_.size()) - 2,
             client_socket_factory_.allocation_count());
   EXPECT_EQ(requests_.size() - kDefaultMaxSockets, completion_count_);
 
@@ -724,7 +725,7 @@ TEST_F(ClientSocketPoolBaseTest, TotalLimitCountsConnectingSockets) {
   connect_job_factory_->set_job_type(TestConnectJob::kMockJob);
   EXPECT_EQ(ERR_IO_PENDING, StartRequest("e", kDefaultPriority));
 
-  ReleaseAllConnections(NO_KEEP_ALIVE);
+  ReleaseAllConnections(KEEP_ALIVE);
 
   EXPECT_EQ(static_cast<int>(requests_.size()),
             client_socket_factory_.allocation_count());
@@ -765,7 +766,7 @@ TEST_F(ClientSocketPoolBaseTest, MayHaveStalledGroupReset) {
   // After releasing first connection for "a", we're still at the
   // maximum sockets limit, but every group's pending queue is empty,
   // so we reset the flag.
-  EXPECT_TRUE(ReleaseOneConnection(NO_KEEP_ALIVE));
+  EXPECT_TRUE(ReleaseOneConnection(KEEP_ALIVE));
   EXPECT_FALSE(pool_->base()->may_have_stalled_group());
 
   // Requesting additional socket while at the total limit should
@@ -780,102 +781,17 @@
 
   // We're at the maximum socket limit, and still have one request pending
   // for "d". Flag should be "on".
-  EXPECT_TRUE(ReleaseOneConnection(NO_KEEP_ALIVE));
+  EXPECT_TRUE(ReleaseOneConnection(KEEP_ALIVE));
   EXPECT_TRUE(pool_->base()->may_have_stalled_group());
 
   // Now every group's pending queue should be empty again.
-  EXPECT_TRUE(ReleaseOneConnection(NO_KEEP_ALIVE));
+  EXPECT_TRUE(ReleaseOneConnection(KEEP_ALIVE));
   EXPECT_FALSE(pool_->base()->may_have_stalled_group());
 
-  ReleaseAllConnections(NO_KEEP_ALIVE);
+  ReleaseAllConnections(KEEP_ALIVE);
   EXPECT_FALSE(pool_->base()->may_have_stalled_group());
 }
 
-TEST_F(ClientSocketPoolBaseTest, CloseIdleSocketAtSocketLimit) {
-  CreatePool(kDefaultMaxSockets, kDefaultMaxSocketsPerGroup);
-  connect_job_factory_->set_job_type(TestConnectJob::kMockJob);
-
-  for (int i = 0; i < kDefaultMaxSockets; ++i) {
-    ClientSocketHandle handle;
-    TestCompletionCallback callback;
-    EXPECT_EQ(OK,
-              InitHandle(&handle, IntToString(i), kDefaultPriority, &callback,
-                         pool_, NULL));
-  }
-
-  // Stall a group
-  ClientSocketHandle handle;
-  TestCompletionCallback callback;
-  EXPECT_EQ(ERR_IO_PENDING,
-            InitHandle(&handle, "foo", kDefaultPriority, &callback, pool_,
-                       NULL));
-
-  // Cancel the stalled request.
-  handle.Reset();
-
-  // Flush all the DoReleaseSocket tasks.
-  MessageLoop::current()->RunAllPending();
-
-  EXPECT_EQ(kDefaultMaxSockets, client_socket_factory_.allocation_count());
-  EXPECT_EQ(kDefaultMaxSockets, pool_->IdleSocketCount());
-
-  for (int i = 0; i < kDefaultMaxSockets; ++i) {
-    ClientSocketHandle handle;
-    TestCompletionCallback callback;
-    EXPECT_EQ(OK,
-              InitHandle(&handle, StringPrintf("Take 2: %d", i),
-                         kDefaultPriority, &callback, pool_, NULL));
-  }
-
-  EXPECT_EQ(2 * kDefaultMaxSockets, client_socket_factory_.allocation_count());
-  EXPECT_EQ(0, pool_->IdleSocketCount());
-
-  // Before the next round of DoReleaseSocket tasks run, we will hit the
-  // socket limit.
-
-  EXPECT_EQ(ERR_IO_PENDING,
-            InitHandle(&handle, "foo", kDefaultPriority, &callback, pool_,
-                       NULL));
-
-  // But if we wait for it, the released idle sockets will be closed in
-  // preference of the waiting request.
-
-  EXPECT_EQ(OK, callback.WaitForResult());
-}
-
-// Regression test for http://crbug.com/40952.
-TEST_F(ClientSocketPoolBaseTest, CloseIdleSocketAtSocketLimitDeleteGroup) {
-  CreatePool(kDefaultMaxSockets, kDefaultMaxSocketsPerGroup);
-  pool_->EnableBackupJobs();
-  connect_job_factory_->set_job_type(TestConnectJob::kMockJob);
-
-  for (int i = 0; i < kDefaultMaxSockets; ++i) {
-    ClientSocketHandle handle;
-    TestCompletionCallback callback;
-    EXPECT_EQ(OK,
-              InitHandle(&handle, IntToString(i), kDefaultPriority, &callback,
-                         pool_, NULL));
-  }
-
-  // Flush all the DoReleaseSocket tasks.
-  MessageLoop::current()->RunAllPending();
-
-  // Stall a group. Set a pending job so it'll trigger a backup job if we don't
-  // reuse a socket.
-  connect_job_factory_->set_job_type(TestConnectJob::kMockPendingJob);
-  ClientSocketHandle handle;
-  TestCompletionCallback callback;
-
-  // "0" is special here, since it should be the first entry in the sorted map,
-  // which is the one which we would close an idle socket for. We shouldn't
-  // close an idle socket though, since we should reuse the idle socket.
-  EXPECT_EQ(OK,
-            InitHandle(&handle, "0", kDefaultPriority, &callback, pool_, NULL));
-
-  EXPECT_EQ(kDefaultMaxSockets, client_socket_factory_.allocation_count());
-  EXPECT_EQ(kDefaultMaxSockets - 1, pool_->IdleSocketCount());
-}
-
 TEST_F(ClientSocketPoolBaseTest, PendingRequests) {
   CreatePool(kDefaultMaxSockets, kDefaultMaxSocketsPerGroup);
 
@@ -1196,8 +1112,9 @@ TEST_F(ClientSocketPoolBaseTest, GroupWithPendingRequestsIsNotEmpty) {
 
   // Closing idle sockets should not get us into trouble, but in the bug
   // we were hitting a CHECK here.
-  EXPECT_EQ(0, pool_->IdleSocketCountInGroup("a"));
+  EXPECT_EQ(2, pool_->IdleSocketCountInGroup("a"));
   pool_->CloseIdleSockets();
+  EXPECT_EQ(0, pool_->IdleSocketCountInGroup("a"));
 }
 
 TEST_F(ClientSocketPoolBaseTest, BasicAsynchronous) {
diff --git a/net/socket/tcp_client_socket_pool.cc b/net/socket/tcp_client_socket_pool.cc
index 32952af..db8a88a 100644
--- a/net/socket/tcp_client_socket_pool.cc
+++ b/net/socket/tcp_client_socket_pool.cc
@@ -181,7 +181,7 @@ TCPClientSocketPool::TCPClientSocketPool(
           base::TimeDelta::FromSeconds(kUnusedIdleSocketTimeout),
           base::TimeDelta::FromSeconds(kUsedIdleSocketTimeout),
           new TCPConnectJobFactory(client_socket_factory, host_resolver)) {
-  base_.EnableBackupJobs();
+  base_.enable_backup_jobs();
 }
 
 TCPClientSocketPool::~TCPClientSocketPool() {}