diff options
author | willchan@chromium.org <willchan@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98> | 2010-05-18 22:59:15 +0000 |
---|---|---|
committer | willchan@chromium.org <willchan@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98> | 2010-05-18 22:59:15 +0000 |
commit | a7fedd46c5167b048b6fa61e95969ca475e06904 (patch) | |
tree | b493fc4c7c0dddc6f47a1072ed9ca339d7e0e3f2 /net/socket/client_socket_pool_base.cc | |
parent | 57105fe4d3b6b91ed17ba52c4c51e0d2fc784790 (diff) | |
download | chromium_src-a7fedd46c5167b048b6fa61e95969ca475e06904.zip chromium_src-a7fedd46c5167b048b6fa61e95969ca475e06904.tar.gz chromium_src-a7fedd46c5167b048b6fa61e95969ca475e06904.tar.bz2 |
Reland my close-on-idle-socket change.
This reverts r44402, which reverted r44150 (fix for r43882) and r43882 (original change). These changes were suspected of causing the infinite loop on the IO thread and also a crash. It turned out that the backup socket was the cause, so these changes can be safely relanded. There is one minor change from the original, due to merging in the weeks of changes in between.
BUG=32817
Review URL: http://codereview.chromium.org/2077004
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@47583 0039d316-1c4b-4281-b951-d872f2087c98
Diffstat (limited to 'net/socket/client_socket_pool_base.cc')
-rw-r--r-- | net/socket/client_socket_pool_base.cc | 77 |
1 file changed, 54 insertions, 23 deletions
diff --git a/net/socket/client_socket_pool_base.cc b/net/socket/client_socket_pool_base.cc index b008c54..820e9e2 100644 --- a/net/socket/client_socket_pool_base.cc +++ b/net/socket/client_socket_pool_base.cc @@ -187,23 +187,6 @@ int ClientSocketPoolBaseHelper::RequestSocketInternal( CHECK(handle); Group& group = group_map_[group_name]; - // Can we make another active socket now? - if (ReachedMaxSocketsLimit() || - !group.HasAvailableSocketSlot(max_sockets_per_group_)) { - if (ReachedMaxSocketsLimit()) { - // We could check if we really have a stalled group here, but it requires - // a scan of all groups, so just flip a flag here, and do the check later. - may_have_stalled_group_ = true; - - request->net_log().AddEvent(NetLog::TYPE_SOCKET_POOL_STALLED_MAX_SOCKETS, - NULL); - } else { - request->net_log().AddEvent( - NetLog::TYPE_SOCKET_POOL_STALLED_MAX_SOCKETS_PER_GROUP, NULL); - } - return ERR_IO_PENDING; - } - // Try to reuse a socket. while (!group.idle_sockets.empty()) { IdleSocket idle_socket = group.idle_sockets.back(); @@ -221,6 +204,26 @@ int ClientSocketPoolBaseHelper::RequestSocketInternal( delete idle_socket.socket; } + // Can we make another active socket now? + if (!group.HasAvailableSocketSlot(max_sockets_per_group_)) { + request->net_log().AddEvent( + NetLog::TYPE_SOCKET_POOL_STALLED_MAX_SOCKETS_PER_GROUP, NULL); + return ERR_IO_PENDING; + } + + if (ReachedMaxSocketsLimit()) { + if (idle_socket_count() > 0) { + CloseOneIdleSocket(); + } else { + // We could check if we really have a stalled group here, but it requires + // a scan of all groups, so just flip a flag here, and do the check later. + may_have_stalled_group_ = true; + request->net_log().AddEvent( + NetLog::TYPE_SOCKET_POOL_STALLED_MAX_SOCKETS, NULL); + return ERR_IO_PENDING; + } + } + // See if we already have enough connect jobs or sockets that will be released // soon. 
if (group.HasReleasingSockets()) { @@ -502,10 +505,14 @@ void ClientSocketPoolBaseHelper::DoReleaseSocket(const std::string& group_name, int stalled_group_count = FindTopStalledGroup(&top_group, &top_group_name); if (stalled_group_count >= 1) { if (ReachedMaxSocketsLimit()) { - // We can't activate more sockets since we're already at our global - // limit. - may_have_stalled_group_ = true; - return; + if (idle_socket_count() > 0) { + CloseOneIdleSocket(); + } else { + // We can't activate more sockets since we're already at our global + // limit. + may_have_stalled_group_ = true; + return; + } } ProcessPendingRequest(top_group_name, top_group); @@ -636,8 +643,10 @@ void ClientSocketPoolBaseHelper::OnAvailableSocketSlot( (stalled_group_count == 1 && top_group->num_releasing_sockets == 0)) { may_have_stalled_group_ = false; } - if (stalled_group_count >= 1) + if (stalled_group_count >= 1) { + CHECK_GE(1, idle_socket_count()); ProcessPendingRequest(top_group_name, top_group); + } } else if (!group->pending_requests.empty()) { ProcessPendingRequest(group_name, group); // |group| may no longer be valid after this point. Be careful not to @@ -726,7 +735,8 @@ void ClientSocketPoolBaseHelper::CancelAllConnectJobs() { bool ClientSocketPoolBaseHelper::ReachedMaxSocketsLimit() const { // Each connecting socket will eventually connect and be handed out. 
- int total = handed_out_socket_count_ + connecting_socket_count_; + int total = handed_out_socket_count_ + connecting_socket_count_ + + idle_socket_count(); DCHECK_LE(total, max_sockets_); if (total < max_sockets_) return false; @@ -734,6 +744,27 @@ bool ClientSocketPoolBaseHelper::ReachedMaxSocketsLimit() const { return true; } +void ClientSocketPoolBaseHelper::CloseOneIdleSocket() { + CHECK_GT(idle_socket_count(), 0); + + for (GroupMap::iterator i = group_map_.begin(); i != group_map_.end(); ++i) { + Group& group = i->second; + + if (!group.idle_sockets.empty()) { + std::deque<IdleSocket>::iterator j = group.idle_sockets.begin(); + delete j->socket; + group.idle_sockets.erase(j); + DecrementIdleCount(); + if (group.IsEmpty()) + group_map_.erase(i); + + return; + } + } + + LOG(DFATAL) << "No idle socket found to close!."; +} + } // namespace internal } // namespace net |