author | rch@chromium.org <rch@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98> | 2012-04-02 15:22:43 +0000 |
---|---|---|
committer | rch@chromium.org <rch@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98> | 2012-04-02 15:22:43 +0000 |
commit | 711f7e73c80c86bd4e6e5fe02a2f4825c5358608 (patch) | |
tree | 6af617b573968b2021d08711cd4beccf6a3c10ed /net/socket/client_socket_pool_base.cc | |
parent | 31d8f5f265d7e715f1555c42c10e6741976b1065 (diff) | |
Attempting to re-land this feature with instrumentation to track down the use-after-free.
Revert 129034 - Revert 127893 - Revert 127730 - Revert 127717 - Revert 118788 - Revert 113405 - Revert 113305 - Revert 113300 - Revert 112134 - Revert 112130 - Close idle connections / SPDY sessions when needed
Because idle-connection state is tracked separately by each socket pool, a higher-layer socket pool can hold an idle socket that lives in a lower-layer socket pool. From the lower-level pool's perspective, that socket is "actively" used. From the higher-layer pool's perspective (including SpdySession, which is more of a connection manager), the connection is idle and can be closed if we have hit a limit.
Normally this isn't a big deal, but it matters when we have many idle SPDY connections and are connecting via a proxy, since connection limits through the proxy server are low. We address this by allowing lower-level socket pools to tell higher-level socket pools to close a socket.
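To make the layering concrete, here is a minimal, self-contained sketch of the mechanism, not the Chromium code itself: `LayeredPool`, `CloseOneIdleConnection()`, `AddLayeredPool()`, `RemoveLayeredPool()`, and `CloseOneIdleConnectionInLayeredPool()` are names introduced by this change, while `LowerLevelPool`, `ToySessionPool`, and the demo in `main()` are hypothetical stand-ins.

```cpp
#include <iostream>
#include <set>

// Simplified stand-in for the LayeredPool interface added by this change:
// a higher-layer pool that may hold sockets which are idle from its own
// perspective but look "active" to the pool below it.
class LayeredPool {
 public:
  virtual ~LayeredPool() {}
  // Returns true if an idle connection was found and closed.
  virtual bool CloseOneIdleConnection() = 0;
};

// Hypothetical, stripped-down lower-level pool. The real
// ClientSocketPoolBaseHelper tracks groups, jobs, and limits; only the
// higher-layer bookkeeping is sketched here.
class LowerLevelPool {
 public:
  void AddLayeredPool(LayeredPool* pool) { higher_layer_pools_.insert(pool); }
  void RemoveLayeredPool(LayeredPool* pool) { higher_layer_pools_.erase(pool); }

  // Mirrors CloseOneIdleConnectionInLayeredPool(): when this pool has no
  // idle sockets of its own, ask each higher layer to give one back.
  bool CloseOneIdleConnectionInLayeredPool() {
    for (std::set<LayeredPool*>::const_iterator it = higher_layer_pools_.begin();
         it != higher_layer_pools_.end(); ++it) {
      if ((*it)->CloseOneIdleConnection())
        return true;
    }
    return false;
  }

 private:
  std::set<LayeredPool*> higher_layer_pools_;
};

// Toy higher-layer pool: pretends to hold some sessions it considers idle.
class ToySessionPool : public LayeredPool {
 public:
  explicit ToySessionPool(int idle_sessions) : idle_sessions_(idle_sessions) {}
  virtual bool CloseOneIdleConnection() {
    if (idle_sessions_ == 0)
      return false;
    --idle_sessions_;
    return true;
  }

 private:
  int idle_sessions_;
};

int main() {
  LowerLevelPool lower;
  ToySessionPool spdy_like(1);
  lower.AddLayeredPool(&spdy_like);

  // At its socket limit and with no idle sockets of its own, the lower pool
  // recovers a slot by asking the higher layer to close an idle connection.
  std::cout << lower.CloseOneIdleConnectionInLayeredPool() << "\n";  // prints 1
  std::cout << lower.CloseOneIdleConnectionInLayeredPool() << "\n";  // prints 0

  lower.RemoveLayeredPool(&spdy_like);
  return 0;
}
```

The design point, as described above, is that the lower-level pool never inspects higher-layer state directly; it only asks each registered higher-layer pool, in turn, to give back one connection that the higher layer considers idle.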
Fixed ASAN test failures by removing .Times(1) and .Times(2) from the CloseMultipleIdleSocketsHeldByLayeredPoolWhenNeeded unittest (this removes the expectations that rely on the iteration order of the std::set in CloseOneIdleConnectionInLayeredPool). ASAN is probably causing the memory allocator to place the pools at different addresses. The std::set is ordered by LayeredPool*, which is the address of the LayeredPool (willchan).
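A tiny illustration of why those expectations were fragile (a hypothetical snippet, not the unittest itself): iterating a `std::set` keyed on raw pointers visits elements in the numeric order of their addresses, which depends entirely on where the allocator happened to place each object.

```cpp
#include <iostream>
#include <set>

struct LayeredPool {};  // stand-in for the real interface

int main() {
  // higher_layer_pools_ is a std::set<LayeredPool*>, so iteration order is
  // the numeric order of the pointers. Under ASAN the allocator places
  // objects differently, so any test expectation that depends on which pool
  // is asked first (e.g. .Times(1) vs .Times(2)) can flip.
  LayeredPool* a = new LayeredPool;
  LayeredPool* b = new LayeredPool;

  std::set<LayeredPool*> pools;
  pools.insert(a);
  pools.insert(b);

  // Whether a or b comes first depends only on their addresses.
  std::cout << (*pools.begin() == a ? "a first" : "b first") << "\n";

  delete a;
  delete b;
  return 0;
}
```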
Added NET_EXPORT to the LayeredPool class definition to fix the Windows shared build.
BUG=62364, 92244, 109876, 110368, 119847
TEST=
Review URL: http://codereview.chromium.org/9861032
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@130129 0039d316-1c4b-4281-b951-d872f2087c98
Diffstat (limited to 'net/socket/client_socket_pool_base.cc')
-rw-r--r-- | net/socket/client_socket_pool_base.cc | 102 |
1 file changed, 87 insertions, 15 deletions
diff --git a/net/socket/client_socket_pool_base.cc b/net/socket/client_socket_pool_base.cc
index d230ad4..22fd29d 100644
--- a/net/socket/client_socket_pool_base.cc
+++ b/net/socket/client_socket_pool_base.cc
@@ -207,6 +207,7 @@ ClientSocketPoolBaseHelper::~ClientSocketPoolBaseHelper() {
   DCHECK(group_map_.empty());
   DCHECK(pending_callback_map_.empty());
   DCHECK_EQ(0, connecting_socket_count_);
+  CHECK(higher_layer_pools_.empty());
 
   NetworkChangeNotifier::RemoveIPAddressObserver(this);
 }
@@ -238,6 +239,18 @@ ClientSocketPoolBaseHelper::RemoveRequestFromQueue(
   return req;
 }
 
+void ClientSocketPoolBaseHelper::AddLayeredPool(LayeredPool* pool) {
+  CHECK(pool);
+  CHECK(!ContainsKey(higher_layer_pools_, pool));
+  higher_layer_pools_.insert(pool);
+}
+
+void ClientSocketPoolBaseHelper::RemoveLayeredPool(LayeredPool* pool) {
+  CHECK(pool);
+  CHECK(ContainsKey(higher_layer_pools_, pool));
+  higher_layer_pools_.erase(pool);
+}
+
 int ClientSocketPoolBaseHelper::RequestSocket(
     const std::string& group_name,
     const Request* request) {
@@ -336,26 +349,46 @@ int ClientSocketPoolBaseHelper::RequestSocketInternal(
   // Can we make another active socket now?
   if (!group->HasAvailableSocketSlot(max_sockets_per_group_) &&
       !request->ignore_limits()) {
+    // TODO(willchan): Consider whether or not we need to close a socket in a
+    // higher layered group. I don't think this makes sense since we would just
+    // reuse that socket then if we needed one and wouldn't make it down to this
+    // layer.
     request->net_log().AddEvent(
         NetLog::TYPE_SOCKET_POOL_STALLED_MAX_SOCKETS_PER_GROUP, NULL);
     return ERR_IO_PENDING;
   }
 
   if (ReachedMaxSocketsLimit() && !request->ignore_limits()) {
+    // NOTE(mmenke): Wonder if we really need different code for each case
+    // here. Only reason for them now seems to be preconnects.
     if (idle_socket_count() > 0) {
+      // There's an idle socket in this pool. Either that's because there's
+      // still one in this group, but we got here due to preconnecting bypassing
+      // idle sockets, or because there's an idle socket in another group.
      bool closed = CloseOneIdleSocketExceptInGroup(group);
       if (preconnecting && !closed)
         return ERR_PRECONNECT_MAX_SOCKET_LIMIT;
     } else {
-      // We could check if we really have a stalled group here, but it requires
-      // a scan of all groups, so just flip a flag here, and do the check later.
-      request->net_log().AddEvent(
-          NetLog::TYPE_SOCKET_POOL_STALLED_MAX_SOCKETS, NULL);
-      return ERR_IO_PENDING;
+      do {
+        if (!CloseOneIdleConnectionInLayeredPool()) {
+          // We could check if we really have a stalled group here, but it
+          // requires a scan of all groups, so just flip a flag here, and do
+          // the check later.
+          request->net_log().AddEvent(
+              NetLog::TYPE_SOCKET_POOL_STALLED_MAX_SOCKETS, NULL);
+          return ERR_IO_PENDING;
+        }
+      } while (ReachedMaxSocketsLimit());
+
+      // It is possible that CloseOneIdleConnectionInLayeredPool() has deleted
+      // our Group (see http://crbug.com/109876), so look it up again
+      // to be safe.
+      group = GetOrCreateGroup(group_name);
     }
   }
 
-  // We couldn't find a socket to reuse, so allocate and connect a new one.
+  // We couldn't find a socket to reuse, and there's space to allocate one,
+  // so allocate and connect a new one.
   scoped_ptr<ConnectJob> connect_job(
       connect_job_factory_->NewConnectJob(group_name, *request, this));
@@ -617,7 +650,8 @@ DictionaryValue* ClientSocketPoolBaseHelper::GetInfoAsValue(
     group_dict->Set("connect_jobs", connect_jobs_list);
 
     group_dict->SetBoolean("is_stalled",
-                           group->IsStalled(max_sockets_per_group_));
+                           group->IsStalledOnPoolMaxSockets(
+                               max_sockets_per_group_));
     group_dict->SetBoolean("has_backup_job", group->HasBackupJob());
 
     all_groups_dict->SetWithoutPathExpansion(it->first, group_dict);
@@ -792,18 +826,22 @@ void ClientSocketPoolBaseHelper::CheckForStalledSocketGroups() {
 // are not at the |max_sockets_per_group_| limit. Note: for requests with
 // the same priority, the winner is based on group hash ordering (and not
 // insertion order).
-bool ClientSocketPoolBaseHelper::FindTopStalledGroup(Group** group,
-                                                     std::string* group_name) {
+bool ClientSocketPoolBaseHelper::FindTopStalledGroup(
+    Group** group,
+    std::string* group_name) const {
+  CHECK((group && group_name) || (!group && !group_name));
   Group* top_group = NULL;
   const std::string* top_group_name = NULL;
   bool has_stalled_group = false;
-  for (GroupMap::iterator i = group_map_.begin();
+  for (GroupMap::const_iterator i = group_map_.begin();
        i != group_map_.end(); ++i) {
     Group* curr_group = i->second;
     const RequestQueue& queue = curr_group->pending_requests();
     if (queue.empty())
       continue;
-    if (curr_group->IsStalled(max_sockets_per_group_)) {
+    if (curr_group->IsStalledOnPoolMaxSockets(max_sockets_per_group_)) {
+      if (!group)
+        return true;
       has_stalled_group = true;
       bool has_higher_priority = !top_group ||
           curr_group->TopPendingPriority() < top_group->TopPendingPriority();
@@ -815,8 +853,11 @@ bool ClientSocketPoolBaseHelper::FindTopStalledGroup(Group** group,
   }
 
   if (top_group) {
+    CHECK(group);
     *group = top_group;
     *group_name = *top_group_name;
+  } else {
+    CHECK(!has_stalled_group);
   }
   return has_stalled_group;
 }
@@ -889,6 +930,25 @@ void ClientSocketPoolBaseHelper::Flush() {
   AbortAllRequests();
 }
 
+bool ClientSocketPoolBaseHelper::IsStalled() const {
+  // If we are not using |max_sockets_|, then clearly we are not stalled
+  if ((handed_out_socket_count_ + connecting_socket_count_) < max_sockets_)
+    return false;
+  // So in order to be stalled we need to be using |max_sockets_| AND
+  // we need to have a request that is actually stalled on the global
+  // socket limit. To find such a request, we look for a group that
+  // a has more requests that jobs AND where the number of jobs is less
+  // than |max_sockets_per_group_|. (If the number of jobs is equal to
+  // |max_sockets_per_group_|, then the request is stalled on the group,
+  // which does not count.)
+  for (GroupMap::const_iterator it = group_map_.begin();
+       it != group_map_.end(); it++) {
+    if (it->second->IsStalledOnPoolMaxSockets(max_sockets_per_group_))
+      return true;
+  }
+  return false;
+}
+
 void ClientSocketPoolBaseHelper::RemoveConnectJob(ConnectJob* job,
                                                   Group* group) {
   CHECK_GT(connecting_socket_count_, 0);
@@ -1025,8 +1085,10 @@ bool ClientSocketPoolBaseHelper::ReachedMaxSocketsLimit() const {
   return true;
 }
 
-void ClientSocketPoolBaseHelper::CloseOneIdleSocket() {
-  CloseOneIdleSocketExceptInGroup(NULL);
+bool ClientSocketPoolBaseHelper::CloseOneIdleSocket() {
+  if (idle_socket_count() == 0)
+    return false;
+  return CloseOneIdleSocketExceptInGroup(NULL);
 }
 
 bool ClientSocketPoolBaseHelper::CloseOneIdleSocketExceptInGroup(
@@ -1050,9 +1112,19 @@ bool ClientSocketPoolBaseHelper::CloseOneIdleSocketExceptInGroup(
     }
   }
 
-  if (!exception_group)
-    LOG(DFATAL) << "No idle socket found to close!.";
+  return false;
+}
 
+bool ClientSocketPoolBaseHelper::CloseOneIdleConnectionInLayeredPool() {
+  // This pool doesn't have any idle sockets. It's possible that a pool at a
+  // higher layer is holding one of this sockets active, but it's actually idle.
+  // Query the higher layers.
+  for (std::set<LayeredPool*>::const_iterator it = higher_layer_pools_.begin();
+       it != higher_layer_pools_.end(); ++it) {
+    (*it)->CrashIfFreed();
+    if ((*it)->CloseOneIdleConnection())
+      return true;
+  }
   return false;
 }
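One consequence of the new `CHECK(higher_layer_pools_.empty())` in the destructor is that every higher-layer pool must unregister itself via `RemoveLayeredPool()` before the lower-level pool is destroyed. The following is a hypothetical sketch of that lifetime discipline, using `assert` and invented `LowerPool`/`HigherPool` classes rather than the real Chromium types: the higher-layer pool registers in its constructor and unregisters in its destructor, so the invariant holds as long as it is destroyed before the pool it layers on top of.

```cpp
#include <cassert>
#include <cstddef>
#include <set>

class LayeredPool {
 public:
  virtual ~LayeredPool() {}
  virtual bool CloseOneIdleConnection() = 0;
};

// Hypothetical lower-level pool that, like ClientSocketPoolBaseHelper after
// this change, insists that all higher layers have unregistered by the time
// it is destroyed.
class LowerPool {
 public:
  ~LowerPool() { assert(higher_layer_pools_.empty()); }
  void AddLayeredPool(LayeredPool* pool) {
    bool inserted = higher_layer_pools_.insert(pool).second;
    assert(inserted && "pool was already registered");
    (void)inserted;  // silence unused-variable warnings in release builds
  }
  void RemoveLayeredPool(LayeredPool* pool) {
    std::size_t erased = higher_layer_pools_.erase(pool);
    assert(erased == 1 && "pool was not registered");
    (void)erased;
  }

 private:
  std::set<LayeredPool*> higher_layer_pools_;
};

// Hypothetical higher-layer pool that stays registered for its whole
// lifetime and unregisters itself on destruction.
class HigherPool : public LayeredPool {
 public:
  explicit HigherPool(LowerPool* lower) : lower_(lower) {
    lower_->AddLayeredPool(this);
  }
  virtual ~HigherPool() { lower_->RemoveLayeredPool(this); }
  virtual bool CloseOneIdleConnection() { return false; }

 private:
  LowerPool* lower_;
};

int main() {
  LowerPool lower;
  {
    HigherPool higher(&lower);  // registers with the lower pool
  }                             // unregisters before |lower| is destroyed
  return 0;
}
```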