path: root/net/socket/client_socket_pool_base.cc
author     rch@chromium.org <rch@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>   2012-03-20 18:23:25 +0000
committer  rch@chromium.org <rch@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>   2012-03-20 18:23:25 +0000
commit     4ab20fd6d3c2397617519fba50070c329778d774 (patch)
tree       4e852f22ab0502c83ff84b394ce590d16adefb69 /net/socket/client_socket_pool_base.cc
parent     e8688892f420dcc681527c4a4623af2e26f3de2f (diff)
Revert 127717 - Revert 118788 - Revert 113405 - Revert 113305 - Revert 113300 - Revert 112134 - Revert 112130 - Close idle connections / SPDY sessions when needed
Due to the idle connection state being held by different socket pools, it's possible for one socket pool to hold an idle socket in a lower-layer socket pool. From the lower-level socket pool's perspective, the socket is being "actively" used. From the higher socket pool's perspective (including SpdySession, which is more of a connection manager), the connection is idle and can be closed if we have hit a limit. Normally this isn't a big deal, except when we have a lot of idle SPDY connections and are connecting via a proxy, so we have low connection limits through the proxy server. We address this problem by allowing lower-level socket pools to tell higher-level socket pools to close a socket.

Fixed ASAN test failures by removing .Times(1) and .Times(2) from the CloseMultipleIdleSocketsHeldByLayeredPoolWhenNeeded unittest (this removes the tests' reliance on the order of the std::set in CloseOneIdleConnectionInLayeredPool). ASAN is probably causing the memory allocator to allocate the pools differently. The std::set is ordered by LayeredPool*, which is the address of the LayeredPool (willchan).

Added NET_EXPORT for the layered pool class definition to fix the Windows shared compile.

BUG=62364, 92244, 109876, 110368
TEST=

Review URL: http://codereview.chromium.org/9667016

TBR=rch@chromium.org
Review URL: https://chromiumcodereview.appspot.com/9760002

git-svn-id: svn://svn.chromium.org/chrome/trunk/src@127730 0039d316-1c4b-4281-b951-d872f2087c98
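For readers unfamiliar with the layering mechanism this revert backs out, the minimal sketch below shows the shape of the idea described above: a lower-level pool that has hit its socket limit asks the pools registered above it to close a connection they consider idle. This is an illustration only, not Chromium code; class names such as LowerLevelPool and IdleHoldingPool are invented for the example, while LayeredPool, AddLayeredPool, RemoveLayeredPool, and CloseOneIdleConnectionInLayeredPool mirror the identifiers visible in the diff below.

    // Minimal sketch of the layered-pool idea (illustrative names, not Chromium code).
    #include <iostream>
    #include <set>

    // A pool layered on top of a transport pool (e.g. an SSL pool or a SPDY
    // session manager). It may hold a transport socket that looks "active" to
    // the lower layer but is idle from its own point of view.
    class LayeredPool {
     public:
      virtual ~LayeredPool() = default;
      // Returns true if the pool found and closed one idle connection.
      virtual bool CloseOneIdleConnection() = 0;
    };

    // The lower-level pool: tracks registered higher-layer pools and, when it
    // runs out of sockets, asks each of them in turn to give one back.
    class LowerLevelPool {
     public:
      void AddLayeredPool(LayeredPool* pool) { higher_layer_pools_.insert(pool); }
      void RemoveLayeredPool(LayeredPool* pool) { higher_layer_pools_.erase(pool); }

      // Mirrors CloseOneIdleConnectionInLayeredPool() in the diff: query each
      // higher layer until one of them frees a connection.
      bool CloseOneIdleConnectionInLayeredPool() {
        for (LayeredPool* pool : higher_layer_pools_) {
          if (pool->CloseOneIdleConnection())
            return true;
        }
        return false;
      }

     private:
      std::set<LayeredPool*> higher_layer_pools_;
    };

    // Toy higher-layer pool holding a fixed number of idle connections.
    class IdleHoldingPool : public LayeredPool {
     public:
      explicit IdleHoldingPool(int idle_connections)
          : idle_connections_(idle_connections) {}
      bool CloseOneIdleConnection() override {
        if (idle_connections_ == 0)
          return false;
        --idle_connections_;
        return true;
      }

     private:
      int idle_connections_;
    };

    int main() {
      LowerLevelPool transport_pool;
      IdleHoldingPool spdy_like_pool(2);
      transport_pool.AddLayeredPool(&spdy_like_pool);

      // The transport pool is at its limit: reclaim sockets held idle above it.
      while (transport_pool.CloseOneIdleConnectionInLayeredPool())
        std::cout << "Reclaimed one idle connection from a higher layer.\n";

      transport_pool.RemoveLayeredPool(&spdy_like_pool);
      return 0;
    }

The diff that follows removes exactly this plumbing from client_socket_pool_base.cc (the registration set, the layered-pool walk, and the associated CHECKs), which is why the change is almost entirely deletions.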
Diffstat (limited to 'net/socket/client_socket_pool_base.cc')
-rw-r--r--  net/socket/client_socket_pool_base.cc | 101
1 file changed, 15 insertions, 86 deletions
diff --git a/net/socket/client_socket_pool_base.cc b/net/socket/client_socket_pool_base.cc
index b3417bf..d230ad4 100644
--- a/net/socket/client_socket_pool_base.cc
+++ b/net/socket/client_socket_pool_base.cc
@@ -207,7 +207,6 @@ ClientSocketPoolBaseHelper::~ClientSocketPoolBaseHelper() {
DCHECK(group_map_.empty());
DCHECK(pending_callback_map_.empty());
DCHECK_EQ(0, connecting_socket_count_);
- CHECK(higher_layer_pools_.empty());
NetworkChangeNotifier::RemoveIPAddressObserver(this);
}
@@ -239,18 +238,6 @@ ClientSocketPoolBaseHelper::RemoveRequestFromQueue(
return req;
}
-void ClientSocketPoolBaseHelper::AddLayeredPool(LayeredPool* pool) {
- CHECK(pool);
- CHECK(!ContainsKey(higher_layer_pools_, pool));
- higher_layer_pools_.insert(pool);
-}
-
-void ClientSocketPoolBaseHelper::RemoveLayeredPool(LayeredPool* pool) {
- CHECK(pool);
- CHECK(ContainsKey(higher_layer_pools_, pool));
- higher_layer_pools_.erase(pool);
-}
-
int ClientSocketPoolBaseHelper::RequestSocket(
const std::string& group_name,
const Request* request) {
@@ -349,46 +336,26 @@ int ClientSocketPoolBaseHelper::RequestSocketInternal(
// Can we make another active socket now?
if (!group->HasAvailableSocketSlot(max_sockets_per_group_) &&
!request->ignore_limits()) {
- // TODO(willchan): Consider whether or not we need to close a socket in a
- // higher layered group. I don't think this makes sense since we would just
- // reuse that socket then if we needed one and wouldn't make it down to this
- // layer.
request->net_log().AddEvent(
NetLog::TYPE_SOCKET_POOL_STALLED_MAX_SOCKETS_PER_GROUP, NULL);
return ERR_IO_PENDING;
}
if (ReachedMaxSocketsLimit() && !request->ignore_limits()) {
- // NOTE(mmenke): Wonder if we really need different code for each case
- // here. Only reason for them now seems to be preconnects.
if (idle_socket_count() > 0) {
- // There's an idle socket in this pool. Either that's because there's
- // still one in this group, but we got here due to preconnecting bypassing
- // idle sockets, or because there's an idle socket in another group.
bool closed = CloseOneIdleSocketExceptInGroup(group);
if (preconnecting && !closed)
return ERR_PRECONNECT_MAX_SOCKET_LIMIT;
} else {
- do {
- if (!CloseOneIdleConnectionInLayeredPool()) {
- // We could check if we really have a stalled group here, but it
- // requires a scan of all groups, so just flip a flag here, and do
- // the check later.
- request->net_log().AddEvent(
- NetLog::TYPE_SOCKET_POOL_STALLED_MAX_SOCKETS, NULL);
- return ERR_IO_PENDING;
- }
- } while (ReachedMaxSocketsLimit());
-
- // It is possible that CloseOneIdleConnectionInLayeredPool() has deleted
- // our Group (see http://crbug.com/109876), so look it up again
- // to be safe.
- group = GetOrCreateGroup(group_name);
+ // We could check if we really have a stalled group here, but it requires
+ // a scan of all groups, so just flip a flag here, and do the check later.
+ request->net_log().AddEvent(
+ NetLog::TYPE_SOCKET_POOL_STALLED_MAX_SOCKETS, NULL);
+ return ERR_IO_PENDING;
}
}
- // We couldn't find a socket to reuse, and there's space to allocate one,
- // so allocate and connect a new one.
+ // We couldn't find a socket to reuse, so allocate and connect a new one.
scoped_ptr<ConnectJob> connect_job(
connect_job_factory_->NewConnectJob(group_name, *request, this));
@@ -650,8 +617,7 @@ DictionaryValue* ClientSocketPoolBaseHelper::GetInfoAsValue(
group_dict->Set("connect_jobs", connect_jobs_list);
group_dict->SetBoolean("is_stalled",
- group->IsStalledOnPoolMaxSockets(
- max_sockets_per_group_));
+ group->IsStalled(max_sockets_per_group_));
group_dict->SetBoolean("has_backup_job", group->HasBackupJob());
all_groups_dict->SetWithoutPathExpansion(it->first, group_dict);
@@ -826,22 +792,18 @@ void ClientSocketPoolBaseHelper::CheckForStalledSocketGroups() {
// are not at the |max_sockets_per_group_| limit. Note: for requests with
// the same priority, the winner is based on group hash ordering (and not
// insertion order).
-bool ClientSocketPoolBaseHelper::FindTopStalledGroup(
- Group** group,
- std::string* group_name) const {
- CHECK((group && group_name) || (!group && !group_name));
+bool ClientSocketPoolBaseHelper::FindTopStalledGroup(Group** group,
+ std::string* group_name) {
Group* top_group = NULL;
const std::string* top_group_name = NULL;
bool has_stalled_group = false;
- for (GroupMap::const_iterator i = group_map_.begin();
+ for (GroupMap::iterator i = group_map_.begin();
i != group_map_.end(); ++i) {
Group* curr_group = i->second;
const RequestQueue& queue = curr_group->pending_requests();
if (queue.empty())
continue;
- if (curr_group->IsStalledOnPoolMaxSockets(max_sockets_per_group_)) {
- if (!group)
- return true;
+ if (curr_group->IsStalled(max_sockets_per_group_)) {
has_stalled_group = true;
bool has_higher_priority = !top_group ||
curr_group->TopPendingPriority() < top_group->TopPendingPriority();
@@ -853,11 +815,8 @@ bool ClientSocketPoolBaseHelper::FindTopStalledGroup(
}
if (top_group) {
- CHECK(group);
*group = top_group;
*group_name = *top_group_name;
- } else {
- CHECK(!has_stalled_group);
}
return has_stalled_group;
}
@@ -930,25 +889,6 @@ void ClientSocketPoolBaseHelper::Flush() {
AbortAllRequests();
}
-bool ClientSocketPoolBaseHelper::IsStalled() const {
- // If we are not using |max_sockets_|, then clearly we are not stalled
- if ((handed_out_socket_count_ + connecting_socket_count_) < max_sockets_)
- return false;
- // So in order to be stalled we need to be using |max_sockets_| AND
- // we need to have a request that is actually stalled on the global
- // socket limit. To find such a request, we look for a group that
- // a has more requests that jobs AND where the number of jobs is less
- // than |max_sockets_per_group_|. (If the number of jobs is equal to
- // |max_sockets_per_group_|, then the request is stalled on the group,
- // which does not count.)
- for (GroupMap::const_iterator it = group_map_.begin();
- it != group_map_.end(); it++) {
- if (it->second->IsStalledOnPoolMaxSockets(max_sockets_per_group_))
- return true;
- }
- return false;
-}
-
void ClientSocketPoolBaseHelper::RemoveConnectJob(ConnectJob* job,
Group* group) {
CHECK_GT(connecting_socket_count_, 0);
@@ -1085,10 +1025,8 @@ bool ClientSocketPoolBaseHelper::ReachedMaxSocketsLimit() const {
return true;
}
-bool ClientSocketPoolBaseHelper::CloseOneIdleSocket() {
- if (idle_socket_count() == 0)
- return false;
- return CloseOneIdleSocketExceptInGroup(NULL);
+void ClientSocketPoolBaseHelper::CloseOneIdleSocket() {
+ CloseOneIdleSocketExceptInGroup(NULL);
}
bool ClientSocketPoolBaseHelper::CloseOneIdleSocketExceptInGroup(
@@ -1112,18 +1050,9 @@ bool ClientSocketPoolBaseHelper::CloseOneIdleSocketExceptInGroup(
}
}
- return false;
-}
+ if (!exception_group)
+ LOG(DFATAL) << "No idle socket found to close!.";
-bool ClientSocketPoolBaseHelper::CloseOneIdleConnectionInLayeredPool() {
- // This pool doesn't have any idle sockets. It's possible that a pool at a
- // higher layer is holding one of this sockets active, but it's actually idle.
- // Query the higher layers.
- for (std::set<LayeredPool*>::const_iterator it = higher_layer_pools_.begin();
- it != higher_layer_pools_.end(); ++it) {
- if ((*it)->CloseOneIdleConnection())
- return true;
- }
return false;
}