diff options
author | mbelshe@chromium.org <mbelshe@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98> | 2010-06-29 01:02:50 +0000 |
---|---|---|
committer | mbelshe@chromium.org <mbelshe@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98> | 2010-06-29 01:02:50 +0000 |
commit | 934152ead035eaf344c8705a3a670fe34d70f641 (patch) | |
tree | 03d57d6f0980784d1d078c07e2ce43f8e48ec1c1 /net | |
parent | ca983deb2813e9e7557ae7985f1a9efa057f8298 (diff) | |
download | chromium_src-934152ead035eaf344c8705a3a670fe34d70f641.zip chromium_src-934152ead035eaf344c8705a3a670fe34d70f641.tar.gz chromium_src-934152ead035eaf344c8705a3a670fe34d70f641.tar.bz2 |
We basically don't do late socket binding when a connect has already
been started for a request, even if another socket frees up earlier.
The reassignment logic was quite complicated, so I reworked it. Fixing
this bug was easy by changing the way FindTopStalledGroup worked, but
because that function is called in that loop, changing this case
caused the loop to run infinitely in some cases. This led me to look
into unwinding the loop.
The problem really came down to ReleaseSocket/DoReleaseSocket. Because
we allow for a pending queue of released sockets, we had to do this
looping (which has been a source of bugs before). To fix, I
eliminated the pending_releases queue. I also reworked the routes
through OnAvailableSocketSlot to unify them and always run asynchronously.
The result is that now we no longer have the loop. So when one
socket is released, we hand out exactly one socket. Note also that
this logic slightly changes the priority of how we recycle sockets.
Previously, we always consulted the TopStalledGroup. The TopStalledGroup
is really only interesting in the case where we're at our max global
socket limit, which is rarely the case. In the new logic, when a
socket is released, first priority goes to any pending socket in the
same group, regardless of that group's priority. The reason is - why
close a socket we already have open? Previously, if the released
socket's group was not the highest priority group, the socket would
be marked idle, then closed (to make space for a socket to the
TopStalledGroup), and finally a new socket created. I believe the
new algorithm, while not perfectly matching the priorities, is more
efficient (less churn on sockets), and also is more graceful to the
common case.
Finally OnAvailableSocketSlot does two things. First, it tries to
"give" the now available slot to a particular group, which is dependent
on how OnAvailableSocketSlot was called. If we're currently
stalled on max sockets, it will also check (after giving the socket
out) to see if we can somehow free something up to satisfy a
stalled group. If that second step fails for whatever reason,
we don't loop. In theory, this could mean that we go under the
socket max and don't dish out some sockets right away. To make
sure that multiple stalled groups can get unblocked, we'll record
the number of stalled groups, and once in this mode,
OnAvailableSocketSlot will keep checking for stalled groups until the
count finally drops to zero.
BUG=47375
TEST=DelayedSocketBindingWaitingForConnect,CancelStalledSocketAtSocketLimit
Review URL: http://codereview.chromium.org/2861023
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@51081 0039d316-1c4b-4281-b951-d872f2087c98
Diffstat (limited to 'net')
-rw-r--r-- | net/socket/client_socket_pool_base.cc | 231 | ||||
-rw-r--r-- | net/socket/client_socket_pool_base.h | 89 | ||||
-rw-r--r-- | net/socket/client_socket_pool_base_unittest.cc | 364 | ||||
-rw-r--r-- | net/socket/socks_client_socket_pool_unittest.cc | 14 |
4 files changed, 389 insertions, 309 deletions
diff --git a/net/socket/client_socket_pool_base.cc b/net/socket/client_socket_pool_base.cc index 40daa35..68f04e1 100644 --- a/net/socket/client_socket_pool_base.cc +++ b/net/socket/client_socket_pool_base.cc @@ -132,16 +132,15 @@ ClientSocketPoolBaseHelper::ClientSocketPoolBaseHelper( : idle_socket_count_(0), connecting_socket_count_(0), handed_out_socket_count_(0), - num_releasing_sockets_(0), max_sockets_(max_sockets), max_sockets_per_group_(max_sockets_per_group), unused_idle_socket_timeout_(unused_idle_socket_timeout), used_idle_socket_timeout_(used_idle_socket_timeout), - may_have_stalled_group_(false), connect_job_factory_(connect_job_factory), backup_jobs_enabled_(false), ALLOW_THIS_IN_INITIALIZER_LIST(method_factory_(this)), - pool_generation_number_(0) { + pool_generation_number_(0), + last_stalled_group_count_(0) { DCHECK_LE(0, max_sockets_per_group); DCHECK_LE(max_sockets_per_group, max_sockets); @@ -188,6 +187,7 @@ int ClientSocketPoolBaseHelper::RequestSocket( const Request* request) { request->net_log().BeginEvent(NetLog::TYPE_SOCKET_POOL, NULL); Group& group = group_map_[group_name]; + int rv = RequestSocketInternal(group_name, request); if (rv != ERR_IO_PENDING) { request->net_log().EndEvent(NetLog::TYPE_SOCKET_POOL, NULL); @@ -209,21 +209,8 @@ int ClientSocketPoolBaseHelper::RequestSocketInternal( Group& group = group_map_[group_name]; // Try to reuse a socket. - while (!group.idle_sockets.empty()) { - IdleSocket idle_socket = group.idle_sockets.back(); - group.idle_sockets.pop_back(); - DecrementIdleCount(); - if (idle_socket.socket->IsConnectedAndIdle()) { - // We found one we can reuse! - base::TimeDelta idle_time = - base::TimeTicks::Now() - idle_socket.start_time; - HandOutSocket( - idle_socket.socket, idle_socket.used, handle, idle_time, &group, - request->net_log()); - return OK; - } - delete idle_socket.socket; - } + if (AssignIdleSocketToGroup(&group, request)) + return OK; // Can we make another active socket now? 
if (!group.HasAvailableSocketSlot(max_sockets_per_group_)) { @@ -238,19 +225,12 @@ int ClientSocketPoolBaseHelper::RequestSocketInternal( } else { // We could check if we really have a stalled group here, but it requires // a scan of all groups, so just flip a flag here, and do the check later. - may_have_stalled_group_ = true; request->net_log().AddEvent( NetLog::TYPE_SOCKET_POOL_STALLED_MAX_SOCKETS, NULL); return ERR_IO_PENDING; } } - // See if we already have enough connect jobs or sockets that will be released - // soon. - if (group.HasReleasingSockets()) { - return ERR_IO_PENDING; - } - // We couldn't find a socket to reuse, so allocate and connect a new one. scoped_ptr<ConnectJob> connect_job( connect_job_factory_->NewConnectJob(group_name, *request, this)); @@ -284,6 +264,28 @@ int ClientSocketPoolBaseHelper::RequestSocketInternal( return rv; } +bool ClientSocketPoolBaseHelper::AssignIdleSocketToGroup(Group* group, + const Request* request) { + // Iterate through the list of idle sockets until we find one or exhaust + // the list. + while (!group->idle_sockets.empty()) { + IdleSocket idle_socket = group->idle_sockets.back(); + group->idle_sockets.pop_back(); + DecrementIdleCount(); + if (idle_socket.socket->IsConnectedAndIdle()) { + // We found one we can reuse! + base::TimeDelta idle_time = + base::TimeTicks::Now() - idle_socket.start_time; + HandOutSocket( + idle_socket.socket, idle_socket.used, request->handle(), idle_time, + group, request->net_log()); + return true; + } + delete idle_socket.socket; + } + return false; +} + // static void ClientSocketPoolBaseHelper::LogBoundConnectJobToRequest( const NetLog::Source& connect_job_source, const Request* request) { @@ -358,31 +360,17 @@ void ClientSocketPoolBaseHelper::CancelRequest( req->net_log().AddEvent(NetLog::TYPE_CANCELLED, NULL); req->net_log().EndEvent(NetLog::TYPE_SOCKET_POOL, NULL); delete req; - // Let one connect job connect and become idle for potential future use. 
- if (group.jobs.size() > group.pending_requests.size() + 1) { - // TODO(willchan): Cancel the job in the earliest LoadState. + + // We let the job run, unless we're at the socket limit. + if (group.jobs.size() && ReachedMaxSocketsLimit()) { RemoveConnectJob(*group.jobs.begin(), &group); - OnAvailableSocketSlot(group_name, &group); + CheckForStalledSocketGroups(); } - return; + break; } } } -void ClientSocketPoolBaseHelper::ReleaseSocket(const std::string& group_name, - ClientSocket* socket, - int id) { - Group& group = group_map_[group_name]; - group.num_releasing_sockets++; - num_releasing_sockets_++; - DCHECK_LE(group.num_releasing_sockets, group.active_socket_count); - // Run this asynchronously to allow the caller to finish before we let - // another to begin doing work. This also avoids nasty recursion issues. - // NOTE: We cannot refer to the handle argument after this method returns. - MessageLoop::current()->PostTask(FROM_HERE, NewRunnableMethod(this, - &ClientSocketPoolBaseHelper::DoReleaseSocket, group_name, socket, id)); -} - void ClientSocketPoolBaseHelper::CloseIdleSockets() { CleanupIdleSockets(true); } @@ -483,9 +471,9 @@ void ClientSocketPoolBaseHelper::DecrementIdleCount() { timer_.Stop(); } -void ClientSocketPoolBaseHelper::DoReleaseSocket(const std::string& group_name, - ClientSocket* socket, - int id) { +void ClientSocketPoolBaseHelper::ReleaseSocket(const std::string& group_name, + ClientSocket* socket, + int id) { // Running callbacks can cause the last outside reference to be released. // Hold onto a reference. 
scoped_refptr<ClientSocketPoolBaseHelper> ref_holder(this); @@ -495,60 +483,52 @@ void ClientSocketPoolBaseHelper::DoReleaseSocket(const std::string& group_name, Group& group = i->second; - group.num_releasing_sockets--; - DCHECK_GE(group.num_releasing_sockets, 0); - CHECK_GT(handed_out_socket_count_, 0); handed_out_socket_count_--; CHECK_GT(group.active_socket_count, 0); group.active_socket_count--; - CHECK_GT(num_releasing_sockets_, 0); - num_releasing_sockets_--; - const bool can_reuse = socket->IsConnectedAndIdle() && id == pool_generation_number_; if (can_reuse) { + // Add it to the idle list. AddIdleSocket(socket, true /* used socket */, &group); + OnAvailableSocketSlot(group_name, MayHaveStalledGroups()); } else { delete socket; } + // Check to see if there are stalled groups that can resume now. + CheckForStalledSocketGroups(); +} - // If there are no more releasing sockets, then we might have to process - // multiple available socket slots, since we stalled their processing until - // all sockets have been released. Note that ProcessPendingRequest() will - // invoke user callbacks, so |num_releasing_sockets_| may change. - // - // This code has been known to infinite loop. Set a counter and CHECK to make - // sure it doesn't get ridiculously high. - - int iterations = 0; - while (num_releasing_sockets_ == 0) { - CHECK_LT(iterations, 1000) << "Probably stuck in an infinite loop."; - std::string top_group_name; - Group* top_group = NULL; - int stalled_group_count = FindTopStalledGroup(&top_group, &top_group_name); - if (stalled_group_count >= 1) { - if (ReachedMaxSocketsLimit()) { - if (idle_socket_count() > 0) { - CloseOneIdleSocket(); - } else { - // We can't activate more sockets since we're already at our global - // limit. - may_have_stalled_group_ = true; - return; - } - } +void ClientSocketPoolBaseHelper::CheckForStalledSocketGroups() { + // If we have idle sockets, see if we can give one to the top-stalled group. 
+ std::string top_group_name; + Group* top_group = NULL; + last_stalled_group_count_ = FindTopStalledGroup(&top_group, &top_group_name); + if (!last_stalled_group_count_) + return; - ProcessPendingRequest(top_group_name, top_group); + if (ReachedMaxSocketsLimit()) { + if (idle_socket_count() > 0) { + CloseOneIdleSocket(); } else { - may_have_stalled_group_ = false; + // We can't activate more sockets since we're already at our global + // limit. return; } - - iterations++; } + + // Note: we don't loop on waking stalled groups. If the stalled group is at + // its limit, may be left with other stalled groups that could be + // waken. This isn't optimal, but there is no starvation, so to avoid + // the looping we leave it at this. + OnAvailableSocketSlot(top_group_name, false); +} + +bool ClientSocketPoolBaseHelper::MayHaveStalledGroups() { + return last_stalled_group_count_ > 0 || ReachedMaxSocketsLimit(); } // Search for the highest priority pending request, amongst the groups that @@ -620,7 +600,7 @@ void ClientSocketPoolBaseHelper::OnConnectJobComplete( r->callback()->Run(result); } else { AddIdleSocket(socket.release(), false /* unused socket */, &group); - OnAvailableSocketSlot(group_name, &group); + OnAvailableSocketSlot(group_name, MayHaveStalledGroups()); } } else { DCHECK(!socket.get()); @@ -632,7 +612,7 @@ void ClientSocketPoolBaseHelper::OnConnectJobComplete( new NetLogIntegerParameter("net_error", result)); r->callback()->Run(result); } - MaybeOnAvailableSocketSlot(group_name); + OnAvailableSocketSlot(group_name, MayHaveStalledGroups()); } } @@ -651,66 +631,57 @@ void ClientSocketPoolBaseHelper::RemoveConnectJob(const ConnectJob *job, CHECK_GT(connecting_socket_count_, 0); connecting_socket_count_--; - DCHECK(job); - delete job; - if (group) { DCHECK(ContainsKey(group->jobs, job)); group->jobs.erase(job); } -} -void ClientSocketPoolBaseHelper::MaybeOnAvailableSocketSlot( - const std::string& group_name) { - GroupMap::iterator it = 
group_map_.find(group_name); - if (it != group_map_.end()) { - Group& group = it->second; - if (group.HasAvailableSocketSlot(max_sockets_per_group_)) - OnAvailableSocketSlot(group_name, &group); - } + DCHECK(job); + delete job; } void ClientSocketPoolBaseHelper::OnAvailableSocketSlot( - const std::string& group_name, Group* group) { - if (may_have_stalled_group_) { - std::string top_group_name; - Group* top_group = NULL; - int stalled_group_count = FindTopStalledGroup(&top_group, &top_group_name); - if (stalled_group_count == 0 || - (stalled_group_count == 1 && top_group->num_releasing_sockets == 0)) { - may_have_stalled_group_ = false; - } - if (stalled_group_count >= 1) - ProcessPendingRequest(top_group_name, top_group); - } else if (!group->pending_requests.empty()) { - ProcessPendingRequest(group_name, group); - // |group| may no longer be valid after this point. Be careful not to - // access it again. - } else if (group->IsEmpty()) { - // Delete |group| if no longer needed. |group| will no longer be valid. - group_map_.erase(group_name); - } + const std::string& group_name, bool was_at_socket_limit) { + // Go back to the message loop before processing the request wakeup + // so that we don't get recursive and lengthy stacks. 
+ MessageLoop::current()->PostTask(FROM_HERE, + NewRunnableMethod( + this, + &ClientSocketPoolBaseHelper::ProcessPendingRequest, + group_name, + was_at_socket_limit)); } void ClientSocketPoolBaseHelper::ProcessPendingRequest( - const std::string& group_name, Group* group) { - int rv = RequestSocketInternal(group_name, *group->pending_requests.begin()); + const std::string& group_name, bool was_at_socket_limit) { + GroupMap::iterator it = group_map_.find(group_name); + if (it != group_map_.end()) { + Group& group = it->second; + if (!group.pending_requests.empty()) { + int rv = RequestSocketInternal(group_name, + *group.pending_requests.begin()); + if (rv != ERR_IO_PENDING) { + scoped_ptr<const Request> request(RemoveRequestFromQueue( + group.pending_requests.begin(), &group.pending_requests)); + + scoped_refptr<NetLog::EventParameters> params; + if (rv != OK) + params = new NetLogIntegerParameter("net_error", rv); + request->net_log().EndEvent(NetLog::TYPE_SOCKET_POOL, params); + request->callback()->Run(rv); + } - if (rv != ERR_IO_PENDING) { - scoped_ptr<const Request> r(RemoveRequestFromQueue( - group->pending_requests.begin(), &group->pending_requests)); - - scoped_refptr<NetLog::EventParameters> params; - if (rv != OK) - params = new NetLogIntegerParameter("net_error", rv); - r->net_log().EndEvent(NetLog::TYPE_SOCKET_POOL, params); - r->callback()->Run(rv); - if (rv != OK) { - // |group| may be invalid after the callback, we need to search - // |group_map_| again. - MaybeOnAvailableSocketSlot(group_name); + // |group| may no longer be valid after this point. Be careful not to + // access it again. + if (group.IsEmpty()) { + // Delete |group| if no longer needed. |group| will no longer be valid. 
+ group_map_.erase(group_name); + } } } + + if (was_at_socket_limit) + CheckForStalledSocketGroups(); } void ClientSocketPoolBaseHelper::HandOutSocket( diff --git a/net/socket/client_socket_pool_base.h b/net/socket/client_socket_pool_base.h index deb405d..6e27969 100644 --- a/net/socket/client_socket_pool_base.h +++ b/net/socket/client_socket_pool_base.h @@ -221,9 +221,6 @@ class ClientSocketPoolBaseHelper // NetworkChangeNotifier::Observer methods: virtual void OnIPAddressChanged(); - // For testing. - bool may_have_stalled_group() const { return may_have_stalled_group_; } - int NumConnectJobsInGroup(const std::string& group_name) const { return group_map_.find(group_name)->second.jobs.size(); } @@ -266,16 +263,10 @@ class ClientSocketPoolBaseHelper // A Group is allocated per group_name when there are idle sockets or pending // requests. Otherwise, the Group object is removed from the map. - // |active_socket_count| tracks the number of sockets held by clients. Of - // this number of sockets held by clients, some of them may be released soon, - // since ReleaseSocket() was called of them, but the DoReleaseSocket() task - // has not run yet for them. |num_releasing_sockets| tracks these values, - // which is useful for not starting up new ConnectJobs when sockets may - // become available really soon. + // |active_socket_count| tracks the number of sockets held by clients. 
struct Group { Group() : active_socket_count(0), - num_releasing_sockets(0), backup_job(NULL), backup_task(NULL) { } @@ -294,10 +285,6 @@ class ClientSocketPoolBaseHelper max_sockets_per_group; } - bool HasReleasingSockets() const { - return num_releasing_sockets > 0; - } - RequestPriority TopPendingPriority() const { return pending_requests.front()->priority(); } @@ -317,8 +304,6 @@ class ClientSocketPoolBaseHelper std::set<const ConnectJob*> jobs; RequestQueue pending_requests; int active_socket_count; // number of active sockets used by clients - // Number of sockets being released within one loop through the MessageLoop. - int num_releasing_sockets; // A backup job in case the connect for this group takes too long. ConnectJob* backup_job; CancelableTask* backup_task; @@ -337,10 +322,6 @@ class ClientSocketPoolBaseHelper void IncrementIdleCount(); void DecrementIdleCount(); - // Called via PostTask by ReleaseSocket. - void DoReleaseSocket( - const std::string& group_name, ClientSocket* socket, int id); - // Scans the group map for groups which have an available socket slot and // at least one pending request. Returns number of groups found, and if found // at least one, fills |group| and |group_name| with data of the stalled group @@ -356,15 +337,17 @@ class ClientSocketPoolBaseHelper // Removes |job| from |connect_job_set_|. Also updates |group| if non-NULL. void RemoveConnectJob(const ConnectJob* job, Group* group); - // Same as OnAvailableSocketSlot except it looks up the Group first to see if - // it's there. - void MaybeOnAvailableSocketSlot(const std::string& group_name); - // Might delete the Group from |group_map_|. - void OnAvailableSocketSlot(const std::string& group_name, Group* group); + // If |was_at_socket_limit|, will also check for idle sockets to assign + // to any stalled groups. + void OnAvailableSocketSlot(const std::string& group_name, + bool was_at_socket_limit); - // Process a request from a group's pending_requests queue. 
- void ProcessPendingRequest(const std::string& group_name, Group* group); + // Process a pending socket request for a group. + // If |was_at_socket_limit|, will also check for idle sockets to assign + // to any stalled groups. + void ProcessPendingRequest(const std::string& group_name, + bool was_at_socket_limit); // Assigns |socket| to |handle| and updates |group|'s counters appropriately. void HandOutSocket(ClientSocket* socket, @@ -384,7 +367,6 @@ class ClientSocketPoolBaseHelper void CancelAllConnectJobs(); // Returns true if we can't create any more sockets due to the total limit. - // TODO(phajdan.jr): Also take idle sockets into account. bool ReachedMaxSocketsLimit() const; // This is the internal implementation of RequestSocket(). It differs in that @@ -393,6 +375,10 @@ class ClientSocketPoolBaseHelper int RequestSocketInternal(const std::string& group_name, const Request* request); + // Assigns an idle socket for the group to the request. + // Returns |true| if an idle socket is available, false otherwise. + bool AssignIdleSocketToGroup(Group* group, const Request* request); + static void LogBoundConnectJobToRequest( const NetLog::Source& connect_job_source, const Request* request); @@ -409,6 +395,13 @@ class ClientSocketPoolBaseHelper // I'm not sure if we hit this situation often. void CloseOneIdleSocket(); + // Checks if there are stalled socket groups that should be notified + // for possible wakeup. + void CheckForStalledSocketGroups(); + + // Returns true if we might have a stalled group. + bool MayHaveStalledGroups(); + GroupMap group_map_; // Timer used to periodically prune idle sockets that timed out or can't be @@ -424,9 +417,6 @@ class ClientSocketPoolBaseHelper // Number of connected sockets we handed out across all groups. int handed_out_socket_count_; - // Number of sockets being released. - int num_releasing_sockets_; - // The maximum total number of sockets. See ReachedMaxSocketsLimit. 
const int max_sockets_; @@ -437,26 +427,6 @@ class ClientSocketPoolBaseHelper const base::TimeDelta unused_idle_socket_timeout_; const base::TimeDelta used_idle_socket_timeout_; - // Until the maximum number of sockets limit is reached, a group can only - // have pending requests if it exceeds the "max sockets per group" limit. - // - // This means when a socket is released, the only pending requests that can - // be started next belong to the same group. - // - // However once the |max_sockets_| limit is reached, this stops being true: - // groups can now have pending requests without having first reached the - // |max_sockets_per_group_| limit. So choosing the next request involves - // selecting the highest priority request across *all* groups. - // - // |may_have_stalled_group_| is not conclusive, since when we cancel pending - // requests, we may reach the situation where we have the maximum number of - // sockets, but no request is stalled because of the global socket limit - // (although some requests may be blocked on the socket per group limit). - // We don't strictly maintain |may_have_stalled_group_|, since that would - // require a linear search through all groups in |group_map_| to see if one - // of them is stalled. - bool may_have_stalled_group_; - const scoped_ptr<ConnectJobFactory> connect_job_factory_; // TODO(vandebo) Remove when backup jobs move to TCPClientSocketPool @@ -469,6 +439,9 @@ class ClientSocketPoolBaseHelper // pool. This is so that when sockets get released back to the pool, we can // make sure that they are discarded rather than reused. int pool_generation_number_; + + // The count of stalled groups the last time we checked. + int last_stalled_group_count_; }; } // namespace internal @@ -575,11 +548,6 @@ class ClientSocketPoolBase { return helper_->OnConnectJobComplete(result, job); } - // For testing. 
- bool may_have_stalled_group() const { - return helper_->may_have_stalled_group(); - } - int NumConnectJobsInGroup(const std::string& group_name) const { return helper_->NumConnectJobsInGroup(group_name); } @@ -636,11 +604,10 @@ class ClientSocketPoolBase { // Histograms for the pool const scoped_refptr<ClientSocketPoolHistograms> histograms_; - // One might ask why ClientSocketPoolBaseHelper is also refcounted if its - // containing ClientSocketPool is already refcounted. The reason is because - // DoReleaseSocket() posts a task. If ClientSocketPool gets deleted between - // the posting of the task and the execution, then we'll hit the DCHECK that - // |ClientSocketPoolBaseHelper::group_map_| is empty. + // The reason for reference counting here is because the operations on + // the ClientSocketPoolBaseHelper which release sockets can cause the + // ClientSocketPoolBase<T> reference to drop to zero. While we're deep + // in cleanup code, we'll often hold a reference to |self|. scoped_refptr<internal::ClientSocketPoolBaseHelper> helper_; DISALLOW_COPY_AND_ASSIGN(ClientSocketPoolBase); diff --git a/net/socket/client_socket_pool_base_unittest.cc b/net/socket/client_socket_pool_base_unittest.cc index 7b84d94..3e02af7 100644 --- a/net/socket/client_socket_pool_base_unittest.cc +++ b/net/socket/client_socket_pool_base_unittest.cc @@ -772,58 +772,6 @@ TEST_F(ClientSocketPoolBaseTest, TotalLimitCountsConnectingSockets) { EXPECT_EQ(kIndexOutOfBounds, GetOrderOfRequest(6)); } -// Inside ClientSocketPoolBase we have a may_have_stalled_group flag, -// which tells it to use more expensive, but accurate, group selection -// algorithm. Make sure it doesn't get stuck in the "on" state. -TEST_F(ClientSocketPoolBaseTest, MayHaveStalledGroupReset) { - CreatePool(kDefaultMaxSockets, kDefaultMaxSocketsPerGroup); - - EXPECT_FALSE(pool_->base()->may_have_stalled_group()); - - // Reach group socket limit. 
- EXPECT_EQ(OK, StartRequest("a", kDefaultPriority)); - EXPECT_EQ(OK, StartRequest("a", kDefaultPriority)); - EXPECT_FALSE(pool_->base()->may_have_stalled_group()); - - // Reach total limit, but don't request more sockets. - EXPECT_EQ(OK, StartRequest("b", kDefaultPriority)); - EXPECT_EQ(OK, StartRequest("b", kDefaultPriority)); - EXPECT_FALSE(pool_->base()->may_have_stalled_group()); - - // Request one more socket while we are at the maximum sockets limit. - // This should flip the may_have_stalled_group flag. - EXPECT_EQ(ERR_IO_PENDING, StartRequest("c", kDefaultPriority)); - EXPECT_TRUE(pool_->base()->may_have_stalled_group()); - - // After releasing first connection for "a", we're still at the - // maximum sockets limit, but every group's pending queue is empty, - // so we reset the flag. - EXPECT_TRUE(ReleaseOneConnection(NO_KEEP_ALIVE)); - EXPECT_FALSE(pool_->base()->may_have_stalled_group()); - - // Requesting additional socket while at the total limit should - // flip the flag back to "on". - EXPECT_EQ(ERR_IO_PENDING, StartRequest("c", kDefaultPriority)); - EXPECT_TRUE(pool_->base()->may_have_stalled_group()); - - // We'll request one more socket to verify that we don't reset the flag - // too eagerly. - EXPECT_EQ(ERR_IO_PENDING, StartRequest("d", kDefaultPriority)); - EXPECT_TRUE(pool_->base()->may_have_stalled_group()); - - // We're at the maximum socket limit, and still have one request pending - // for "d". Flag should be "on". - EXPECT_TRUE(ReleaseOneConnection(NO_KEEP_ALIVE)); - EXPECT_TRUE(pool_->base()->may_have_stalled_group()); - - // Now every group's pending queue should be empty again. 
- EXPECT_TRUE(ReleaseOneConnection(NO_KEEP_ALIVE)); - EXPECT_FALSE(pool_->base()->may_have_stalled_group()); - - ReleaseAllConnections(NO_KEEP_ALIVE); - EXPECT_FALSE(pool_->base()->may_have_stalled_group()); -} - TEST_F(ClientSocketPoolBaseTest, CorrectlyCountStalledGroups) { CreatePool(kDefaultMaxSockets, kDefaultMaxSockets); connect_job_factory_->set_job_type(TestConnectJob::kMockJob); @@ -876,56 +824,117 @@ TEST_F(ClientSocketPoolBaseTest, StallAndThenCancelAndTriggerAvailableSocket) { handles[i].Reset(); } -TEST_F(ClientSocketPoolBaseTest, CloseIdleSocketAtSocketLimit) { +TEST_F(ClientSocketPoolBaseTest, CancelStalledSocketAtSocketLimit) { CreatePool(kDefaultMaxSockets, kDefaultMaxSocketsPerGroup); connect_job_factory_->set_job_type(TestConnectJob::kMockJob); - for (int i = 0; i < kDefaultMaxSockets; ++i) { - ClientSocketHandle handle; + { + ClientSocketHandle handles[kDefaultMaxSockets]; + TestCompletionCallback callbacks[kDefaultMaxSockets]; + for (int i = 0; i < kDefaultMaxSockets; ++i) { + EXPECT_EQ(OK, + InitHandle(&handles[i], IntToString(i), kDefaultPriority, + &callbacks[i], pool_, BoundNetLog())); + } + + // Force a stalled group. + ClientSocketHandle stalled_handle; TestCompletionCallback callback; - EXPECT_EQ(OK, - InitHandle(&handle, IntToString(i), kDefaultPriority, &callback, + EXPECT_EQ(ERR_IO_PENDING, + InitHandle(&stalled_handle, "foo", kDefaultPriority, &callback, pool_, BoundNetLog())); - } - // Stall a group - ClientSocketHandle handle; - TestCompletionCallback callback; - EXPECT_EQ(ERR_IO_PENDING, - InitHandle(&handle, "foo", kDefaultPriority, &callback, pool_, - BoundNetLog())); + // Cancel the stalled request. + stalled_handle.Reset(); - // Cancel the stalled request. - handle.Reset(); + EXPECT_EQ(kDefaultMaxSockets, client_socket_factory_.allocation_count()); + EXPECT_EQ(0, pool_->IdleSocketCount()); - // Flush all the DoReleaseSocket tasks. 
- MessageLoop::current()->RunAllPending(); + // Dropping out of scope will close all handles and return them to idle. + } EXPECT_EQ(kDefaultMaxSockets, client_socket_factory_.allocation_count()); EXPECT_EQ(kDefaultMaxSockets, pool_->IdleSocketCount()); +} - for (int i = 0; i < kDefaultMaxSockets; ++i) { - ClientSocketHandle handle; +TEST_F(ClientSocketPoolBaseTest, CancelPendingSocketAtSocketLimit) { + CreatePool(kDefaultMaxSockets, kDefaultMaxSocketsPerGroup); + connect_job_factory_->set_job_type(TestConnectJob::kMockWaitingJob); + + { + ClientSocketHandle handles[kDefaultMaxSockets]; + for (int i = 0; i < kDefaultMaxSockets; ++i) { + TestCompletionCallback callback; + EXPECT_EQ(ERR_IO_PENDING, + InitHandle(&handles[i], IntToString(i), kDefaultPriority, + &callback, pool_, BoundNetLog())); + } + + // Force a stalled group. + connect_job_factory_->set_job_type(TestConnectJob::kMockPendingJob); + ClientSocketHandle stalled_handle; TestCompletionCallback callback; - EXPECT_EQ(OK, - InitHandle(&handle, StringPrintf("Take 2: %d", i), - kDefaultPriority, &callback, pool_, BoundNetLog())); + EXPECT_EQ(ERR_IO_PENDING, + InitHandle(&stalled_handle, "foo", kDefaultPriority, &callback, + pool_, BoundNetLog())); + + // Since it is stalled, it should have no connect jobs. + EXPECT_EQ(0, pool_->NumConnectJobsInGroup("foo")); + + // Cancel the stalled request. + handles[0].Reset(); + + MessageLoop::current()->RunAllPending(); + + // Now we should have a connect job. + EXPECT_EQ(1, pool_->NumConnectJobsInGroup("foo")); + + // The stalled socket should connect. + EXPECT_EQ(OK, callback.WaitForResult()); + + EXPECT_EQ(kDefaultMaxSockets + 1, + client_socket_factory_.allocation_count()); + EXPECT_EQ(0, pool_->IdleSocketCount()); + EXPECT_EQ(0, pool_->NumConnectJobsInGroup("foo")); + + // Dropping out of scope will close all handles and return them to idle. 
} - EXPECT_EQ(2 * kDefaultMaxSockets, client_socket_factory_.allocation_count()); - EXPECT_EQ(0, pool_->IdleSocketCount()); + EXPECT_EQ(1, pool_->IdleSocketCount()); +} - // Before the next round of DoReleaseSocket tasks run, we will hit the - // socket limit. +TEST_F(ClientSocketPoolBaseTest, WaitForStalledSocketAtSocketLimit) { + CreatePool(kDefaultMaxSockets, kDefaultMaxSocketsPerGroup); + connect_job_factory_->set_job_type(TestConnectJob::kMockJob); - EXPECT_EQ(ERR_IO_PENDING, - InitHandle(&handle, "foo", kDefaultPriority, &callback, pool_, - BoundNetLog())); + ClientSocketHandle stalled_handle; + TestCompletionCallback callback; + { + ClientSocketHandle handles[kDefaultMaxSockets]; + for (int i = 0; i < kDefaultMaxSockets; ++i) { + TestCompletionCallback callback; + EXPECT_EQ(OK, + InitHandle(&handles[i], StringPrintf("Take 2: %d", i), + kDefaultPriority, &callback, pool_, BoundNetLog())); + } + + EXPECT_EQ(kDefaultMaxSockets, client_socket_factory_.allocation_count()); + EXPECT_EQ(0, pool_->IdleSocketCount()); + + // Now we will hit the socket limit. + EXPECT_EQ(ERR_IO_PENDING, + InitHandle(&stalled_handle, "foo", kDefaultPriority, &callback, + pool_, BoundNetLog())); + + // Dropping out of scope will close all handles and return them to idle. + } // But if we wait for it, the released idle sockets will be closed in // preference of the waiting request. - EXPECT_EQ(OK, callback.WaitForResult()); + + EXPECT_EQ(kDefaultMaxSockets + 1, client_socket_factory_.allocation_count()); + EXPECT_EQ(3, pool_->IdleSocketCount()); } // Regression test for http://crbug.com/40952. @@ -1279,11 +1288,11 @@ TEST_F(ClientSocketPoolBaseTest, GroupWithPendingRequestsIsNotEmpty) { // Create a stalled group with high priorities. 
EXPECT_EQ(ERR_IO_PENDING, StartRequest("c", kHighPriority)); EXPECT_EQ(ERR_IO_PENDING, StartRequest("c", kHighPriority)); - EXPECT_TRUE(pool_->base()->may_have_stalled_group()); - // Release the first two sockets from "a", which will make room - // for requests from "c". After that "a" will have no active sockets - // and one pending request. + // Release the first two sockets from "a". Because this is a keepalive, + // the first release will unblock the pending request for "a". The + // second release will unblock a request for "c", becaue it is the next + // high priority socket. EXPECT_TRUE(ReleaseOneConnection(KEEP_ALIVE)); EXPECT_TRUE(ReleaseOneConnection(KEEP_ALIVE)); @@ -1291,6 +1300,8 @@ TEST_F(ClientSocketPoolBaseTest, GroupWithPendingRequestsIsNotEmpty) { // we were hitting a CHECK here. EXPECT_EQ(0, pool_->IdleSocketCountInGroup("a")); pool_->CloseIdleSockets(); + + MessageLoop::current()->RunAllPending(); // Run the released socket wakeups } TEST_F(ClientSocketPoolBaseTest, BasicAsynchronous) { @@ -1391,7 +1402,7 @@ TEST_F(ClientSocketPoolBaseTest, CancelRequestLimitsJobs) { EXPECT_EQ(kDefaultMaxSocketsPerGroup, pool_->NumConnectJobsInGroup("a")); requests_[0]->handle()->Reset(); - EXPECT_EQ(kDefaultMaxSocketsPerGroup - 1, pool_->NumConnectJobsInGroup("a")); + EXPECT_EQ(kDefaultMaxSocketsPerGroup, pool_->NumConnectJobsInGroup("a")); } // When requests and ConnectJobs are not coupled, the request will get serviced @@ -1424,7 +1435,7 @@ TEST_F(ClientSocketPoolBaseTest, ReleaseSockets) { // Both Requests 2 and 3 are pending. We release socket 1 which should // service request 2. Request 3 should still be waiting. 
req1.handle()->Reset(); - MessageLoop::current()->RunAllPending(); // Run the DoReleaseSocket() + MessageLoop::current()->RunAllPending(); // Run the released socket wakeups ASSERT_TRUE(req2.handle()->socket()); EXPECT_EQ(OK, req2.WaitForResult()); EXPECT_FALSE(req3.handle()->socket()); @@ -1710,31 +1721,6 @@ class TestReleasingSocketRequest : public CallbackRunner< Tuple1<int> > { TestCompletionCallback callback2_; }; -// This test covers the case where, within the same DoReleaseSocket() callback, -// we release the just acquired socket and start up a new request. See bug -// 36871 for details. -TEST_F(ClientSocketPoolBaseTest, ReleasedSocketReleasesToo) { - CreatePool(kDefaultMaxSockets, kDefaultMaxSocketsPerGroup); - - connect_job_factory_->set_job_type(TestConnectJob::kMockJob); - - // Complete one request and release the socket. - ClientSocketHandle handle; - TestCompletionCallback callback; - EXPECT_EQ(OK, InitHandle(&handle, "a", kDefaultPriority, &callback, pool_, - BoundNetLog())); - handle.Reset(); - - // Before the DoReleaseSocket() task has run, start up a - // TestReleasingSocketRequest. This one will be ERR_IO_PENDING since - // num_releasing_sockets > 0 and there was no idle socket to use yet. - TestReleasingSocketRequest request(pool_.get()); - EXPECT_EQ(ERR_IO_PENDING, InitHandle(request.handle(), "a", kDefaultPriority, - &request, pool_, BoundNetLog())); - - EXPECT_EQ(OK, request.WaitForResult()); -} - // http://crbug.com/44724 regression test. // We start releasing the pool when we flush on network change. When that // happens, the only active references are in the ClientSocketHandles. When a @@ -1784,6 +1770,160 @@ TEST_F(ClientSocketPoolBaseTest, DoNotReuseSocketAfterFlush) { EXPECT_EQ(ClientSocketHandle::UNUSED, handle.reuse_type()); } +// Test delayed socket binding for the case where we have two connects, +// and while one is waiting on a connect, the other frees up. 
+// The socket waiting on a connect should switch immediately to the freed +// up socket. +TEST_F(ClientSocketPoolBaseTest, DelayedSocketBindingWaitingForConnect) { + CreatePool(kDefaultMaxSockets, kDefaultMaxSocketsPerGroup); + connect_job_factory_->set_job_type(TestConnectJob::kMockPendingJob); + + ClientSocketHandle handle1; + TestCompletionCallback callback; + EXPECT_EQ(ERR_IO_PENDING, + InitHandle(&handle1, "a", kDefaultPriority, + &callback, pool_, BoundNetLog())); + EXPECT_EQ(OK, callback.WaitForResult()); + + // No idle sockets, no pending jobs. + EXPECT_EQ(0, pool_->IdleSocketCount()); + EXPECT_EQ(0, pool_->NumConnectJobsInGroup("a")); + + // Create a second socket to the same host, but this one will wait. + connect_job_factory_->set_job_type(TestConnectJob::kMockWaitingJob); + ClientSocketHandle handle2; + EXPECT_EQ(ERR_IO_PENDING, + InitHandle(&handle2, "a", kDefaultPriority, + &callback, pool_, BoundNetLog())); + // No idle sockets, and one connecting job. + EXPECT_EQ(0, pool_->IdleSocketCount()); + EXPECT_EQ(1, pool_->NumConnectJobsInGroup("a")); + + // Return the first handle to the pool. This will initiate the delayed + // binding. + handle1.Reset(); + + MessageLoop::current()->RunAllPending(); + + // Still no idle sockets, still one pending connect job. + EXPECT_EQ(0, pool_->IdleSocketCount()); + EXPECT_EQ(1, pool_->NumConnectJobsInGroup("a")); + + // The second socket connected, even though it was a Waiting Job. + EXPECT_EQ(OK, callback.WaitForResult()); + + // And we can see there is still one job waiting. + EXPECT_EQ(1, pool_->NumConnectJobsInGroup("a")); + + // Finally, signal the waiting Connect. + client_socket_factory_.SignalJobs(); + EXPECT_EQ(0, pool_->NumConnectJobsInGroup("a")); + + MessageLoop::current()->RunAllPending(); +} + +// Test delayed socket binding when a group is at capacity and one +// of the group's sockets frees up. 
+TEST_F(ClientSocketPoolBaseTest, DelayedSocketBindingAtGroupCapacity) { + CreatePool(kDefaultMaxSockets, kDefaultMaxSocketsPerGroup); + connect_job_factory_->set_job_type(TestConnectJob::kMockPendingJob); + + ClientSocketHandle handle1; + TestCompletionCallback callback; + EXPECT_EQ(ERR_IO_PENDING, + InitHandle(&handle1, "a", kDefaultPriority, + &callback, pool_, BoundNetLog())); + EXPECT_EQ(OK, callback.WaitForResult()); + + // No idle sockets, no pending jobs. + EXPECT_EQ(0, pool_->IdleSocketCount()); + EXPECT_EQ(0, pool_->NumConnectJobsInGroup("a")); + + // Create a second socket to the same host, but this one will wait. + connect_job_factory_->set_job_type(TestConnectJob::kMockWaitingJob); + ClientSocketHandle handle2; + EXPECT_EQ(ERR_IO_PENDING, + InitHandle(&handle2, "a", kDefaultPriority, + &callback, pool_, BoundNetLog())); + // No idle sockets, and one connecting job. + EXPECT_EQ(0, pool_->IdleSocketCount()); + EXPECT_EQ(1, pool_->NumConnectJobsInGroup("a")); + + // Return the first handle to the pool. This will initiate the delayed + // binding. + handle1.Reset(); + + MessageLoop::current()->RunAllPending(); + + // Still no idle sockets, still one pending connect job. + EXPECT_EQ(0, pool_->IdleSocketCount()); + EXPECT_EQ(1, pool_->NumConnectJobsInGroup("a")); + + // The second socket connected, even though it was a Waiting Job. + EXPECT_EQ(OK, callback.WaitForResult()); + + // And we can see there is still one job waiting. + EXPECT_EQ(1, pool_->NumConnectJobsInGroup("a")); + + // Finally, signal the waiting Connect. + client_socket_factory_.SignalJobs(); + EXPECT_EQ(0, pool_->NumConnectJobsInGroup("a")); + + MessageLoop::current()->RunAllPending(); +} + +// Test out the case where we have one socket connected, one +// connecting, when the first socket finishes and goes idle. +// Although the second connection is pending, th second request +// should complete, by taking the first socket's idle socket. 
+TEST_F(ClientSocketPoolBaseTest, DelayedSocketBindingAtStall) { + CreatePool(kDefaultMaxSockets, kDefaultMaxSocketsPerGroup); + connect_job_factory_->set_job_type(TestConnectJob::kMockPendingJob); + + ClientSocketHandle handle1; + TestCompletionCallback callback; + EXPECT_EQ(ERR_IO_PENDING, + InitHandle(&handle1, "a", kDefaultPriority, + &callback, pool_, BoundNetLog())); + EXPECT_EQ(OK, callback.WaitForResult()); + + // No idle sockets, no pending jobs. + EXPECT_EQ(0, pool_->IdleSocketCount()); + EXPECT_EQ(0, pool_->NumConnectJobsInGroup("a")); + + // Create a second socket to the same host, but this one will wait. + connect_job_factory_->set_job_type(TestConnectJob::kMockWaitingJob); + ClientSocketHandle handle2; + EXPECT_EQ(ERR_IO_PENDING, + InitHandle(&handle2, "a", kDefaultPriority, + &callback, pool_, BoundNetLog())); + // No idle sockets, and one connecting job. + EXPECT_EQ(0, pool_->IdleSocketCount()); + EXPECT_EQ(1, pool_->NumConnectJobsInGroup("a")); + + // Return the first handle to the pool. This will initiate the delayed + // binding. + handle1.Reset(); + + MessageLoop::current()->RunAllPending(); + + // Still no idle sockets, still one pending connect job. + EXPECT_EQ(0, pool_->IdleSocketCount()); + EXPECT_EQ(1, pool_->NumConnectJobsInGroup("a")); + + // The second socket connected, even though it was a Waiting Job. + EXPECT_EQ(OK, callback.WaitForResult()); + + // And we can see there is still one job waiting. + EXPECT_EQ(1, pool_->NumConnectJobsInGroup("a")); + + // Finally, signal the waiting Connect. 
+ client_socket_factory_.SignalJobs(); + EXPECT_EQ(0, pool_->NumConnectJobsInGroup("a")); + + MessageLoop::current()->RunAllPending(); +} + } // namespace } // namespace net diff --git a/net/socket/socks_client_socket_pool_unittest.cc b/net/socket/socks_client_socket_pool_unittest.cc index fb83d96..7bfce55 100644 --- a/net/socket/socks_client_socket_pool_unittest.cc +++ b/net/socket/socks_client_socket_pool_unittest.cc @@ -319,15 +319,16 @@ TEST_F(SOCKSClientSocketPoolTest, CancelDuringTCPConnect) { pool_->CancelRequest("a", requests_[0]->handle()); pool_->CancelRequest("a", requests_[1]->handle()); - EXPECT_EQ(1, tcp_socket_pool_->cancel_count()); + // Requests in the connect phase don't actually get cancelled. + EXPECT_EQ(0, tcp_socket_pool_->cancel_count()); // Now wait for the TCP sockets to connect. MessageLoop::current()->RunAllPending(); EXPECT_EQ(kRequestNotFound, GetOrderOfRequest(1)); EXPECT_EQ(kRequestNotFound, GetOrderOfRequest(2)); - EXPECT_EQ(1, tcp_socket_pool_->cancel_count()); - EXPECT_EQ(1, pool_->IdleSocketCount()); + EXPECT_EQ(0, tcp_socket_pool_->cancel_count()); + EXPECT_EQ(2, pool_->IdleSocketCount()); requests_[0]->handle()->Reset(); requests_[1]->handle()->Reset(); @@ -354,7 +355,8 @@ TEST_F(SOCKSClientSocketPoolTest, CancelDuringSOCKSConnect) { pool_->CancelRequest("a", requests_[0]->handle()); pool_->CancelRequest("a", requests_[1]->handle()); EXPECT_EQ(0, tcp_socket_pool_->cancel_count()); - EXPECT_EQ(1, tcp_socket_pool_->release_count()); + // Requests in the connect phase don't actually get cancelled. + EXPECT_EQ(0, tcp_socket_pool_->release_count()); // Now wait for the async data to reach the SOCKS connect jobs. 
MessageLoop::current()->RunAllPending(); @@ -362,8 +364,8 @@ TEST_F(SOCKSClientSocketPoolTest, CancelDuringSOCKSConnect) { EXPECT_EQ(kRequestNotFound, GetOrderOfRequest(1)); EXPECT_EQ(kRequestNotFound, GetOrderOfRequest(2)); EXPECT_EQ(0, tcp_socket_pool_->cancel_count()); - EXPECT_EQ(1, tcp_socket_pool_->release_count()); - EXPECT_EQ(1, pool_->IdleSocketCount()); + EXPECT_EQ(0, tcp_socket_pool_->release_count()); + EXPECT_EQ(2, pool_->IdleSocketCount()); requests_[0]->handle()->Reset(); requests_[1]->handle()->Reset(); |