Diffstat (limited to 'net/socket')
 -rw-r--r--  net/socket/client_socket_pool_base.cc            | 231
 -rw-r--r--  net/socket/client_socket_pool_base.h              |  89
 -rw-r--r--  net/socket/client_socket_pool_base_unittest.cc    | 364
 -rw-r--r--  net/socket/socks_client_socket_pool_unittest.cc   |  14
 4 files changed, 309 insertions(+), 389 deletions(-)
diff --git a/net/socket/client_socket_pool_base.cc b/net/socket/client_socket_pool_base.cc
index 68f04e1..40daa35 100644
--- a/net/socket/client_socket_pool_base.cc
+++ b/net/socket/client_socket_pool_base.cc
@@ -132,15 +132,16 @@ ClientSocketPoolBaseHelper::ClientSocketPoolBaseHelper(
: idle_socket_count_(0),
connecting_socket_count_(0),
handed_out_socket_count_(0),
+ num_releasing_sockets_(0),
max_sockets_(max_sockets),
max_sockets_per_group_(max_sockets_per_group),
unused_idle_socket_timeout_(unused_idle_socket_timeout),
used_idle_socket_timeout_(used_idle_socket_timeout),
+ may_have_stalled_group_(false),
connect_job_factory_(connect_job_factory),
backup_jobs_enabled_(false),
ALLOW_THIS_IN_INITIALIZER_LIST(method_factory_(this)),
- pool_generation_number_(0),
- last_stalled_group_count_(0) {
+ pool_generation_number_(0) {
DCHECK_LE(0, max_sockets_per_group);
DCHECK_LE(max_sockets_per_group, max_sockets);
@@ -187,7 +188,6 @@ int ClientSocketPoolBaseHelper::RequestSocket(
const Request* request) {
request->net_log().BeginEvent(NetLog::TYPE_SOCKET_POOL, NULL);
Group& group = group_map_[group_name];
-
int rv = RequestSocketInternal(group_name, request);
if (rv != ERR_IO_PENDING) {
request->net_log().EndEvent(NetLog::TYPE_SOCKET_POOL, NULL);
@@ -209,8 +209,21 @@ int ClientSocketPoolBaseHelper::RequestSocketInternal(
Group& group = group_map_[group_name];
// Try to reuse a socket.
- if (AssignIdleSocketToGroup(&group, request))
- return OK;
+ while (!group.idle_sockets.empty()) {
+ IdleSocket idle_socket = group.idle_sockets.back();
+ group.idle_sockets.pop_back();
+ DecrementIdleCount();
+ if (idle_socket.socket->IsConnectedAndIdle()) {
+ // We found one we can reuse!
+ base::TimeDelta idle_time =
+ base::TimeTicks::Now() - idle_socket.start_time;
+ HandOutSocket(
+ idle_socket.socket, idle_socket.used, handle, idle_time, &group,
+ request->net_log());
+ return OK;
+ }
+ delete idle_socket.socket;
+ }
// Can we make another active socket now?
if (!group.HasAvailableSocketSlot(max_sockets_per_group_)) {
@@ -225,12 +238,19 @@ int ClientSocketPoolBaseHelper::RequestSocketInternal(
} else {
// We could check if we really have a stalled group here, but it requires
// a scan of all groups, so just flip a flag here, and do the check later.
+ may_have_stalled_group_ = true;
request->net_log().AddEvent(
NetLog::TYPE_SOCKET_POOL_STALLED_MAX_SOCKETS, NULL);
return ERR_IO_PENDING;
}
}
+ // See if we already have enough connect jobs or sockets that will be released
+ // soon.
+ if (group.HasReleasingSockets()) {
+ return ERR_IO_PENDING;
+ }
+
// We couldn't find a socket to reuse, so allocate and connect a new one.
scoped_ptr<ConnectJob> connect_job(
connect_job_factory_->NewConnectJob(group_name, *request, this));
@@ -264,28 +284,6 @@ int ClientSocketPoolBaseHelper::RequestSocketInternal(
return rv;
}
-bool ClientSocketPoolBaseHelper::AssignIdleSocketToGroup(Group* group,
- const Request* request) {
- // Iterate through the list of idle sockets until we find one or exhaust
- // the list.
- while (!group->idle_sockets.empty()) {
- IdleSocket idle_socket = group->idle_sockets.back();
- group->idle_sockets.pop_back();
- DecrementIdleCount();
- if (idle_socket.socket->IsConnectedAndIdle()) {
- // We found one we can reuse!
- base::TimeDelta idle_time =
- base::TimeTicks::Now() - idle_socket.start_time;
- HandOutSocket(
- idle_socket.socket, idle_socket.used, request->handle(), idle_time,
- group, request->net_log());
- return true;
- }
- delete idle_socket.socket;
- }
- return false;
-}
-
// static
void ClientSocketPoolBaseHelper::LogBoundConnectJobToRequest(
const NetLog::Source& connect_job_source, const Request* request) {
@@ -360,17 +358,31 @@ void ClientSocketPoolBaseHelper::CancelRequest(
req->net_log().AddEvent(NetLog::TYPE_CANCELLED, NULL);
req->net_log().EndEvent(NetLog::TYPE_SOCKET_POOL, NULL);
delete req;
-
- // We let the job run, unless we're at the socket limit.
- if (group.jobs.size() && ReachedMaxSocketsLimit()) {
+ // Let one connect job connect and become idle for potential future use.
+ if (group.jobs.size() > group.pending_requests.size() + 1) {
+ // TODO(willchan): Cancel the job in the earliest LoadState.
RemoveConnectJob(*group.jobs.begin(), &group);
- CheckForStalledSocketGroups();
+ OnAvailableSocketSlot(group_name, &group);
}
- break;
+ return;
}
}
}
+void ClientSocketPoolBaseHelper::ReleaseSocket(const std::string& group_name,
+ ClientSocket* socket,
+ int id) {
+ Group& group = group_map_[group_name];
+ group.num_releasing_sockets++;
+ num_releasing_sockets_++;
+ DCHECK_LE(group.num_releasing_sockets, group.active_socket_count);
+ // Run this asynchronously to allow the caller to finish before we let
+ // another begin doing work. This also avoids nasty recursion issues.
+ // NOTE: We cannot refer to the handle argument after this method returns.
+ MessageLoop::current()->PostTask(FROM_HERE, NewRunnableMethod(this,
+ &ClientSocketPoolBaseHelper::DoReleaseSocket, group_name, socket, id));
+}
+
void ClientSocketPoolBaseHelper::CloseIdleSockets() {
CleanupIdleSockets(true);
}
@@ -471,9 +483,9 @@ void ClientSocketPoolBaseHelper::DecrementIdleCount() {
timer_.Stop();
}
-void ClientSocketPoolBaseHelper::ReleaseSocket(const std::string& group_name,
- ClientSocket* socket,
- int id) {
+void ClientSocketPoolBaseHelper::DoReleaseSocket(const std::string& group_name,
+ ClientSocket* socket,
+ int id) {
// Running callbacks can cause the last outside reference to be released.
// Hold onto a reference.
scoped_refptr<ClientSocketPoolBaseHelper> ref_holder(this);
@@ -483,52 +495,60 @@ void ClientSocketPoolBaseHelper::ReleaseSocket(const std::string& group_name,
Group& group = i->second;
+ group.num_releasing_sockets--;
+ DCHECK_GE(group.num_releasing_sockets, 0);
+
CHECK_GT(handed_out_socket_count_, 0);
handed_out_socket_count_--;
CHECK_GT(group.active_socket_count, 0);
group.active_socket_count--;
+ CHECK_GT(num_releasing_sockets_, 0);
+ num_releasing_sockets_--;
+
const bool can_reuse = socket->IsConnectedAndIdle() &&
id == pool_generation_number_;
if (can_reuse) {
- // Add it to the idle list.
AddIdleSocket(socket, true /* used socket */, &group);
- OnAvailableSocketSlot(group_name, MayHaveStalledGroups());
} else {
delete socket;
}
- // Check to see if there are stalled groups that can resume now.
- CheckForStalledSocketGroups();
-}
-void ClientSocketPoolBaseHelper::CheckForStalledSocketGroups() {
- // If we have idle sockets, see if we can give one to the top-stalled group.
- std::string top_group_name;
- Group* top_group = NULL;
- last_stalled_group_count_ = FindTopStalledGroup(&top_group, &top_group_name);
- if (!last_stalled_group_count_)
- return;
+ // If there are no more releasing sockets, then we might have to process
+ // multiple available socket slots, since we stalled their processing until
+ // all sockets have been released. Note that ProcessPendingRequest() will
+ // invoke user callbacks, so |num_releasing_sockets_| may change.
+ //
+ // This code has been known to infinite loop. Set a counter and CHECK to make
+ // sure it doesn't get ridiculously high.
+
+ int iterations = 0;
+ while (num_releasing_sockets_ == 0) {
+ CHECK_LT(iterations, 1000) << "Probably stuck in an infinite loop.";
+ std::string top_group_name;
+ Group* top_group = NULL;
+ int stalled_group_count = FindTopStalledGroup(&top_group, &top_group_name);
+ if (stalled_group_count >= 1) {
+ if (ReachedMaxSocketsLimit()) {
+ if (idle_socket_count() > 0) {
+ CloseOneIdleSocket();
+ } else {
+ // We can't activate more sockets since we're already at our global
+ // limit.
+ may_have_stalled_group_ = true;
+ return;
+ }
+ }
- if (ReachedMaxSocketsLimit()) {
- if (idle_socket_count() > 0) {
- CloseOneIdleSocket();
+ ProcessPendingRequest(top_group_name, top_group);
} else {
- // We can't activate more sockets since we're already at our global
- // limit.
+ may_have_stalled_group_ = false;
return;
}
- }
- // Note: we don't loop on waking stalled groups. If the stalled group is at
- // its limit, may be left with other stalled groups that could be
- // waken. This isn't optimal, but there is no starvation, so to avoid
- // the looping we leave it at this.
- OnAvailableSocketSlot(top_group_name, false);
-}
-
-bool ClientSocketPoolBaseHelper::MayHaveStalledGroups() {
- return last_stalled_group_count_ > 0 || ReachedMaxSocketsLimit();
+ iterations++;
+ }
}
// Search for the highest priority pending request, amongst the groups that
@@ -600,7 +620,7 @@ void ClientSocketPoolBaseHelper::OnConnectJobComplete(
r->callback()->Run(result);
} else {
AddIdleSocket(socket.release(), false /* unused socket */, &group);
- OnAvailableSocketSlot(group_name, MayHaveStalledGroups());
+ OnAvailableSocketSlot(group_name, &group);
}
} else {
DCHECK(!socket.get());
@@ -612,7 +632,7 @@ void ClientSocketPoolBaseHelper::OnConnectJobComplete(
new NetLogIntegerParameter("net_error", result));
r->callback()->Run(result);
}
- OnAvailableSocketSlot(group_name, MayHaveStalledGroups());
+ MaybeOnAvailableSocketSlot(group_name);
}
}
@@ -631,57 +651,66 @@ void ClientSocketPoolBaseHelper::RemoveConnectJob(const ConnectJob *job,
CHECK_GT(connecting_socket_count_, 0);
connecting_socket_count_--;
+ DCHECK(job);
+ delete job;
+
if (group) {
DCHECK(ContainsKey(group->jobs, job));
group->jobs.erase(job);
}
+}
- DCHECK(job);
- delete job;
+void ClientSocketPoolBaseHelper::MaybeOnAvailableSocketSlot(
+ const std::string& group_name) {
+ GroupMap::iterator it = group_map_.find(group_name);
+ if (it != group_map_.end()) {
+ Group& group = it->second;
+ if (group.HasAvailableSocketSlot(max_sockets_per_group_))
+ OnAvailableSocketSlot(group_name, &group);
+ }
}
void ClientSocketPoolBaseHelper::OnAvailableSocketSlot(
- const std::string& group_name, bool was_at_socket_limit) {
- // Go back to the message loop before processing the request wakeup
- // so that we don't get recursive and lengthy stacks.
- MessageLoop::current()->PostTask(FROM_HERE,
- NewRunnableMethod(
- this,
- &ClientSocketPoolBaseHelper::ProcessPendingRequest,
- group_name,
- was_at_socket_limit));
+ const std::string& group_name, Group* group) {
+ if (may_have_stalled_group_) {
+ std::string top_group_name;
+ Group* top_group = NULL;
+ int stalled_group_count = FindTopStalledGroup(&top_group, &top_group_name);
+ if (stalled_group_count == 0 ||
+ (stalled_group_count == 1 && top_group->num_releasing_sockets == 0)) {
+ may_have_stalled_group_ = false;
+ }
+ if (stalled_group_count >= 1)
+ ProcessPendingRequest(top_group_name, top_group);
+ } else if (!group->pending_requests.empty()) {
+ ProcessPendingRequest(group_name, group);
+ // |group| may no longer be valid after this point. Be careful not to
+ // access it again.
+ } else if (group->IsEmpty()) {
+ // Delete |group| if no longer needed. |group| will no longer be valid.
+ group_map_.erase(group_name);
+ }
}
void ClientSocketPoolBaseHelper::ProcessPendingRequest(
- const std::string& group_name, bool was_at_socket_limit) {
- GroupMap::iterator it = group_map_.find(group_name);
- if (it != group_map_.end()) {
- Group& group = it->second;
- if (!group.pending_requests.empty()) {
- int rv = RequestSocketInternal(group_name,
- *group.pending_requests.begin());
- if (rv != ERR_IO_PENDING) {
- scoped_ptr<const Request> request(RemoveRequestFromQueue(
- group.pending_requests.begin(), &group.pending_requests));
-
- scoped_refptr<NetLog::EventParameters> params;
- if (rv != OK)
- params = new NetLogIntegerParameter("net_error", rv);
- request->net_log().EndEvent(NetLog::TYPE_SOCKET_POOL, params);
- request->callback()->Run(rv);
- }
+ const std::string& group_name, Group* group) {
+ int rv = RequestSocketInternal(group_name, *group->pending_requests.begin());
- // |group| may no longer be valid after this point. Be careful not to
- // access it again.
- if (group.IsEmpty()) {
- // Delete |group| if no longer needed. |group| will no longer be valid.
- group_map_.erase(group_name);
- }
+ if (rv != ERR_IO_PENDING) {
+ scoped_ptr<const Request> r(RemoveRequestFromQueue(
+ group->pending_requests.begin(), &group->pending_requests));
+
+ scoped_refptr<NetLog::EventParameters> params;
+ if (rv != OK)
+ params = new NetLogIntegerParameter("net_error", rv);
+ r->net_log().EndEvent(NetLog::TYPE_SOCKET_POOL, params);
+ r->callback()->Run(rv);
+ if (rv != OK) {
+ // |group| may be invalid after the callback, we need to search
+ // |group_map_| again.
+ MaybeOnAvailableSocketSlot(group_name);
}
}
-
- if (was_at_socket_limit)
- CheckForStalledSocketGroups();
}
void ClientSocketPoolBaseHelper::HandOutSocket(
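(Annotation, not part of the patch.) The core of the .cc change above is that ReleaseSocket() no longer does the release inline: it only bumps a releasing-sockets counter and posts DoReleaseSocket() as a task, and stalled work is woken only once no releases remain in flight. Below is a minimal, self-contained sketch of that pattern. It is not Chromium code: ToyPool, the plain function queue standing in for MessageLoop, and the socket ids are all invented for illustration.

  #include <cassert>
  #include <deque>
  #include <functional>
  #include <iostream>
  #include <utility>

  // Stand-in for MessageLoop: posted tasks run later, in FIFO order.
  static std::deque<std::function<void()>> g_task_queue;

  static void PostTask(std::function<void()> task) {
    g_task_queue.push_back(std::move(task));
  }

  static void RunAllPending() {
    while (!g_task_queue.empty()) {
      std::function<void()> task = std::move(g_task_queue.front());
      g_task_queue.pop_front();
      task();
    }
  }

  class ToyPool {
   public:
    // Analogue of ReleaseSocket(): cheap and synchronous. It records that a
    // release is in flight and defers the real work so the caller unwinds
    // before anyone else runs (avoiding recursion into the pool).
    void ReleaseSocket(int socket_id) {
      ++num_releasing_sockets_;
      PostTask([this, socket_id] { DoReleaseSocket(socket_id); });
    }

    // Analogue of Group::HasReleasingSockets(): a new request that sees this
    // returns ERR_IO_PENDING instead of opening a fresh connection, because
    // a socket is about to become idle.
    bool HasReleasingSockets() const { return num_releasing_sockets_ > 0; }

    int idle_sockets() const { return idle_sockets_; }

   private:
    // Analogue of DoReleaseSocket(): runs from the task queue.
    void DoReleaseSocket(int socket_id) {
      assert(num_releasing_sockets_ > 0);
      --num_releasing_sockets_;
      ++idle_sockets_;
      std::cout << "socket " << socket_id << " returned to the idle list\n";
      // Only once every in-flight release has landed do we hand the freed
      // capacity to stalled requests, mirroring the
      // "while (num_releasing_sockets_ == 0)" loop in the diff.
      if (num_releasing_sockets_ == 0)
        std::cout << "no releases in flight; wake stalled groups\n";
    }

    int num_releasing_sockets_ = 0;
    int idle_sockets_ = 0;
  };

  int main() {
    ToyPool pool;
    pool.ReleaseSocket(1);
    pool.ReleaseSocket(2);
    // The DoReleaseSocket() tasks are still queued, so the pool reports that
    // sockets are about to be released.
    assert(pool.HasReleasingSockets());
    RunAllPending();  // The tests' MessageLoop::current()->RunAllPending().
    assert(!pool.HasReleasingSockets());
    assert(pool.idle_sockets() == 2);
    return 0;
  }

The unit-test changes later in this diff (the added MessageLoop::current()->RunAllPending() calls) exist precisely because of this deferral: idle-socket counts are only correct after the queued DoReleaseSocket() tasks have run.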
diff --git a/net/socket/client_socket_pool_base.h b/net/socket/client_socket_pool_base.h
index 6e27969..deb405d 100644
--- a/net/socket/client_socket_pool_base.h
+++ b/net/socket/client_socket_pool_base.h
@@ -221,6 +221,9 @@ class ClientSocketPoolBaseHelper
// NetworkChangeNotifier::Observer methods:
virtual void OnIPAddressChanged();
+ // For testing.
+ bool may_have_stalled_group() const { return may_have_stalled_group_; }
+
int NumConnectJobsInGroup(const std::string& group_name) const {
return group_map_.find(group_name)->second.jobs.size();
}
@@ -263,10 +266,16 @@ class ClientSocketPoolBaseHelper
// A Group is allocated per group_name when there are idle sockets or pending
// requests. Otherwise, the Group object is removed from the map.
- // |active_socket_count| tracks the number of sockets held by clients.
+ // |active_socket_count| tracks the number of sockets held by clients. Of
+ // this number of sockets held by clients, some of them may be released soon,
+ // since ReleaseSocket() was called on them, but the DoReleaseSocket() task
+ // has not run yet for them. |num_releasing_sockets| tracks this count,
+ // which is useful for not starting up new ConnectJobs when sockets may
+ // become available really soon.
struct Group {
Group()
: active_socket_count(0),
+ num_releasing_sockets(0),
backup_job(NULL),
backup_task(NULL) {
}
@@ -285,6 +294,10 @@ class ClientSocketPoolBaseHelper
max_sockets_per_group;
}
+ bool HasReleasingSockets() const {
+ return num_releasing_sockets > 0;
+ }
+
RequestPriority TopPendingPriority() const {
return pending_requests.front()->priority();
}
@@ -304,6 +317,8 @@ class ClientSocketPoolBaseHelper
std::set<const ConnectJob*> jobs;
RequestQueue pending_requests;
int active_socket_count; // number of active sockets used by clients
+ // Number of sockets being released within one loop through the MessageLoop.
+ int num_releasing_sockets;
// A backup job in case the connect for this group takes too long.
ConnectJob* backup_job;
CancelableTask* backup_task;
@@ -322,6 +337,10 @@ class ClientSocketPoolBaseHelper
void IncrementIdleCount();
void DecrementIdleCount();
+ // Called via PostTask by ReleaseSocket.
+ void DoReleaseSocket(
+ const std::string& group_name, ClientSocket* socket, int id);
+
// Scans the group map for groups which have an available socket slot and
// at least one pending request. Returns number of groups found, and if found
// at least one, fills |group| and |group_name| with data of the stalled group
@@ -337,17 +356,15 @@ class ClientSocketPoolBaseHelper
// Removes |job| from |connect_job_set_|. Also updates |group| if non-NULL.
void RemoveConnectJob(const ConnectJob* job, Group* group);
+ // Same as OnAvailableSocketSlot except it looks up the Group first to see if
+ // it's there.
+ void MaybeOnAvailableSocketSlot(const std::string& group_name);
+
// Might delete the Group from |group_map_|.
- // If |was_at_socket_limit|, will also check for idle sockets to assign
- // to any stalled groups.
- void OnAvailableSocketSlot(const std::string& group_name,
- bool was_at_socket_limit);
+ void OnAvailableSocketSlot(const std::string& group_name, Group* group);
- // Process a pending socket request for a group.
- // If |was_at_socket_limit|, will also check for idle sockets to assign
- // to any stalled groups.
- void ProcessPendingRequest(const std::string& group_name,
- bool was_at_socket_limit);
+ // Process a request from a group's pending_requests queue.
+ void ProcessPendingRequest(const std::string& group_name, Group* group);
// Assigns |socket| to |handle| and updates |group|'s counters appropriately.
void HandOutSocket(ClientSocket* socket,
@@ -367,6 +384,7 @@ class ClientSocketPoolBaseHelper
void CancelAllConnectJobs();
// Returns true if we can't create any more sockets due to the total limit.
+ // TODO(phajdan.jr): Also take idle sockets into account.
bool ReachedMaxSocketsLimit() const;
// This is the internal implementation of RequestSocket(). It differs in that
@@ -375,10 +393,6 @@ class ClientSocketPoolBaseHelper
int RequestSocketInternal(const std::string& group_name,
const Request* request);
- // Assigns an idle socket for the group to the request.
- // Returns |true| if an idle socket is available, false otherwise.
- bool AssignIdleSocketToGroup(Group* group, const Request* request);
-
static void LogBoundConnectJobToRequest(
const NetLog::Source& connect_job_source, const Request* request);
@@ -395,13 +409,6 @@ class ClientSocketPoolBaseHelper
// I'm not sure if we hit this situation often.
void CloseOneIdleSocket();
- // Checks if there are stalled socket groups that should be notified
- // for possible wakeup.
- void CheckForStalledSocketGroups();
-
- // Returns true if we might have a stalled group.
- bool MayHaveStalledGroups();
-
GroupMap group_map_;
// Timer used to periodically prune idle sockets that timed out or can't be
@@ -417,6 +424,9 @@ class ClientSocketPoolBaseHelper
// Number of connected sockets we handed out across all groups.
int handed_out_socket_count_;
+ // Number of sockets being released.
+ int num_releasing_sockets_;
+
// The maximum total number of sockets. See ReachedMaxSocketsLimit.
const int max_sockets_;
@@ -427,6 +437,26 @@ class ClientSocketPoolBaseHelper
const base::TimeDelta unused_idle_socket_timeout_;
const base::TimeDelta used_idle_socket_timeout_;
+ // Until the maximum number of sockets limit is reached, a group can only
+ // have pending requests if it exceeds the "max sockets per group" limit.
+ //
+ // This means when a socket is released, the only pending requests that can
+ // be started next belong to the same group.
+ //
+ // However once the |max_sockets_| limit is reached, this stops being true:
+ // groups can now have pending requests without having first reached the
+ // |max_sockets_per_group_| limit. So choosing the next request involves
+ // selecting the highest priority request across *all* groups.
+ //
+ // |may_have_stalled_group_| is not conclusive, since when we cancel pending
+ // requests, we may reach the situation where we have the maximum number of
+ // sockets, but no request is stalled because of the global socket limit
+ // (although some requests may be blocked on the socket per group limit).
+ // We don't strictly maintain |may_have_stalled_group_|, since that would
+ // require a linear search through all groups in |group_map_| to see if one
+ // of them is stalled.
+ bool may_have_stalled_group_;
+
const scoped_ptr<ConnectJobFactory> connect_job_factory_;
// TODO(vandebo) Remove when backup jobs move to TCPClientSocketPool
@@ -439,9 +469,6 @@ class ClientSocketPoolBaseHelper
// pool. This is so that when sockets get released back to the pool, we can
// make sure that they are discarded rather than reused.
int pool_generation_number_;
-
- // The count of stalled groups the last time we checked.
- int last_stalled_group_count_;
};
} // namespace internal
@@ -548,6 +575,11 @@ class ClientSocketPoolBase {
return helper_->OnConnectJobComplete(result, job);
}
+ // For testing.
+ bool may_have_stalled_group() const {
+ return helper_->may_have_stalled_group();
+ }
+
int NumConnectJobsInGroup(const std::string& group_name) const {
return helper_->NumConnectJobsInGroup(group_name);
}
@@ -604,10 +636,11 @@ class ClientSocketPoolBase {
// Histograms for the pool
const scoped_refptr<ClientSocketPoolHistograms> histograms_;
- // The reason for reference counting here is because the operations on
- // the ClientSocketPoolBaseHelper which release sockets can cause the
- // ClientSocketPoolBase<T> reference to drop to zero. While we're deep
- // in cleanup code, we'll often hold a reference to |self|.
+ // One might ask why ClientSocketPoolBaseHelper is also refcounted if its
+ // containing ClientSocketPool is already refcounted. The reason is that
+ // DoReleaseSocket() posts a task. If ClientSocketPool gets deleted between
+ // the posting of the task and the execution, then we'll hit the DCHECK that
+ // |ClientSocketPoolBaseHelper::group_map_| is empty.
scoped_refptr<internal::ClientSocketPoolBaseHelper> helper_;
DISALLOW_COPY_AND_ASSIGN(ClientSocketPoolBase);
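(Annotation, not part of the patch.) The long comment about |may_have_stalled_group_| above boils down to this: once the global |max_sockets_| limit is hit, the next request to service must be chosen by scanning every group for the highest-priority stalled request. A rough, self-contained illustration of that scan follows, using simplified stand-in types rather than the real Group/Request/FindTopStalledGroup definitions.

  #include <deque>
  #include <map>
  #include <string>

  // Simplified stand-ins for the real types.
  struct ToyRequest {
    int priority;  // Higher value means more urgent.
  };

  struct ToyGroup {
    std::deque<ToyRequest> pending_requests;
    int active_socket_count = 0;
    int max_sockets_per_group = 6;

    // A group is "stalled" when it has pending work and still has room under
    // its per-group limit, i.e. it is blocked only by the global limit.
    bool IsStalled() const {
      return !pending_requests.empty() &&
             active_socket_count < max_sockets_per_group;
    }
    int TopPendingPriority() const { return pending_requests.front().priority; }
  };

  // Analogue of FindTopStalledGroup(): returns how many groups are stalled
  // and reports (via |top_name|) the one whose next pending request has the
  // highest priority. The real pool runs this scan only while
  // |may_have_stalled_group_| is set, because it is linear in group count.
  int FindTopStalledGroup(const std::map<std::string, ToyGroup>& groups,
                          std::string* top_name) {
    int stalled_count = 0;
    const ToyGroup* best = nullptr;
    for (const auto& entry : groups) {
      const ToyGroup& group = entry.second;
      if (!group.IsStalled())
        continue;
      ++stalled_count;
      if (!best || group.TopPendingPriority() > best->TopPendingPriority()) {
        best = &group;
        *top_name = entry.first;
      }
    }
    return stalled_count;
  }

  int main() {
    std::map<std::string, ToyGroup> groups;
    groups["a"].pending_requests.push_back(ToyRequest{1});
    groups["b"].pending_requests.push_back(ToyRequest{5});
    std::string top;
    int stalled = FindTopStalledGroup(groups, &top);
    // stalled == 2 and top == "b": "b" holds the highest-priority stalled
    // request, so it gets the next available socket slot.
    return (stalled == 2 && top == "b") ? 0 : 1;
  }

ProcessPendingRequest() then starts the chosen request, and the flag is cleared once a scan finds no stalled groups, which is what the MayHaveStalledGroupReset test added below exercises.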
diff --git a/net/socket/client_socket_pool_base_unittest.cc b/net/socket/client_socket_pool_base_unittest.cc
index 3e02af7..7b84d94 100644
--- a/net/socket/client_socket_pool_base_unittest.cc
+++ b/net/socket/client_socket_pool_base_unittest.cc
@@ -772,6 +772,58 @@ TEST_F(ClientSocketPoolBaseTest, TotalLimitCountsConnectingSockets) {
EXPECT_EQ(kIndexOutOfBounds, GetOrderOfRequest(6));
}
+// Inside ClientSocketPoolBase we have a may_have_stalled_group flag,
+// which tells it to use a more expensive, but accurate, group selection
+// algorithm. Make sure it doesn't get stuck in the "on" state.
+TEST_F(ClientSocketPoolBaseTest, MayHaveStalledGroupReset) {
+ CreatePool(kDefaultMaxSockets, kDefaultMaxSocketsPerGroup);
+
+ EXPECT_FALSE(pool_->base()->may_have_stalled_group());
+
+ // Reach group socket limit.
+ EXPECT_EQ(OK, StartRequest("a", kDefaultPriority));
+ EXPECT_EQ(OK, StartRequest("a", kDefaultPriority));
+ EXPECT_FALSE(pool_->base()->may_have_stalled_group());
+
+ // Reach total limit, but don't request more sockets.
+ EXPECT_EQ(OK, StartRequest("b", kDefaultPriority));
+ EXPECT_EQ(OK, StartRequest("b", kDefaultPriority));
+ EXPECT_FALSE(pool_->base()->may_have_stalled_group());
+
+ // Request one more socket while we are at the maximum sockets limit.
+ // This should flip the may_have_stalled_group flag.
+ EXPECT_EQ(ERR_IO_PENDING, StartRequest("c", kDefaultPriority));
+ EXPECT_TRUE(pool_->base()->may_have_stalled_group());
+
+ // After releasing first connection for "a", we're still at the
+ // maximum sockets limit, but every group's pending queue is empty,
+ // so we reset the flag.
+ EXPECT_TRUE(ReleaseOneConnection(NO_KEEP_ALIVE));
+ EXPECT_FALSE(pool_->base()->may_have_stalled_group());
+
+ // Requesting additional socket while at the total limit should
+ // flip the flag back to "on".
+ EXPECT_EQ(ERR_IO_PENDING, StartRequest("c", kDefaultPriority));
+ EXPECT_TRUE(pool_->base()->may_have_stalled_group());
+
+ // We'll request one more socket to verify that we don't reset the flag
+ // too eagerly.
+ EXPECT_EQ(ERR_IO_PENDING, StartRequest("d", kDefaultPriority));
+ EXPECT_TRUE(pool_->base()->may_have_stalled_group());
+
+ // We're at the maximum socket limit, and still have one request pending
+ // for "d". Flag should be "on".
+ EXPECT_TRUE(ReleaseOneConnection(NO_KEEP_ALIVE));
+ EXPECT_TRUE(pool_->base()->may_have_stalled_group());
+
+ // Now every group's pending queue should be empty again.
+ EXPECT_TRUE(ReleaseOneConnection(NO_KEEP_ALIVE));
+ EXPECT_FALSE(pool_->base()->may_have_stalled_group());
+
+ ReleaseAllConnections(NO_KEEP_ALIVE);
+ EXPECT_FALSE(pool_->base()->may_have_stalled_group());
+}
+
TEST_F(ClientSocketPoolBaseTest, CorrectlyCountStalledGroups) {
CreatePool(kDefaultMaxSockets, kDefaultMaxSockets);
connect_job_factory_->set_job_type(TestConnectJob::kMockJob);
@@ -824,117 +876,56 @@ TEST_F(ClientSocketPoolBaseTest, StallAndThenCancelAndTriggerAvailableSocket) {
handles[i].Reset();
}
-TEST_F(ClientSocketPoolBaseTest, CancelStalledSocketAtSocketLimit) {
+TEST_F(ClientSocketPoolBaseTest, CloseIdleSocketAtSocketLimit) {
CreatePool(kDefaultMaxSockets, kDefaultMaxSocketsPerGroup);
connect_job_factory_->set_job_type(TestConnectJob::kMockJob);
- {
- ClientSocketHandle handles[kDefaultMaxSockets];
- TestCompletionCallback callbacks[kDefaultMaxSockets];
- for (int i = 0; i < kDefaultMaxSockets; ++i) {
- EXPECT_EQ(OK,
- InitHandle(&handles[i], IntToString(i), kDefaultPriority,
- &callbacks[i], pool_, BoundNetLog()));
- }
-
- // Force a stalled group.
- ClientSocketHandle stalled_handle;
+ for (int i = 0; i < kDefaultMaxSockets; ++i) {
+ ClientSocketHandle handle;
TestCompletionCallback callback;
- EXPECT_EQ(ERR_IO_PENDING,
- InitHandle(&stalled_handle, "foo", kDefaultPriority, &callback,
+ EXPECT_EQ(OK,
+ InitHandle(&handle, IntToString(i), kDefaultPriority, &callback,
pool_, BoundNetLog()));
+ }
- // Cancel the stalled request.
- stalled_handle.Reset();
+ // Stall a group
+ ClientSocketHandle handle;
+ TestCompletionCallback callback;
+ EXPECT_EQ(ERR_IO_PENDING,
+ InitHandle(&handle, "foo", kDefaultPriority, &callback, pool_,
+ BoundNetLog()));
- EXPECT_EQ(kDefaultMaxSockets, client_socket_factory_.allocation_count());
- EXPECT_EQ(0, pool_->IdleSocketCount());
+ // Cancel the stalled request.
+ handle.Reset();
- // Dropping out of scope will close all handles and return them to idle.
- }
+ // Flush all the DoReleaseSocket tasks.
+ MessageLoop::current()->RunAllPending();
EXPECT_EQ(kDefaultMaxSockets, client_socket_factory_.allocation_count());
EXPECT_EQ(kDefaultMaxSockets, pool_->IdleSocketCount());
-}
-
-TEST_F(ClientSocketPoolBaseTest, CancelPendingSocketAtSocketLimit) {
- CreatePool(kDefaultMaxSockets, kDefaultMaxSocketsPerGroup);
- connect_job_factory_->set_job_type(TestConnectJob::kMockWaitingJob);
- {
- ClientSocketHandle handles[kDefaultMaxSockets];
- for (int i = 0; i < kDefaultMaxSockets; ++i) {
- TestCompletionCallback callback;
- EXPECT_EQ(ERR_IO_PENDING,
- InitHandle(&handles[i], IntToString(i), kDefaultPriority,
- &callback, pool_, BoundNetLog()));
- }
-
- // Force a stalled group.
- connect_job_factory_->set_job_type(TestConnectJob::kMockPendingJob);
- ClientSocketHandle stalled_handle;
+ for (int i = 0; i < kDefaultMaxSockets; ++i) {
+ ClientSocketHandle handle;
TestCompletionCallback callback;
- EXPECT_EQ(ERR_IO_PENDING,
- InitHandle(&stalled_handle, "foo", kDefaultPriority, &callback,
- pool_, BoundNetLog()));
-
- // Since it is stalled, it should have no connect jobs.
- EXPECT_EQ(0, pool_->NumConnectJobsInGroup("foo"));
-
- // Cancel the stalled request.
- handles[0].Reset();
-
- MessageLoop::current()->RunAllPending();
-
- // Now we should have a connect job.
- EXPECT_EQ(1, pool_->NumConnectJobsInGroup("foo"));
-
- // The stalled socket should connect.
- EXPECT_EQ(OK, callback.WaitForResult());
-
- EXPECT_EQ(kDefaultMaxSockets + 1,
- client_socket_factory_.allocation_count());
- EXPECT_EQ(0, pool_->IdleSocketCount());
- EXPECT_EQ(0, pool_->NumConnectJobsInGroup("foo"));
-
- // Dropping out of scope will close all handles and return them to idle.
+ EXPECT_EQ(OK,
+ InitHandle(&handle, StringPrintf("Take 2: %d", i),
+ kDefaultPriority, &callback, pool_, BoundNetLog()));
}
- EXPECT_EQ(1, pool_->IdleSocketCount());
-}
-
-TEST_F(ClientSocketPoolBaseTest, WaitForStalledSocketAtSocketLimit) {
- CreatePool(kDefaultMaxSockets, kDefaultMaxSocketsPerGroup);
- connect_job_factory_->set_job_type(TestConnectJob::kMockJob);
-
- ClientSocketHandle stalled_handle;
- TestCompletionCallback callback;
- {
- ClientSocketHandle handles[kDefaultMaxSockets];
- for (int i = 0; i < kDefaultMaxSockets; ++i) {
- TestCompletionCallback callback;
- EXPECT_EQ(OK,
- InitHandle(&handles[i], StringPrintf("Take 2: %d", i),
- kDefaultPriority, &callback, pool_, BoundNetLog()));
- }
-
- EXPECT_EQ(kDefaultMaxSockets, client_socket_factory_.allocation_count());
- EXPECT_EQ(0, pool_->IdleSocketCount());
+ EXPECT_EQ(2 * kDefaultMaxSockets, client_socket_factory_.allocation_count());
+ EXPECT_EQ(0, pool_->IdleSocketCount());
- // Now we will hit the socket limit.
- EXPECT_EQ(ERR_IO_PENDING,
- InitHandle(&stalled_handle, "foo", kDefaultPriority, &callback,
- pool_, BoundNetLog()));
+ // Before the next round of DoReleaseSocket tasks run, we will hit the
+ // socket limit.
- // Dropping out of scope will close all handles and return them to idle.
- }
+ EXPECT_EQ(ERR_IO_PENDING,
+ InitHandle(&handle, "foo", kDefaultPriority, &callback, pool_,
+ BoundNetLog()));
// But if we wait for it, the released idle sockets will be closed in
// preference of the waiting request.
- EXPECT_EQ(OK, callback.WaitForResult());
- EXPECT_EQ(kDefaultMaxSockets + 1, client_socket_factory_.allocation_count());
- EXPECT_EQ(3, pool_->IdleSocketCount());
+ EXPECT_EQ(OK, callback.WaitForResult());
}
// Regression test for http://crbug.com/40952.
@@ -1288,11 +1279,11 @@ TEST_F(ClientSocketPoolBaseTest, GroupWithPendingRequestsIsNotEmpty) {
// Create a stalled group with high priorities.
EXPECT_EQ(ERR_IO_PENDING, StartRequest("c", kHighPriority));
EXPECT_EQ(ERR_IO_PENDING, StartRequest("c", kHighPriority));
+ EXPECT_TRUE(pool_->base()->may_have_stalled_group());
- // Release the first two sockets from "a". Because this is a keepalive,
- // the first release will unblock the pending request for "a". The
- // second release will unblock a request for "c", becaue it is the next
- // high priority socket.
+ // Release the first two sockets from "a", which will make room
+ // for requests from "c". After that "a" will have no active sockets
+ // and one pending request.
EXPECT_TRUE(ReleaseOneConnection(KEEP_ALIVE));
EXPECT_TRUE(ReleaseOneConnection(KEEP_ALIVE));
@@ -1300,8 +1291,6 @@ TEST_F(ClientSocketPoolBaseTest, GroupWithPendingRequestsIsNotEmpty) {
// we were hitting a CHECK here.
EXPECT_EQ(0, pool_->IdleSocketCountInGroup("a"));
pool_->CloseIdleSockets();
-
- MessageLoop::current()->RunAllPending(); // Run the released socket wakeups
}
TEST_F(ClientSocketPoolBaseTest, BasicAsynchronous) {
@@ -1402,7 +1391,7 @@ TEST_F(ClientSocketPoolBaseTest, CancelRequestLimitsJobs) {
EXPECT_EQ(kDefaultMaxSocketsPerGroup, pool_->NumConnectJobsInGroup("a"));
requests_[0]->handle()->Reset();
- EXPECT_EQ(kDefaultMaxSocketsPerGroup, pool_->NumConnectJobsInGroup("a"));
+ EXPECT_EQ(kDefaultMaxSocketsPerGroup - 1, pool_->NumConnectJobsInGroup("a"));
}
// When requests and ConnectJobs are not coupled, the request will get serviced
@@ -1435,7 +1424,7 @@ TEST_F(ClientSocketPoolBaseTest, ReleaseSockets) {
// Both Requests 2 and 3 are pending. We release socket 1 which should
// service request 2. Request 3 should still be waiting.
req1.handle()->Reset();
- MessageLoop::current()->RunAllPending(); // Run the released socket wakeups
+ MessageLoop::current()->RunAllPending(); // Run the DoReleaseSocket()
ASSERT_TRUE(req2.handle()->socket());
EXPECT_EQ(OK, req2.WaitForResult());
EXPECT_FALSE(req3.handle()->socket());
@@ -1721,6 +1710,31 @@ class TestReleasingSocketRequest : public CallbackRunner< Tuple1<int> > {
TestCompletionCallback callback2_;
};
+// This test covers the case where, within the same DoReleaseSocket() callback,
+// we release the just acquired socket and start up a new request. See bug
+// 36871 for details.
+TEST_F(ClientSocketPoolBaseTest, ReleasedSocketReleasesToo) {
+ CreatePool(kDefaultMaxSockets, kDefaultMaxSocketsPerGroup);
+
+ connect_job_factory_->set_job_type(TestConnectJob::kMockJob);
+
+ // Complete one request and release the socket.
+ ClientSocketHandle handle;
+ TestCompletionCallback callback;
+ EXPECT_EQ(OK, InitHandle(&handle, "a", kDefaultPriority, &callback, pool_,
+ BoundNetLog()));
+ handle.Reset();
+
+ // Before the DoReleaseSocket() task has run, start up a
+ // TestReleasingSocketRequest. This one will be ERR_IO_PENDING since
+ // num_releasing_sockets > 0 and there was no idle socket to use yet.
+ TestReleasingSocketRequest request(pool_.get());
+ EXPECT_EQ(ERR_IO_PENDING, InitHandle(request.handle(), "a", kDefaultPriority,
+ &request, pool_, BoundNetLog()));
+
+ EXPECT_EQ(OK, request.WaitForResult());
+}
+
// http://crbug.com/44724 regression test.
// We start releasing the pool when we flush on network change. When that
// happens, the only active references are in the ClientSocketHandles. When a
@@ -1770,160 +1784,6 @@ TEST_F(ClientSocketPoolBaseTest, DoNotReuseSocketAfterFlush) {
EXPECT_EQ(ClientSocketHandle::UNUSED, handle.reuse_type());
}
-// Test delayed socket binding for the case where we have two connects,
-// and while one is waiting on a connect, the other frees up.
-// The socket waiting on a connect should switch immediately to the freed
-// up socket.
-TEST_F(ClientSocketPoolBaseTest, DelayedSocketBindingWaitingForConnect) {
- CreatePool(kDefaultMaxSockets, kDefaultMaxSocketsPerGroup);
- connect_job_factory_->set_job_type(TestConnectJob::kMockPendingJob);
-
- ClientSocketHandle handle1;
- TestCompletionCallback callback;
- EXPECT_EQ(ERR_IO_PENDING,
- InitHandle(&handle1, "a", kDefaultPriority,
- &callback, pool_, BoundNetLog()));
- EXPECT_EQ(OK, callback.WaitForResult());
-
- // No idle sockets, no pending jobs.
- EXPECT_EQ(0, pool_->IdleSocketCount());
- EXPECT_EQ(0, pool_->NumConnectJobsInGroup("a"));
-
- // Create a second socket to the same host, but this one will wait.
- connect_job_factory_->set_job_type(TestConnectJob::kMockWaitingJob);
- ClientSocketHandle handle2;
- EXPECT_EQ(ERR_IO_PENDING,
- InitHandle(&handle2, "a", kDefaultPriority,
- &callback, pool_, BoundNetLog()));
- // No idle sockets, and one connecting job.
- EXPECT_EQ(0, pool_->IdleSocketCount());
- EXPECT_EQ(1, pool_->NumConnectJobsInGroup("a"));
-
- // Return the first handle to the pool. This will initiate the delayed
- // binding.
- handle1.Reset();
-
- MessageLoop::current()->RunAllPending();
-
- // Still no idle sockets, still one pending connect job.
- EXPECT_EQ(0, pool_->IdleSocketCount());
- EXPECT_EQ(1, pool_->NumConnectJobsInGroup("a"));
-
- // The second socket connected, even though it was a Waiting Job.
- EXPECT_EQ(OK, callback.WaitForResult());
-
- // And we can see there is still one job waiting.
- EXPECT_EQ(1, pool_->NumConnectJobsInGroup("a"));
-
- // Finally, signal the waiting Connect.
- client_socket_factory_.SignalJobs();
- EXPECT_EQ(0, pool_->NumConnectJobsInGroup("a"));
-
- MessageLoop::current()->RunAllPending();
-}
-
-// Test delayed socket binding when a group is at capacity and one
-// of the group's sockets frees up.
-TEST_F(ClientSocketPoolBaseTest, DelayedSocketBindingAtGroupCapacity) {
- CreatePool(kDefaultMaxSockets, kDefaultMaxSocketsPerGroup);
- connect_job_factory_->set_job_type(TestConnectJob::kMockPendingJob);
-
- ClientSocketHandle handle1;
- TestCompletionCallback callback;
- EXPECT_EQ(ERR_IO_PENDING,
- InitHandle(&handle1, "a", kDefaultPriority,
- &callback, pool_, BoundNetLog()));
- EXPECT_EQ(OK, callback.WaitForResult());
-
- // No idle sockets, no pending jobs.
- EXPECT_EQ(0, pool_->IdleSocketCount());
- EXPECT_EQ(0, pool_->NumConnectJobsInGroup("a"));
-
- // Create a second socket to the same host, but this one will wait.
- connect_job_factory_->set_job_type(TestConnectJob::kMockWaitingJob);
- ClientSocketHandle handle2;
- EXPECT_EQ(ERR_IO_PENDING,
- InitHandle(&handle2, "a", kDefaultPriority,
- &callback, pool_, BoundNetLog()));
- // No idle sockets, and one connecting job.
- EXPECT_EQ(0, pool_->IdleSocketCount());
- EXPECT_EQ(1, pool_->NumConnectJobsInGroup("a"));
-
- // Return the first handle to the pool. This will initiate the delayed
- // binding.
- handle1.Reset();
-
- MessageLoop::current()->RunAllPending();
-
- // Still no idle sockets, still one pending connect job.
- EXPECT_EQ(0, pool_->IdleSocketCount());
- EXPECT_EQ(1, pool_->NumConnectJobsInGroup("a"));
-
- // The second socket connected, even though it was a Waiting Job.
- EXPECT_EQ(OK, callback.WaitForResult());
-
- // And we can see there is still one job waiting.
- EXPECT_EQ(1, pool_->NumConnectJobsInGroup("a"));
-
- // Finally, signal the waiting Connect.
- client_socket_factory_.SignalJobs();
- EXPECT_EQ(0, pool_->NumConnectJobsInGroup("a"));
-
- MessageLoop::current()->RunAllPending();
-}
-
-// Test out the case where we have one socket connected, one
-// connecting, when the first socket finishes and goes idle.
-// Although the second connection is pending, th second request
-// should complete, by taking the first socket's idle socket.
-TEST_F(ClientSocketPoolBaseTest, DelayedSocketBindingAtStall) {
- CreatePool(kDefaultMaxSockets, kDefaultMaxSocketsPerGroup);
- connect_job_factory_->set_job_type(TestConnectJob::kMockPendingJob);
-
- ClientSocketHandle handle1;
- TestCompletionCallback callback;
- EXPECT_EQ(ERR_IO_PENDING,
- InitHandle(&handle1, "a", kDefaultPriority,
- &callback, pool_, BoundNetLog()));
- EXPECT_EQ(OK, callback.WaitForResult());
-
- // No idle sockets, no pending jobs.
- EXPECT_EQ(0, pool_->IdleSocketCount());
- EXPECT_EQ(0, pool_->NumConnectJobsInGroup("a"));
-
- // Create a second socket to the same host, but this one will wait.
- connect_job_factory_->set_job_type(TestConnectJob::kMockWaitingJob);
- ClientSocketHandle handle2;
- EXPECT_EQ(ERR_IO_PENDING,
- InitHandle(&handle2, "a", kDefaultPriority,
- &callback, pool_, BoundNetLog()));
- // No idle sockets, and one connecting job.
- EXPECT_EQ(0, pool_->IdleSocketCount());
- EXPECT_EQ(1, pool_->NumConnectJobsInGroup("a"));
-
- // Return the first handle to the pool. This will initiate the delayed
- // binding.
- handle1.Reset();
-
- MessageLoop::current()->RunAllPending();
-
- // Still no idle sockets, still one pending connect job.
- EXPECT_EQ(0, pool_->IdleSocketCount());
- EXPECT_EQ(1, pool_->NumConnectJobsInGroup("a"));
-
- // The second socket connected, even though it was a Waiting Job.
- EXPECT_EQ(OK, callback.WaitForResult());
-
- // And we can see there is still one job waiting.
- EXPECT_EQ(1, pool_->NumConnectJobsInGroup("a"));
-
- // Finally, signal the waiting Connect.
- client_socket_factory_.SignalJobs();
- EXPECT_EQ(0, pool_->NumConnectJobsInGroup("a"));
-
- MessageLoop::current()->RunAllPending();
-}
-
} // namespace
} // namespace net
diff --git a/net/socket/socks_client_socket_pool_unittest.cc b/net/socket/socks_client_socket_pool_unittest.cc
index 7bfce55..fb83d96 100644
--- a/net/socket/socks_client_socket_pool_unittest.cc
+++ b/net/socket/socks_client_socket_pool_unittest.cc
@@ -319,16 +319,15 @@ TEST_F(SOCKSClientSocketPoolTest, CancelDuringTCPConnect) {
pool_->CancelRequest("a", requests_[0]->handle());
pool_->CancelRequest("a", requests_[1]->handle());
- // Requests in the connect phase don't actually get cancelled.
- EXPECT_EQ(0, tcp_socket_pool_->cancel_count());
+ EXPECT_EQ(1, tcp_socket_pool_->cancel_count());
// Now wait for the TCP sockets to connect.
MessageLoop::current()->RunAllPending();
EXPECT_EQ(kRequestNotFound, GetOrderOfRequest(1));
EXPECT_EQ(kRequestNotFound, GetOrderOfRequest(2));
- EXPECT_EQ(0, tcp_socket_pool_->cancel_count());
- EXPECT_EQ(2, pool_->IdleSocketCount());
+ EXPECT_EQ(1, tcp_socket_pool_->cancel_count());
+ EXPECT_EQ(1, pool_->IdleSocketCount());
requests_[0]->handle()->Reset();
requests_[1]->handle()->Reset();
@@ -355,8 +354,7 @@ TEST_F(SOCKSClientSocketPoolTest, CancelDuringSOCKSConnect) {
pool_->CancelRequest("a", requests_[0]->handle());
pool_->CancelRequest("a", requests_[1]->handle());
EXPECT_EQ(0, tcp_socket_pool_->cancel_count());
- // Requests in the connect phase don't actually get cancelled.
- EXPECT_EQ(0, tcp_socket_pool_->release_count());
+ EXPECT_EQ(1, tcp_socket_pool_->release_count());
// Now wait for the async data to reach the SOCKS connect jobs.
MessageLoop::current()->RunAllPending();
@@ -364,8 +362,8 @@ TEST_F(SOCKSClientSocketPoolTest, CancelDuringSOCKSConnect) {
EXPECT_EQ(kRequestNotFound, GetOrderOfRequest(1));
EXPECT_EQ(kRequestNotFound, GetOrderOfRequest(2));
EXPECT_EQ(0, tcp_socket_pool_->cancel_count());
- EXPECT_EQ(0, tcp_socket_pool_->release_count());
- EXPECT_EQ(2, pool_->IdleSocketCount());
+ EXPECT_EQ(1, tcp_socket_pool_->release_count());
+ EXPECT_EQ(1, pool_->IdleSocketCount());
requests_[0]->handle()->Reset();
requests_[1]->handle()->Reset();