author    willchan@chromium.org <willchan@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>    2010-04-07 21:29:49 +0000
committer willchan@chromium.org <willchan@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>    2010-04-07 21:29:49 +0000
commit    d43002e2c5da97a28eecbd552e4f4ec027a19179 (patch)
tree      566c14f12c05bd77970fb52e0a8cdacdcdaa3ec0 /net
parent    21afbe283b2c6cf3d76310492165fa6b10eff791 (diff)
Fix ClientSocketPool to count idle sockets toward socket limit.
Also fix ClientSocketPool to free idle sockets when we've hit the socket limit.

BUG=32817
Review URL: http://codereview.chromium.org/1574015

git-svn-id: svn://svn.chromium.org/chrome/trunk/src@43882 0039d316-1c4b-4281-b951-d872f2087c98
Diffstat (limited to 'net')
-rw-r--r--  net/socket/client_socket_pool_base.cc            46
-rw-r--r--  net/socket/client_socket_pool_base.h              17
-rw-r--r--  net/socket/client_socket_pool_base_unittest.cc    80
3 files changed, 115 insertions(+), 28 deletions(-)
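
The core of the change, in rough form: idle sockets now count against the global socket limit, and when a new connection is needed while the pool sits at that limit, one idle socket is closed to free a slot instead of letting the request stall. A minimal standalone sketch of that accounting (hypothetical, simplified counters; not the actual pool code):

// Sketch only. handed_out_/connecting_/idle_ stand in for the pool's real
// per-state socket counts; max_sockets_ is the global cap.
struct PoolAccountingSketch {
  int handed_out_ = 0;
  int connecting_ = 0;
  int idle_ = 0;
  int max_sockets_ = 256;

  // After this change, idle sockets are included in the limit check.
  bool ReachedMaxSocketsLimit() const {
    return handed_out_ + connecting_ + idle_ == max_sockets_;
  }

  // Returns false only if the pool is saturated with no idle socket to evict.
  bool TryStartConnect() {
    if (ReachedMaxSocketsLimit()) {
      if (idle_ == 0)
        return false;   // truly stalled on the global limit
      --idle_;          // close one idle socket to make room
    }
    ++connecting_;
    return true;
  }
};
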
diff --git a/net/socket/client_socket_pool_base.cc b/net/socket/client_socket_pool_base.cc
index d6a8caf..4be76ff 100644
--- a/net/socket/client_socket_pool_base.cc
+++ b/net/socket/client_socket_pool_base.cc
@@ -191,18 +191,22 @@ int ClientSocketPoolBaseHelper::RequestSocketInternal(
Group& group = group_map_[group_name];
// Can we make another active socket now?
- if (ReachedMaxSocketsLimit() ||
- !group.HasAvailableSocketSlot(max_sockets_per_group_)) {
- if (ReachedMaxSocketsLimit()) {
+ if (!group.HasAvailableSocketSlot(max_sockets_per_group_)) {
+ request->net_log().AddEvent(
+ NetLog::TYPE_SOCKET_POOL_STALLED_MAX_SOCKETS_PER_GROUP);
+ return ERR_IO_PENDING;
+ }
+
+ if (ReachedMaxSocketsLimit()) {
+ if (idle_socket_count() > 0) {
+ CloseOneIdleSocket();
+ } else {
// We could check if we really have a stalled group here, but it requires
// a scan of all groups, so just flip a flag here, and do the check later.
may_have_stalled_group_ = true;
-
request->net_log().AddEvent(NetLog::TYPE_SOCKET_POOL_STALLED_MAX_SOCKETS);
- } else {
- request->net_log().AddEvent(NetLog::TYPE_SOCKET_POOL_STALLED_MAX_SOCKETS_PER_GROUP);
+ return ERR_IO_PENDING;
}
- return ERR_IO_PENDING;
}
// Try to reuse a socket.
@@ -623,8 +627,10 @@ void ClientSocketPoolBaseHelper::OnAvailableSocketSlot(
int stalled_group_count = FindTopStalledGroup(&top_group, &top_group_name);
if (stalled_group_count <= 1)
may_have_stalled_group_ = false;
- if (stalled_group_count >= 1)
+ if (stalled_group_count >= 1) {
+ CHECK_GE(1, idle_socket_count());
ProcessPendingRequest(top_group_name, top_group);
+ }
} else if (!group->pending_requests.empty()) {
ProcessPendingRequest(group_name, group);
// |group| may no longer be valid after this point. Be careful not to
@@ -713,11 +719,33 @@ void ClientSocketPoolBaseHelper::CancelAllConnectJobs() {
bool ClientSocketPoolBaseHelper::ReachedMaxSocketsLimit() const {
// Each connecting socket will eventually connect and be handed out.
- int total = handed_out_socket_count_ + connecting_socket_count_;
+ int total = handed_out_socket_count_ + connecting_socket_count_ +
+ idle_socket_count();
DCHECK_LE(total, max_sockets_);
return total == max_sockets_;
}
+void ClientSocketPoolBaseHelper::CloseOneIdleSocket() {
+ CHECK_GT(idle_socket_count(), 0);
+
+ for (GroupMap::iterator i = group_map_.begin(); i != group_map_.end(); ++i) {
+ Group& group = i->second;
+
+ if (!group.idle_sockets.empty()) {
+ std::deque<IdleSocket>::iterator j = group.idle_sockets.begin();
+ delete j->socket;
+ group.idle_sockets.erase(j);
+ DecrementIdleCount();
+ if (group.IsEmpty())
+ group_map_.erase(i);
+
+ return;
+ }
+ }
+
+ NOTREACHED();
+}
+
} // namespace internal
} // namespace net
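
A standalone sketch of the walk CloseOneIdleSocket() performs above (simplified stand-in types, not the real Group/ClientSocket classes): it closes the first idle socket found in map-iteration order, erases the group if that leaves it empty, and must return immediately because the erase invalidates the iterator.

// Illustration only: the same iterate/erase-then-return pattern with toy types.
#include <deque>
#include <map>
#include <string>

struct GroupSketch {
  std::deque<int> idle_sockets;  // stand-in for the IdleSocket entries
};

bool CloseFirstIdleSocket(std::map<std::string, GroupSketch>& groups) {
  for (auto it = groups.begin(); it != groups.end(); ++it) {
    GroupSketch& group = it->second;
    if (!group.idle_sockets.empty()) {
      group.idle_sockets.pop_front();       // "close" one idle socket
      if (group.idle_sockets.empty())
        groups.erase(it);                   // |it| is now invalid...
      return true;                          // ...so return without touching it
    }
  }
  return false;  // the real code CHECKs idle_socket_count() > 0, so NOTREACHED
}

Which group gives up its socket is effectively arbitrary (whatever the map yields first), which is what the TODO added to the header below is about.
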
diff --git a/net/socket/client_socket_pool_base.h b/net/socket/client_socket_pool_base.h
index d1abd58..782af6c 100644
--- a/net/socket/client_socket_pool_base.h
+++ b/net/socket/client_socket_pool_base.h
@@ -391,6 +391,13 @@ class ClientSocketPoolBaseHelper
// Called when the backup socket timer fires.
void OnBackupSocketTimerFired(const std::string& group_name);
+ // Closes one idle socket. Picks the first one encountered.
+ // TODO(willchan): Consider a better algorithm for doing this. Perhaps we
+ // should keep an ordered list of idle sockets, and close them in order.
+ // Requires maintaining more state. It's not clear if it's worth it since
+ // I'm not sure if we hit this situation often.
+ void CloseOneIdleSocket();
+
GroupMap group_map_;
// Timer used to periodically prune idle sockets that timed out or can't be
@@ -427,9 +434,13 @@ class ClientSocketPoolBaseHelper
// |max_sockets_per_group_| limit. So choosing the next request involves
// selecting the highest priority request across *all* groups.
//
- // Since reaching the maximum number of sockets is an edge case, we make note
- // of when it happens, and thus avoid doing the slower "scan all groups"
- // in the common case.
+ // |may_have_stalled_group_| is not conclusive, since when we cancel pending
+ // requests, we may reach the situation where we have the maximum number of
+ // sockets, but no request is stalled because of the global socket limit
+ // (although some requests may be blocked on the socket per group limit).
+ // We don't strictly maintain |may_have_stalled_group_|, since that would
+ // require a linear search through all groups in |group_map_| to see if one
+ // of them is stalled.
bool may_have_stalled_group_;
const scoped_ptr<ConnectJobFactory> connect_job_factory_;
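
The comment above describes a lazy heuristic; for contrast, a standalone sketch (hypothetical, simplified types) of the linear scan that strictly maintaining the flag would require on every state change:

// Illustration only: the O(number of groups) check the flag lets the pool defer.
#include <deque>
#include <map>
#include <string>

struct GroupSketch {
  std::deque<int> pending_requests;  // stand-in for queued Requests
  int active_sockets = 0;            // handed out + connecting for this group
};

// True if some group has waiters while still under its per-group cap,
// meaning those waiters can only be blocked by the global socket limit.
bool AnyGroupStalledOnGlobalLimit(
    const std::map<std::string, GroupSketch>& groups,
    int max_sockets_per_group) {
  for (const auto& entry : groups) {
    const GroupSketch& group = entry.second;
    if (!group.pending_requests.empty() &&
        group.active_sockets < max_sockets_per_group)
      return true;
  }
  return false;
}

Instead of recomputing this on every release or cancellation, the pool just sets may_have_stalled_group_ when a request stalls on the global limit and only pays for the scan (FindTopStalledGroup) once a socket slot actually frees up.
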
diff --git a/net/socket/client_socket_pool_base_unittest.cc b/net/socket/client_socket_pool_base_unittest.cc
index 65feffd..d2676e8 100644
--- a/net/socket/client_socket_pool_base_unittest.cc
+++ b/net/socket/client_socket_pool_base_unittest.cc
@@ -9,6 +9,7 @@
#include "base/message_loop.h"
#include "base/platform_thread.h"
#include "base/scoped_vector.h"
+#include "base/string_util.h"
#include "net/base/net_log.h"
#include "net/base/net_log_unittest.h"
#include "net/base/net_errors.h"
@@ -572,7 +573,7 @@ TEST_F(ClientSocketPoolBaseTest, TotalLimit) {
EXPECT_EQ(ERR_IO_PENDING, StartRequest("f", kDefaultPriority));
EXPECT_EQ(ERR_IO_PENDING, StartRequest("g", kDefaultPriority));
- ReleaseAllConnections(KEEP_ALIVE);
+ ReleaseAllConnections(NO_KEEP_ALIVE);
EXPECT_EQ(static_cast<int>(requests_.size()),
client_socket_factory_.allocation_count());
@@ -608,7 +609,7 @@ TEST_F(ClientSocketPoolBaseTest, TotalLimitReachedNewGroup) {
// Now create a new group and verify that we don't starve it.
EXPECT_EQ(ERR_IO_PENDING, StartRequest("c", kDefaultPriority));
- ReleaseAllConnections(KEEP_ALIVE);
+ ReleaseAllConnections(NO_KEEP_ALIVE);
EXPECT_EQ(static_cast<int>(requests_.size()),
client_socket_factory_.allocation_count());
@@ -639,11 +640,8 @@ TEST_F(ClientSocketPoolBaseTest, TotalLimitRespectsPriority) {
EXPECT_EQ(ERR_IO_PENDING, StartRequest("a", MEDIUM));
EXPECT_EQ(ERR_IO_PENDING, StartRequest("b", HIGHEST));
- ReleaseAllConnections(KEEP_ALIVE);
+ ReleaseAllConnections(NO_KEEP_ALIVE);
- // We're re-using one socket for group "a", and one for "b".
- EXPECT_EQ(static_cast<int>(requests_.size()) - 2,
- client_socket_factory_.allocation_count());
EXPECT_EQ(requests_.size() - kDefaultMaxSockets, completion_count_);
// First 4 requests don't have to wait, and finish in order.
@@ -677,10 +675,9 @@ TEST_F(ClientSocketPoolBaseTest, TotalLimitRespectsGroupLimit) {
EXPECT_EQ(ERR_IO_PENDING, StartRequest("a", LOW));
EXPECT_EQ(ERR_IO_PENDING, StartRequest("b", HIGHEST));
- ReleaseAllConnections(KEEP_ALIVE);
+ ReleaseAllConnections(NO_KEEP_ALIVE);
- // We're re-using one socket for group "a", and one for "b".
- EXPECT_EQ(static_cast<int>(requests_.size()) - 2,
+ EXPECT_EQ(static_cast<int>(requests_.size()),
client_socket_factory_.allocation_count());
EXPECT_EQ(requests_.size() - kDefaultMaxSockets, completion_count_);
@@ -725,7 +722,7 @@ TEST_F(ClientSocketPoolBaseTest, TotalLimitCountsConnectingSockets) {
connect_job_factory_->set_job_type(TestConnectJob::kMockJob);
EXPECT_EQ(ERR_IO_PENDING, StartRequest("e", kDefaultPriority));
- ReleaseAllConnections(KEEP_ALIVE);
+ ReleaseAllConnections(NO_KEEP_ALIVE);
EXPECT_EQ(static_cast<int>(requests_.size()),
client_socket_factory_.allocation_count());
@@ -766,7 +763,7 @@ TEST_F(ClientSocketPoolBaseTest, MayHaveStalledGroupReset) {
// After releasing first connection for "a", we're still at the
// maximum sockets limit, but every group's pending queue is empty,
// so we reset the flag.
- EXPECT_TRUE(ReleaseOneConnection(KEEP_ALIVE));
+ EXPECT_TRUE(ReleaseOneConnection(NO_KEEP_ALIVE));
EXPECT_FALSE(pool_->base()->may_have_stalled_group());
// Requesting additional socket while at the total limit should
@@ -781,17 +778,69 @@ TEST_F(ClientSocketPoolBaseTest, MayHaveStalledGroupReset) {
// We're at the maximum socket limit, and still have one request pending
// for "d". Flag should be "on".
- EXPECT_TRUE(ReleaseOneConnection(KEEP_ALIVE));
+ EXPECT_TRUE(ReleaseOneConnection(NO_KEEP_ALIVE));
EXPECT_TRUE(pool_->base()->may_have_stalled_group());
// Now every group's pending queue should be empty again.
- EXPECT_TRUE(ReleaseOneConnection(KEEP_ALIVE));
+ EXPECT_TRUE(ReleaseOneConnection(NO_KEEP_ALIVE));
EXPECT_FALSE(pool_->base()->may_have_stalled_group());
- ReleaseAllConnections(KEEP_ALIVE);
+ ReleaseAllConnections(NO_KEEP_ALIVE);
EXPECT_FALSE(pool_->base()->may_have_stalled_group());
}
+TEST_F(ClientSocketPoolBaseTest, CloseIdleSocketAtSocketLimit) {
+ CreatePool(kDefaultMaxSockets, kDefaultMaxSocketsPerGroup);
+ connect_job_factory_->set_job_type(TestConnectJob::kMockJob);
+
+ for (int i = 0; i < kDefaultMaxSockets; ++i) {
+ ClientSocketHandle handle;
+ TestCompletionCallback callback;
+ EXPECT_EQ(OK,
+ InitHandle(&handle, IntToString(i), kDefaultPriority, &callback,
+ pool_, NULL));
+ }
+
+ // Stall a group
+ ClientSocketHandle handle;
+ TestCompletionCallback callback;
+ EXPECT_EQ(ERR_IO_PENDING,
+ InitHandle(&handle, "foo", kDefaultPriority, &callback, pool_,
+ NULL));
+
+ // Cancel the stalled request.
+ handle.Reset();
+
+ // Flush all the DoReleaseSocket tasks.
+ MessageLoop::current()->RunAllPending();
+
+ EXPECT_EQ(kDefaultMaxSockets, client_socket_factory_.allocation_count());
+ EXPECT_EQ(kDefaultMaxSockets, pool_->IdleSocketCount());
+
+ for (int i = 0; i < kDefaultMaxSockets; ++i) {
+ ClientSocketHandle handle;
+ TestCompletionCallback callback;
+ EXPECT_EQ(OK,
+ InitHandle(&handle, StringPrintf("Take 2: %d", i),
+ kDefaultPriority, &callback, pool_, NULL));
+ }
+
+ EXPECT_EQ(2 * kDefaultMaxSockets, client_socket_factory_.allocation_count());
+ EXPECT_EQ(0, pool_->IdleSocketCount());
+
+ // Before the next round of DoReleaseSocket tasks run, we will hit the
+ // socket limit.
+
+ EXPECT_EQ(ERR_IO_PENDING,
+ InitHandle(&handle, "foo", kDefaultPriority, &callback, pool_,
+ NULL));
+
+ // But if we wait for it, the released idle sockets will be closed to make
+ // room for the waiting request.
+
+ EXPECT_EQ(OK, callback.WaitForResult());
+}
+
TEST_F(ClientSocketPoolBaseTest, PendingRequests) {
CreatePool(kDefaultMaxSockets, kDefaultMaxSocketsPerGroup);
@@ -1112,9 +1161,8 @@ TEST_F(ClientSocketPoolBaseTest, GroupWithPendingRequestsIsNotEmpty) {
// Closing idle sockets should not get us into trouble, but in the bug
// we were hitting a CHECK here.
- EXPECT_EQ(2, pool_->IdleSocketCountInGroup("a"));
- pool_->CloseIdleSockets();
EXPECT_EQ(0, pool_->IdleSocketCountInGroup("a"));
+ pool_->CloseIdleSockets();
}
TEST_F(ClientSocketPoolBaseTest, BasicAsynchronous) {