author     willchan@chromium.org <willchan@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>  2010-05-18 22:59:15 +0000
committer  willchan@chromium.org <willchan@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>  2010-05-18 22:59:15 +0000
commit     a7fedd46c5167b048b6fa61e95969ca475e06904 (patch)
tree       b493fc4c7c0dddc6f47a1072ed9ca339d7e0e3f2 /net
parent     57105fe4d3b6b91ed17ba52c4c51e0d2fc784790 (diff)
Reland my close on idle socket change.
This reverts r44402, which reverted r44150 (the fix for r43882) and r43882 (the
original change). These changes were suspected of causing the infinite loop on
the IO thread and also a crash, but it turned out that the backup socket was the
cause, so they should be safe to reland. There is one minor change from the
original, due to merging in the weeks of changes in between.

BUG=32817
Review URL: http://codereview.chromium.org/2077004

git-svn-id: svn://svn.chromium.org/chrome/trunk/src@47583 0039d316-1c4b-4281-b951-d872f2087c98
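The behavioral core of the patch: when a request hits the global socket limit but the pool still holds idle sockets, one idle socket is closed to make room instead of stalling the request, and idle sockets now count toward the global total. Below is a minimal, self-contained sketch of that idea. It is not the Chromium pool code; the TinyPool type, kMaxSockets, and all member names are hypothetical, and the real ClientSocketPoolBaseHelper handles priorities, groups, connect jobs, and net logging that are omitted here.

// Standalone sketch (hypothetical names) of "close one idle socket at the
// global limit" as introduced by this patch. Compile with a C++11 compiler.
#include <deque>
#include <iostream>
#include <map>
#include <string>

struct TinyPool {
  static const int kMaxSockets = 4;

  int handed_out = 0;                            // sockets handed out to callers
  int connecting = 0;                            // sockets still connecting
  std::map<std::string, std::deque<int> > idle;  // idle sockets per group

  int IdleCount() const {
    int n = 0;
    for (const auto& g : idle)
      n += static_cast<int>(g.second.size());
    return n;
  }

  // Analogous to the patched ReachedMaxSocketsLimit(): idle sockets now
  // count toward the global total.
  bool ReachedMaxSocketsLimit() const {
    return handed_out + connecting + IdleCount() >= kMaxSockets;
  }

  // Analogous to CloseOneIdleSocket(): close the first idle socket found,
  // dropping the group if it becomes empty.
  void CloseOneIdleSocket() {
    for (auto it = idle.begin(); it != idle.end(); ++it) {
      if (!it->second.empty()) {
        it->second.pop_front();          // "delete" the socket
        if (it->second.empty())
          idle.erase(it);
        return;
      }
    }
  }

  // Simplified request path: reuse an idle socket from the same group if one
  // exists; otherwise, at the global limit, close an idle socket from some
  // group rather than stalling the request.
  bool RequestSocket(const std::string& group) {
    auto it = idle.find(group);
    if (it != idle.end() && !it->second.empty()) {
      it->second.pop_front();
      ++handed_out;
      return true;                       // reused an idle socket
    }
    if (ReachedMaxSocketsLimit()) {
      if (IdleCount() == 0)
        return false;                    // truly stalled (ERR_IO_PENDING)
      CloseOneIdleSocket();              // make room for the new connection
    }
    ++handed_out;                        // "connect" a new socket
    return true;
  }
};

int main() {
  TinyPool pool;
  pool.idle["a"] = {1, 2};
  pool.idle["b"] = {3, 4};
  // At the limit (4 idle sockets), but a request for new group "c" still
  // succeeds because one idle socket is closed to make room.
  std::cout << std::boolalpha << pool.RequestSocket("c") << "\n";  // true
  std::cout << pool.IdleCount() << "\n";                           // 3
}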
Diffstat (limited to 'net')
-rw-r--r--  net/socket/client_socket_pool_base.cc            77
-rw-r--r--  net/socket/client_socket_pool_base.h              21
-rw-r--r--  net/socket/client_socket_pool_base_unittest.cc   117
-rw-r--r--  net/socket/tcp_client_socket_pool.cc               2
4 files changed, 172 insertions(+), 45 deletions(-)
diff --git a/net/socket/client_socket_pool_base.cc b/net/socket/client_socket_pool_base.cc
index b008c54..820e9e2 100644
--- a/net/socket/client_socket_pool_base.cc
+++ b/net/socket/client_socket_pool_base.cc
@@ -187,23 +187,6 @@ int ClientSocketPoolBaseHelper::RequestSocketInternal(
CHECK(handle);
Group& group = group_map_[group_name];
- // Can we make another active socket now?
- if (ReachedMaxSocketsLimit() ||
- !group.HasAvailableSocketSlot(max_sockets_per_group_)) {
- if (ReachedMaxSocketsLimit()) {
- // We could check if we really have a stalled group here, but it requires
- // a scan of all groups, so just flip a flag here, and do the check later.
- may_have_stalled_group_ = true;
-
- request->net_log().AddEvent(NetLog::TYPE_SOCKET_POOL_STALLED_MAX_SOCKETS,
- NULL);
- } else {
- request->net_log().AddEvent(
- NetLog::TYPE_SOCKET_POOL_STALLED_MAX_SOCKETS_PER_GROUP, NULL);
- }
- return ERR_IO_PENDING;
- }
-
// Try to reuse a socket.
while (!group.idle_sockets.empty()) {
IdleSocket idle_socket = group.idle_sockets.back();
@@ -221,6 +204,26 @@ int ClientSocketPoolBaseHelper::RequestSocketInternal(
delete idle_socket.socket;
}
+ // Can we make another active socket now?
+ if (!group.HasAvailableSocketSlot(max_sockets_per_group_)) {
+ request->net_log().AddEvent(
+ NetLog::TYPE_SOCKET_POOL_STALLED_MAX_SOCKETS_PER_GROUP, NULL);
+ return ERR_IO_PENDING;
+ }
+
+ if (ReachedMaxSocketsLimit()) {
+ if (idle_socket_count() > 0) {
+ CloseOneIdleSocket();
+ } else {
+ // We could check if we really have a stalled group here, but it requires
+ // a scan of all groups, so just flip a flag here, and do the check later.
+ may_have_stalled_group_ = true;
+ request->net_log().AddEvent(
+ NetLog::TYPE_SOCKET_POOL_STALLED_MAX_SOCKETS, NULL);
+ return ERR_IO_PENDING;
+ }
+ }
+
// See if we already have enough connect jobs or sockets that will be released
// soon.
if (group.HasReleasingSockets()) {
@@ -502,10 +505,14 @@ void ClientSocketPoolBaseHelper::DoReleaseSocket(const std::string& group_name,
int stalled_group_count = FindTopStalledGroup(&top_group, &top_group_name);
if (stalled_group_count >= 1) {
if (ReachedMaxSocketsLimit()) {
- // We can't activate more sockets since we're already at our global
- // limit.
- may_have_stalled_group_ = true;
- return;
+ if (idle_socket_count() > 0) {
+ CloseOneIdleSocket();
+ } else {
+ // We can't activate more sockets since we're already at our global
+ // limit.
+ may_have_stalled_group_ = true;
+ return;
+ }
}
ProcessPendingRequest(top_group_name, top_group);
@@ -636,8 +643,10 @@ void ClientSocketPoolBaseHelper::OnAvailableSocketSlot(
(stalled_group_count == 1 && top_group->num_releasing_sockets == 0)) {
may_have_stalled_group_ = false;
}
- if (stalled_group_count >= 1)
+ if (stalled_group_count >= 1) {
+ CHECK_GE(1, idle_socket_count());
ProcessPendingRequest(top_group_name, top_group);
+ }
} else if (!group->pending_requests.empty()) {
ProcessPendingRequest(group_name, group);
// |group| may no longer be valid after this point. Be careful not to
@@ -726,7 +735,8 @@ void ClientSocketPoolBaseHelper::CancelAllConnectJobs() {
bool ClientSocketPoolBaseHelper::ReachedMaxSocketsLimit() const {
// Each connecting socket will eventually connect and be handed out.
- int total = handed_out_socket_count_ + connecting_socket_count_;
+ int total = handed_out_socket_count_ + connecting_socket_count_ +
+ idle_socket_count();
DCHECK_LE(total, max_sockets_);
if (total < max_sockets_)
return false;
@@ -734,6 +744,27 @@ bool ClientSocketPoolBaseHelper::ReachedMaxSocketsLimit() const {
return true;
}
+void ClientSocketPoolBaseHelper::CloseOneIdleSocket() {
+ CHECK_GT(idle_socket_count(), 0);
+
+ for (GroupMap::iterator i = group_map_.begin(); i != group_map_.end(); ++i) {
+ Group& group = i->second;
+
+ if (!group.idle_sockets.empty()) {
+ std::deque<IdleSocket>::iterator j = group.idle_sockets.begin();
+ delete j->socket;
+ group.idle_sockets.erase(j);
+ DecrementIdleCount();
+ if (group.IsEmpty())
+ group_map_.erase(i);
+
+ return;
+ }
+ }
+
+ LOG(DFATAL) << "No idle socket found to close!.";
+}
+
} // namespace internal
} // namespace net
diff --git a/net/socket/client_socket_pool_base.h b/net/socket/client_socket_pool_base.h
index 55ce7f7..23b293b 100644
--- a/net/socket/client_socket_pool_base.h
+++ b/net/socket/client_socket_pool_base.h
@@ -225,7 +225,7 @@ class ClientSocketPoolBaseHelper
return connect_job_factory_->ConnectionTimeout();
}
- void enable_backup_jobs() { backup_jobs_enabled_ = true; };
+ void EnableBackupJobs() { backup_jobs_enabled_ = true; };
private:
friend class base::RefCounted<ClientSocketPoolBaseHelper>;
@@ -387,6 +387,13 @@ class ClientSocketPoolBaseHelper
// Called when the backup socket timer fires.
void OnBackupSocketTimerFired(const std::string& group_name);
+ // Closes one idle socket. Picks the first one encountered.
+ // TODO(willchan): Consider a better algorithm for doing this. Perhaps we
+ // should keep an ordered list of idle sockets, and close them in order.
+ // Requires maintaining more state. It's not clear if it's worth it since
+ // I'm not sure if we hit this situation often.
+ void CloseOneIdleSocket();
+
GroupMap group_map_;
// Timer used to periodically prune idle sockets that timed out or can't be
@@ -426,9 +433,13 @@ class ClientSocketPoolBaseHelper
// |max_sockets_per_group_| limit. So choosing the next request involves
// selecting the highest priority request across *all* groups.
//
- // Since reaching the maximum number of sockets is an edge case, we make note
- // of when it happens, and thus avoid doing the slower "scan all groups"
- // in the common case.
+ // |may_have_stalled_group_| is not conclusive, since when we cancel pending
+ // requests, we may reach the situation where we have the maximum number of
+ // sockets, but no request is stalled because of the global socket limit
+ // (although some requests may be blocked on the socket per group limit).
+ // We don't strictly maintain |may_have_stalled_group_|, since that would
+ // require a linear search through all groups in |group_map_| to see if one
+ // of them is stalled.
bool may_have_stalled_group_;
const scoped_ptr<ConnectJobFactory> connect_job_factory_;
@@ -568,7 +579,7 @@ class ClientSocketPoolBase {
const std::string& name() const { return name_; }
- void enable_backup_jobs() { helper_->enable_backup_jobs(); };
+ void EnableBackupJobs() { helper_->EnableBackupJobs(); };
private:
// This adaptor class exists to bridge the
diff --git a/net/socket/client_socket_pool_base_unittest.cc b/net/socket/client_socket_pool_base_unittest.cc
index 6d68097..4fad0e0 100644
--- a/net/socket/client_socket_pool_base_unittest.cc
+++ b/net/socket/client_socket_pool_base_unittest.cc
@@ -9,6 +9,7 @@
#include "base/message_loop.h"
#include "base/platform_thread.h"
#include "base/scoped_vector.h"
+#include "base/string_util.h"
#include "net/base/net_log.h"
#include "net/base/net_log_unittest.h"
#include "net/base/net_errors.h"
@@ -342,6 +343,8 @@ class TestClientSocketPool : public ClientSocketPool {
void CleanupTimedOutIdleSockets() { base_.CleanupIdleSockets(false); }
+ void EnableBackupJobs() { base_.EnableBackupJobs(); }
+
private:
~TestClientSocketPool() {}
@@ -583,7 +586,7 @@ TEST_F(ClientSocketPoolBaseTest, TotalLimit) {
EXPECT_EQ(ERR_IO_PENDING, StartRequest("f", kDefaultPriority));
EXPECT_EQ(ERR_IO_PENDING, StartRequest("g", kDefaultPriority));
- ReleaseAllConnections(KEEP_ALIVE);
+ ReleaseAllConnections(NO_KEEP_ALIVE);
EXPECT_EQ(static_cast<int>(requests_.size()),
client_socket_factory_.allocation_count());
@@ -619,7 +622,7 @@ TEST_F(ClientSocketPoolBaseTest, TotalLimitReachedNewGroup) {
// Now create a new group and verify that we don't starve it.
EXPECT_EQ(ERR_IO_PENDING, StartRequest("c", kDefaultPriority));
- ReleaseAllConnections(KEEP_ALIVE);
+ ReleaseAllConnections(NO_KEEP_ALIVE);
EXPECT_EQ(static_cast<int>(requests_.size()),
client_socket_factory_.allocation_count());
@@ -650,11 +653,8 @@ TEST_F(ClientSocketPoolBaseTest, TotalLimitRespectsPriority) {
EXPECT_EQ(ERR_IO_PENDING, StartRequest("a", MEDIUM));
EXPECT_EQ(ERR_IO_PENDING, StartRequest("b", HIGHEST));
- ReleaseAllConnections(KEEP_ALIVE);
+ ReleaseAllConnections(NO_KEEP_ALIVE);
- // We're re-using one socket for group "a", and one for "b".
- EXPECT_EQ(static_cast<int>(requests_.size()) - 2,
- client_socket_factory_.allocation_count());
EXPECT_EQ(requests_.size() - kDefaultMaxSockets, completion_count_);
// First 4 requests don't have to wait, and finish in order.
@@ -688,10 +688,9 @@ TEST_F(ClientSocketPoolBaseTest, TotalLimitRespectsGroupLimit) {
EXPECT_EQ(ERR_IO_PENDING, StartRequest("a", LOW));
EXPECT_EQ(ERR_IO_PENDING, StartRequest("b", HIGHEST));
- ReleaseAllConnections(KEEP_ALIVE);
+ ReleaseAllConnections(NO_KEEP_ALIVE);
- // We're re-using one socket for group "a", and one for "b".
- EXPECT_EQ(static_cast<int>(requests_.size()) - 2,
+ EXPECT_EQ(static_cast<int>(requests_.size()),
client_socket_factory_.allocation_count());
EXPECT_EQ(requests_.size() - kDefaultMaxSockets, completion_count_);
@@ -736,7 +735,7 @@ TEST_F(ClientSocketPoolBaseTest, TotalLimitCountsConnectingSockets) {
connect_job_factory_->set_job_type(TestConnectJob::kMockJob);
EXPECT_EQ(ERR_IO_PENDING, StartRequest("e", kDefaultPriority));
- ReleaseAllConnections(KEEP_ALIVE);
+ ReleaseAllConnections(NO_KEEP_ALIVE);
EXPECT_EQ(static_cast<int>(requests_.size()),
client_socket_factory_.allocation_count());
@@ -777,7 +776,7 @@ TEST_F(ClientSocketPoolBaseTest, MayHaveStalledGroupReset) {
// After releasing first connection for "a", we're still at the
// maximum sockets limit, but every group's pending queue is empty,
// so we reset the flag.
- EXPECT_TRUE(ReleaseOneConnection(KEEP_ALIVE));
+ EXPECT_TRUE(ReleaseOneConnection(NO_KEEP_ALIVE));
EXPECT_FALSE(pool_->base()->may_have_stalled_group());
// Requesting additional socket while at the total limit should
@@ -792,14 +791,14 @@ TEST_F(ClientSocketPoolBaseTest, MayHaveStalledGroupReset) {
// We're at the maximum socket limit, and still have one request pending
// for "d". Flag should be "on".
- EXPECT_TRUE(ReleaseOneConnection(KEEP_ALIVE));
+ EXPECT_TRUE(ReleaseOneConnection(NO_KEEP_ALIVE));
EXPECT_TRUE(pool_->base()->may_have_stalled_group());
// Now every group's pending queue should be empty again.
- EXPECT_TRUE(ReleaseOneConnection(KEEP_ALIVE));
+ EXPECT_TRUE(ReleaseOneConnection(NO_KEEP_ALIVE));
EXPECT_FALSE(pool_->base()->may_have_stalled_group());
- ReleaseAllConnections(KEEP_ALIVE);
+ ReleaseAllConnections(NO_KEEP_ALIVE);
EXPECT_FALSE(pool_->base()->may_have_stalled_group());
}
@@ -855,6 +854,93 @@ TEST_F(ClientSocketPoolBaseTest, StallAndThenCancelAndTriggerAvailableSocket) {
handles[i].Reset();
}
+TEST_F(ClientSocketPoolBaseTest, CloseIdleSocketAtSocketLimit) {
+ CreatePool(kDefaultMaxSockets, kDefaultMaxSocketsPerGroup);
+ connect_job_factory_->set_job_type(TestConnectJob::kMockJob);
+
+ for (int i = 0; i < kDefaultMaxSockets; ++i) {
+ ClientSocketHandle handle;
+ TestCompletionCallback callback;
+ EXPECT_EQ(OK,
+ InitHandle(&handle, IntToString(i), kDefaultPriority, &callback,
+ pool_, BoundNetLog()));
+ }
+
+ // Stall a group
+ ClientSocketHandle handle;
+ TestCompletionCallback callback;
+ EXPECT_EQ(ERR_IO_PENDING,
+ InitHandle(&handle, "foo", kDefaultPriority, &callback, pool_,
+ BoundNetLog()));
+
+ // Cancel the stalled request.
+ handle.Reset();
+
+ // Flush all the DoReleaseSocket tasks.
+ MessageLoop::current()->RunAllPending();
+
+ EXPECT_EQ(kDefaultMaxSockets, client_socket_factory_.allocation_count());
+ EXPECT_EQ(kDefaultMaxSockets, pool_->IdleSocketCount());
+
+ for (int i = 0; i < kDefaultMaxSockets; ++i) {
+ ClientSocketHandle handle;
+ TestCompletionCallback callback;
+ EXPECT_EQ(OK,
+ InitHandle(&handle, StringPrintf("Take 2: %d", i),
+ kDefaultPriority, &callback, pool_, BoundNetLog()));
+ }
+
+ EXPECT_EQ(2 * kDefaultMaxSockets, client_socket_factory_.allocation_count());
+ EXPECT_EQ(0, pool_->IdleSocketCount());
+
+ // Before the next round of DoReleaseSocket tasks run, we will hit the
+ // socket limit.
+
+ EXPECT_EQ(ERR_IO_PENDING,
+ InitHandle(&handle, "foo", kDefaultPriority, &callback, pool_,
+ BoundNetLog()));
+
+ // But if we wait for it, the released idle sockets will be closed in
+ // preference of the waiting request.
+
+ EXPECT_EQ(OK, callback.WaitForResult());
+}
+
+// Regression test for http://crbug.com/40952.
+TEST_F(ClientSocketPoolBaseTest, CloseIdleSocketAtSocketLimitDeleteGroup) {
+ CreatePool(kDefaultMaxSockets, kDefaultMaxSocketsPerGroup);
+ pool_->EnableBackupJobs();
+ connect_job_factory_->set_job_type(TestConnectJob::kMockJob);
+
+ for (int i = 0; i < kDefaultMaxSockets; ++i) {
+ ClientSocketHandle handle;
+ TestCompletionCallback callback;
+ EXPECT_EQ(OK,
+ InitHandle(&handle, IntToString(i), kDefaultPriority, &callback,
+ pool_, BoundNetLog()));
+ }
+
+ // Flush all the DoReleaseSocket tasks.
+ MessageLoop::current()->RunAllPending();
+
+ // Stall a group. Set a pending job so it'll trigger a backup job if we don't
+ // reuse a socket.
+ connect_job_factory_->set_job_type(TestConnectJob::kMockPendingJob);
+ ClientSocketHandle handle;
+ TestCompletionCallback callback;
+
+ // "0" is special here, since it should be the first entry in the sorted map,
+ // which is the one which we would close an idle socket for. We shouldn't
+ // close an idle socket though, since we should reuse the idle socket.
+ EXPECT_EQ(
+ OK,
+ InitHandle(
+ &handle, "0", kDefaultPriority, &callback, pool_, BoundNetLog()));
+
+ EXPECT_EQ(kDefaultMaxSockets, client_socket_factory_.allocation_count());
+ EXPECT_EQ(kDefaultMaxSockets - 1, pool_->IdleSocketCount());
+}
+
TEST_F(ClientSocketPoolBaseTest, PendingRequests) {
CreatePool(kDefaultMaxSockets, kDefaultMaxSocketsPerGroup);
@@ -1181,9 +1267,8 @@ TEST_F(ClientSocketPoolBaseTest, GroupWithPendingRequestsIsNotEmpty) {
// Closing idle sockets should not get us into trouble, but in the bug
// we were hitting a CHECK here.
- EXPECT_EQ(2, pool_->IdleSocketCountInGroup("a"));
- pool_->CloseIdleSockets();
EXPECT_EQ(0, pool_->IdleSocketCountInGroup("a"));
+ pool_->CloseIdleSockets();
}
TEST_F(ClientSocketPoolBaseTest, BasicAsynchronous) {
diff --git a/net/socket/tcp_client_socket_pool.cc b/net/socket/tcp_client_socket_pool.cc
index 2f8e7675..b0762e3 100644
--- a/net/socket/tcp_client_socket_pool.cc
+++ b/net/socket/tcp_client_socket_pool.cc
@@ -182,7 +182,7 @@ TCPClientSocketPool::TCPClientSocketPool(
base::TimeDelta::FromSeconds(kUnusedIdleSocketTimeout),
base::TimeDelta::FromSeconds(kUsedIdleSocketTimeout),
new TCPConnectJobFactory(client_socket_factory, host_resolver)) {
- base_.enable_backup_jobs();
+ base_.EnableBackupJobs();
}
TCPClientSocketPool::~TCPClientSocketPool() {}