author    willchan@chromium.org <willchan@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>  2010-06-08 06:12:41 +0000
committer willchan@chromium.org <willchan@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>  2010-06-08 06:12:41 +0000
commit    93054cc1fb19b52ab35ead21f28baf46c55bbfb9 (patch)
tree      147ba83f7c0d01ba4b93f1e5e04fb09ea3c58588 /net/socket
parent    896280751f9b58b33eafed60c0b3321755a0a3ac (diff)
download  chromium_src-93054cc1fb19b52ab35ead21f28baf46c55bbfb9.zip
          chromium_src-93054cc1fb19b52ab35ead21f28baf46c55bbfb9.tar.gz
          chromium_src-93054cc1fb19b52ab35ead21f28baf46c55bbfb9.tar.bz2
Revert "Revert an idle sockets change to trigger reliability bot. Will revert again soon."
Review URL: http://codereview.chromium.org/2761001

git-svn-id: svn://svn.chromium.org/chrome/trunk/src@49146 0039d316-1c4b-4281-b951-d872f2087c98
Diffstat (limited to 'net/socket')
-rw-r--r--  net/socket/client_socket_pool_base.cc           |  77
-rw-r--r--  net/socket/client_socket_pool_base.h             |  21
-rw-r--r--  net/socket/client_socket_pool_base_unittest.cc   | 117
-rw-r--r--  net/socket/tcp_client_socket_pool.cc             |   2
4 files changed, 45 insertions, 172 deletions
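
For context, a minimal standalone sketch (not part of the commit; names and types are simplified and hypothetical) of the socket-limit behavior restored by this revert: idle sockets no longer count toward the global limit, and a request that hits either limit stalls with ERR_IO_PENDING while may_have_stalled_group_ defers the full group scan, rather than closing an idle socket.

    // Illustrative only; the real logic lives in
    // ClientSocketPoolBaseHelper::RequestSocketInternal() and
    // ReachedMaxSocketsLimit() in net/socket/client_socket_pool_base.cc.
    #include <iostream>

    const int OK = 0;
    const int ERR_IO_PENDING = -1;

    struct PoolState {
      int handed_out_socket_count;
      int connecting_socket_count;
      int max_sockets;
      int max_sockets_per_group;
      bool may_have_stalled_group;
    };

    struct GroupState {
      int active_socket_count;  // Handed out + connecting for this group.
    };

    // After this change, idle sockets are excluded from the global total.
    bool ReachedMaxSocketsLimit(const PoolState& pool) {
      int total = pool.handed_out_socket_count + pool.connecting_socket_count;
      return total >= pool.max_sockets;
    }

    bool HasAvailableSocketSlot(const GroupState& group, int max_per_group) {
      return group.active_socket_count < max_per_group;
    }

    // The limit check now runs before any idle-socket reuse: hitting a limit
    // stalls the request instead of closing an idle socket, and the flag
    // defers the "scan all groups for a stalled one" to a later pass.
    int RequestSocket(PoolState* pool, const GroupState& group) {
      if (ReachedMaxSocketsLimit(*pool) ||
          !HasAvailableSocketSlot(group, pool->max_sockets_per_group)) {
        if (ReachedMaxSocketsLimit(*pool))
          pool->may_have_stalled_group = true;
        return ERR_IO_PENDING;
      }
      return OK;  // Proceed to reuse an idle socket or start a connect job.
    }

    int main() {
      PoolState pool = {256, 0, 256, 6, false};
      GroupState group = {2};
      std::cout << RequestSocket(&pool, group) << "\n";   // -1 (ERR_IO_PENDING)
      std::cout << pool.may_have_stalled_group << "\n";   // 1
      return 0;
    }
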
diff --git a/net/socket/client_socket_pool_base.cc b/net/socket/client_socket_pool_base.cc
index 594a858..80857e5 100644
--- a/net/socket/client_socket_pool_base.cc
+++ b/net/socket/client_socket_pool_base.cc
@@ -212,6 +212,23 @@ int ClientSocketPoolBaseHelper::RequestSocketInternal(
CHECK(handle);
Group& group = group_map_[group_name];
+ // Can we make another active socket now?
+ if (ReachedMaxSocketsLimit() ||
+ !group.HasAvailableSocketSlot(max_sockets_per_group_)) {
+ if (ReachedMaxSocketsLimit()) {
+ // We could check if we really have a stalled group here, but it requires
+ // a scan of all groups, so just flip a flag here, and do the check later.
+ may_have_stalled_group_ = true;
+
+ request->net_log().AddEvent(NetLog::TYPE_SOCKET_POOL_STALLED_MAX_SOCKETS,
+ NULL);
+ } else {
+ request->net_log().AddEvent(
+ NetLog::TYPE_SOCKET_POOL_STALLED_MAX_SOCKETS_PER_GROUP, NULL);
+ }
+ return ERR_IO_PENDING;
+ }
+
// Try to reuse a socket.
while (!group.idle_sockets.empty()) {
IdleSocket idle_socket = group.idle_sockets.back();
@@ -229,26 +246,6 @@ int ClientSocketPoolBaseHelper::RequestSocketInternal(
delete idle_socket.socket;
}
- // Can we make another active socket now?
- if (!group.HasAvailableSocketSlot(max_sockets_per_group_)) {
- request->net_log().AddEvent(
- NetLog::TYPE_SOCKET_POOL_STALLED_MAX_SOCKETS_PER_GROUP, NULL);
- return ERR_IO_PENDING;
- }
-
- if (ReachedMaxSocketsLimit()) {
- if (idle_socket_count() > 0) {
- CloseOneIdleSocket();
- } else {
- // We could check if we really have a stalled group here, but it requires
- // a scan of all groups, so just flip a flag here, and do the check later.
- may_have_stalled_group_ = true;
- request->net_log().AddEvent(
- NetLog::TYPE_SOCKET_POOL_STALLED_MAX_SOCKETS, NULL);
- return ERR_IO_PENDING;
- }
- }
-
// See if we already have enough connect jobs or sockets that will be released
// soon.
if (group.HasReleasingSockets()) {
@@ -535,14 +532,10 @@ void ClientSocketPoolBaseHelper::DoReleaseSocket(const std::string& group_name,
int stalled_group_count = FindTopStalledGroup(&top_group, &top_group_name);
if (stalled_group_count >= 1) {
if (ReachedMaxSocketsLimit()) {
- if (idle_socket_count() > 0) {
- CloseOneIdleSocket();
- } else {
- // We can't activate more sockets since we're already at our global
- // limit.
- may_have_stalled_group_ = true;
- return;
- }
+ // We can't activate more sockets since we're already at our global
+ // limit.
+ may_have_stalled_group_ = true;
+ return;
}
ProcessPendingRequest(top_group_name, top_group);
@@ -680,10 +673,8 @@ void ClientSocketPoolBaseHelper::OnAvailableSocketSlot(
(stalled_group_count == 1 && top_group->num_releasing_sockets == 0)) {
may_have_stalled_group_ = false;
}
- if (stalled_group_count >= 1) {
- CHECK_GE(1, idle_socket_count());
+ if (stalled_group_count >= 1)
ProcessPendingRequest(top_group_name, top_group);
- }
} else if (!group->pending_requests.empty()) {
ProcessPendingRequest(group_name, group);
// |group| may no longer be valid after this point. Be careful not to
@@ -777,8 +768,7 @@ void ClientSocketPoolBaseHelper::CancelAllConnectJobs() {
bool ClientSocketPoolBaseHelper::ReachedMaxSocketsLimit() const {
// Each connecting socket will eventually connect and be handed out.
- int total = handed_out_socket_count_ + connecting_socket_count_ +
- idle_socket_count();
+ int total = handed_out_socket_count_ + connecting_socket_count_;
DCHECK_LE(total, max_sockets_);
if (total < max_sockets_)
return false;
@@ -786,27 +776,6 @@ bool ClientSocketPoolBaseHelper::ReachedMaxSocketsLimit() const {
return true;
}
-void ClientSocketPoolBaseHelper::CloseOneIdleSocket() {
- CHECK_GT(idle_socket_count(), 0);
-
- for (GroupMap::iterator i = group_map_.begin(); i != group_map_.end(); ++i) {
- Group& group = i->second;
-
- if (!group.idle_sockets.empty()) {
- std::deque<IdleSocket>::iterator j = group.idle_sockets.begin();
- delete j->socket;
- group.idle_sockets.erase(j);
- DecrementIdleCount();
- if (group.IsEmpty())
- group_map_.erase(i);
-
- return;
- }
- }
-
- LOG(DFATAL) << "No idle socket found to close!.";
-}
-
} // namespace internal
} // namespace net
diff --git a/net/socket/client_socket_pool_base.h b/net/socket/client_socket_pool_base.h
index a41696e..a037b0e 100644
--- a/net/socket/client_socket_pool_base.h
+++ b/net/socket/client_socket_pool_base.h
@@ -237,7 +237,7 @@ class ClientSocketPoolBaseHelper
return connect_job_factory_->ConnectionTimeout();
}
- void EnableBackupJobs() { backup_jobs_enabled_ = true; }
+ void enable_backup_jobs() { backup_jobs_enabled_ = true; }
private:
friend class base::RefCounted<ClientSocketPoolBaseHelper>;
@@ -403,13 +403,6 @@ class ClientSocketPoolBaseHelper
// Called when the backup socket timer fires.
void OnBackupSocketTimerFired(const std::string& group_name);
- // Closes one idle socket. Picks the first one encountered.
- // TODO(willchan): Consider a better algorithm for doing this. Perhaps we
- // should keep an ordered list of idle sockets, and close them in order.
- // Requires maintaining more state. It's not clear if it's worth it since
- // I'm not sure if we hit this situation often.
- void CloseOneIdleSocket();
-
GroupMap group_map_;
// Timer used to periodically prune idle sockets that timed out or can't be
@@ -449,13 +442,9 @@ class ClientSocketPoolBaseHelper
// |max_sockets_per_group_| limit. So choosing the next request involves
// selecting the highest priority request across *all* groups.
//
- // |may_have_stalled_group_| is not conclusive, since when we cancel pending
- // requests, we may reach the situation where we have the maximum number of
- // sockets, but not request is stalled because of the global socket limit
- // (although some requests may be blocked on the socket per group limit).
- // We don't strictly maintain |may_have_stalled_group_|, since that would
- // require a linear search through all groups in |group_map_| to see if one
- // of them is stalled.
+ // Since reaching the maximum number of sockets is an edge case, we make note
+ // of when it happens, and thus avoid doing the slower "scan all groups"
+ // in the common case.
bool may_have_stalled_group_;
const scoped_ptr<ConnectJobFactory> connect_job_factory_;
@@ -605,7 +594,7 @@ class ClientSocketPoolBase {
return histograms_;
}
- void EnableBackupJobs() { helper_->EnableBackupJobs(); }
+ void enable_backup_jobs() { helper_->enable_backup_jobs(); }
void Flush() { helper_->Flush(); }
diff --git a/net/socket/client_socket_pool_base_unittest.cc b/net/socket/client_socket_pool_base_unittest.cc
index 8c0e6ea..4b10cdb 100644
--- a/net/socket/client_socket_pool_base_unittest.cc
+++ b/net/socket/client_socket_pool_base_unittest.cc
@@ -9,7 +9,6 @@
#include "base/message_loop.h"
#include "base/platform_thread.h"
#include "base/scoped_vector.h"
-#include "base/string_util.h"
#include "net/base/net_log.h"
#include "net/base/net_log_unittest.h"
#include "net/base/net_errors.h"
@@ -351,8 +350,6 @@ class TestClientSocketPool : public ClientSocketPool {
void CleanupTimedOutIdleSockets() { base_.CleanupIdleSockets(false); }
- void EnableBackupJobs() { base_.EnableBackupJobs(); }
-
private:
~TestClientSocketPool() {}
@@ -598,7 +595,7 @@ TEST_F(ClientSocketPoolBaseTest, TotalLimit) {
EXPECT_EQ(ERR_IO_PENDING, StartRequest("f", kDefaultPriority));
EXPECT_EQ(ERR_IO_PENDING, StartRequest("g", kDefaultPriority));
- ReleaseAllConnections(NO_KEEP_ALIVE);
+ ReleaseAllConnections(KEEP_ALIVE);
EXPECT_EQ(static_cast<int>(requests_.size()),
client_socket_factory_.allocation_count());
@@ -634,7 +631,7 @@ TEST_F(ClientSocketPoolBaseTest, TotalLimitReachedNewGroup) {
// Now create a new group and verify that we don't starve it.
EXPECT_EQ(ERR_IO_PENDING, StartRequest("c", kDefaultPriority));
- ReleaseAllConnections(NO_KEEP_ALIVE);
+ ReleaseAllConnections(KEEP_ALIVE);
EXPECT_EQ(static_cast<int>(requests_.size()),
client_socket_factory_.allocation_count());
@@ -665,8 +662,11 @@ TEST_F(ClientSocketPoolBaseTest, TotalLimitRespectsPriority) {
EXPECT_EQ(ERR_IO_PENDING, StartRequest("a", MEDIUM));
EXPECT_EQ(ERR_IO_PENDING, StartRequest("b", HIGHEST));
- ReleaseAllConnections(NO_KEEP_ALIVE);
+ ReleaseAllConnections(KEEP_ALIVE);
+ // We're re-using one socket for group "a", and one for "b".
+ EXPECT_EQ(static_cast<int>(requests_.size()) - 2,
+ client_socket_factory_.allocation_count());
EXPECT_EQ(requests_.size() - kDefaultMaxSockets, completion_count_);
// First 4 requests don't have to wait, and finish in order.
@@ -700,9 +700,10 @@ TEST_F(ClientSocketPoolBaseTest, TotalLimitRespectsGroupLimit) {
EXPECT_EQ(ERR_IO_PENDING, StartRequest("a", LOW));
EXPECT_EQ(ERR_IO_PENDING, StartRequest("b", HIGHEST));
- ReleaseAllConnections(NO_KEEP_ALIVE);
+ ReleaseAllConnections(KEEP_ALIVE);
- EXPECT_EQ(static_cast<int>(requests_.size()),
+ // We're re-using one socket for group "a", and one for "b".
+ EXPECT_EQ(static_cast<int>(requests_.size()) - 2,
client_socket_factory_.allocation_count());
EXPECT_EQ(requests_.size() - kDefaultMaxSockets, completion_count_);
@@ -747,7 +748,7 @@ TEST_F(ClientSocketPoolBaseTest, TotalLimitCountsConnectingSockets) {
connect_job_factory_->set_job_type(TestConnectJob::kMockJob);
EXPECT_EQ(ERR_IO_PENDING, StartRequest("e", kDefaultPriority));
- ReleaseAllConnections(NO_KEEP_ALIVE);
+ ReleaseAllConnections(KEEP_ALIVE);
EXPECT_EQ(static_cast<int>(requests_.size()),
client_socket_factory_.allocation_count());
@@ -788,7 +789,7 @@ TEST_F(ClientSocketPoolBaseTest, MayHaveStalledGroupReset) {
// After releasing first connection for "a", we're still at the
// maximum sockets limit, but every group's pending queue is empty,
// so we reset the flag.
- EXPECT_TRUE(ReleaseOneConnection(NO_KEEP_ALIVE));
+ EXPECT_TRUE(ReleaseOneConnection(KEEP_ALIVE));
EXPECT_FALSE(pool_->base()->may_have_stalled_group());
// Requesting additional socket while at the total limit should
@@ -803,14 +804,14 @@ TEST_F(ClientSocketPoolBaseTest, MayHaveStalledGroupReset) {
// We're at the maximum socket limit, and still have one request pending
// for "d". Flag should be "on".
- EXPECT_TRUE(ReleaseOneConnection(NO_KEEP_ALIVE));
+ EXPECT_TRUE(ReleaseOneConnection(KEEP_ALIVE));
EXPECT_TRUE(pool_->base()->may_have_stalled_group());
// Now every group's pending queue should be empty again.
- EXPECT_TRUE(ReleaseOneConnection(NO_KEEP_ALIVE));
+ EXPECT_TRUE(ReleaseOneConnection(KEEP_ALIVE));
EXPECT_FALSE(pool_->base()->may_have_stalled_group());
- ReleaseAllConnections(NO_KEEP_ALIVE);
+ ReleaseAllConnections(KEEP_ALIVE);
EXPECT_FALSE(pool_->base()->may_have_stalled_group());
}
@@ -866,93 +867,6 @@ TEST_F(ClientSocketPoolBaseTest, StallAndThenCancelAndTriggerAvailableSocket) {
handles[i].Reset();
}
-TEST_F(ClientSocketPoolBaseTest, CloseIdleSocketAtSocketLimit) {
- CreatePool(kDefaultMaxSockets, kDefaultMaxSocketsPerGroup);
- connect_job_factory_->set_job_type(TestConnectJob::kMockJob);
-
- for (int i = 0; i < kDefaultMaxSockets; ++i) {
- ClientSocketHandle handle;
- TestCompletionCallback callback;
- EXPECT_EQ(OK,
- InitHandle(&handle, IntToString(i), kDefaultPriority, &callback,
- pool_, BoundNetLog()));
- }
-
- // Stall a group
- ClientSocketHandle handle;
- TestCompletionCallback callback;
- EXPECT_EQ(ERR_IO_PENDING,
- InitHandle(&handle, "foo", kDefaultPriority, &callback, pool_,
- BoundNetLog()));
-
- // Cancel the stalled request.
- handle.Reset();
-
- // Flush all the DoReleaseSocket tasks.
- MessageLoop::current()->RunAllPending();
-
- EXPECT_EQ(kDefaultMaxSockets, client_socket_factory_.allocation_count());
- EXPECT_EQ(kDefaultMaxSockets, pool_->IdleSocketCount());
-
- for (int i = 0; i < kDefaultMaxSockets; ++i) {
- ClientSocketHandle handle;
- TestCompletionCallback callback;
- EXPECT_EQ(OK,
- InitHandle(&handle, StringPrintf("Take 2: %d", i),
- kDefaultPriority, &callback, pool_, BoundNetLog()));
- }
-
- EXPECT_EQ(2 * kDefaultMaxSockets, client_socket_factory_.allocation_count());
- EXPECT_EQ(0, pool_->IdleSocketCount());
-
- // Before the next round of DoReleaseSocket tasks run, we will hit the
- // socket limit.
-
- EXPECT_EQ(ERR_IO_PENDING,
- InitHandle(&handle, "foo", kDefaultPriority, &callback, pool_,
- BoundNetLog()));
-
- // But if we wait for it, the released idle sockets will be closed in
- // preference of the waiting request.
-
- EXPECT_EQ(OK, callback.WaitForResult());
-}
-
-// Regression test for http://crbug.com/40952.
-TEST_F(ClientSocketPoolBaseTest, CloseIdleSocketAtSocketLimitDeleteGroup) {
- CreatePool(kDefaultMaxSockets, kDefaultMaxSocketsPerGroup);
- pool_->EnableBackupJobs();
- connect_job_factory_->set_job_type(TestConnectJob::kMockJob);
-
- for (int i = 0; i < kDefaultMaxSockets; ++i) {
- ClientSocketHandle handle;
- TestCompletionCallback callback;
- EXPECT_EQ(OK,
- InitHandle(&handle, IntToString(i), kDefaultPriority, &callback,
- pool_, BoundNetLog()));
- }
-
- // Flush all the DoReleaseSocket tasks.
- MessageLoop::current()->RunAllPending();
-
- // Stall a group. Set a pending job so it'll trigger a backup job if we don't
- // reuse a socket.
- connect_job_factory_->set_job_type(TestConnectJob::kMockPendingJob);
- ClientSocketHandle handle;
- TestCompletionCallback callback;
-
- // "0" is special here, since it should be the first entry in the sorted map,
- // which is the one which we would close an idle socket for. We shouldn't
- // close an idle socket though, since we should reuse the idle socket.
- EXPECT_EQ(
- OK,
- InitHandle(
- &handle, "0", kDefaultPriority, &callback, pool_, BoundNetLog()));
-
- EXPECT_EQ(kDefaultMaxSockets, client_socket_factory_.allocation_count());
- EXPECT_EQ(kDefaultMaxSockets - 1, pool_->IdleSocketCount());
-}
-
TEST_F(ClientSocketPoolBaseTest, PendingRequests) {
CreatePool(kDefaultMaxSockets, kDefaultMaxSocketsPerGroup);
@@ -1279,8 +1193,9 @@ TEST_F(ClientSocketPoolBaseTest, GroupWithPendingRequestsIsNotEmpty) {
// Closing idle sockets should not get us into trouble, but in the bug
// we were hitting a CHECK here.
- EXPECT_EQ(0, pool_->IdleSocketCountInGroup("a"));
+ EXPECT_EQ(2, pool_->IdleSocketCountInGroup("a"));
pool_->CloseIdleSockets();
+ EXPECT_EQ(0, pool_->IdleSocketCountInGroup("a"));
}
TEST_F(ClientSocketPoolBaseTest, BasicAsynchronous) {
diff --git a/net/socket/tcp_client_socket_pool.cc b/net/socket/tcp_client_socket_pool.cc
index b798bc4..c7eac14 100644
--- a/net/socket/tcp_client_socket_pool.cc
+++ b/net/socket/tcp_client_socket_pool.cc
@@ -186,7 +186,7 @@ TCPClientSocketPool::TCPClientSocketPool(
new TCPConnectJobFactory(client_socket_factory,
host_resolver, net_log),
network_change_notifier) {
- base_.EnableBackupJobs();
+ base_.enable_backup_jobs();
}
TCPClientSocketPool::~TCPClientSocketPool() {}