From 088da08b1e594e2e3cae7114394145e8268ab6c9 Mon Sep 17 00:00:00 2001
From: Selim Gurun
Date: Wed, 16 Nov 2011 11:15:36 -0800
Subject: Defer closing idle sockets.

Bug: 5523884

backport changes from chrome to defer closing idle sockets
until next socket request (for power efficiency). This does
not enable persistent connections.

Change-Id: Iab82f546e8607ddc4cacc576cda52c4d2d536570
---
 net/socket/client_socket_pool_base.cc          | 40 +++++++++++--
 net/socket/client_socket_pool_base.h           | 14 +++++
 net/socket/client_socket_pool_base_unittest.cc | 82 ++++++++++++++++++++++++++
 3 files changed, 132 insertions(+), 4 deletions(-)
(limited to 'net')

diff --git a/net/socket/client_socket_pool_base.cc b/net/socket/client_socket_pool_base.cc
index b6f6190..46cfc45 100644
--- a/net/socket/client_socket_pool_base.cc
+++ b/net/socket/client_socket_pool_base.cc
@@ -20,6 +20,14 @@ using base::TimeDelta;
 
 namespace {
 
+// Indicate whether we should enable idle socket cleanup timer. When timer is
+// disabled, sockets are closed next time a socket request is made.
+#ifdef ANDROID
+bool g_cleanup_timer_enabled = false;
+#else
+bool g_cleanup_timer_enabled = true;
+#endif
+
 // The timeout value, in seconds, used to clean up idle sockets that can't be
 // reused.
 //
@@ -164,6 +172,7 @@ ClientSocketPoolBaseHelper::ClientSocketPoolBaseHelper(
       handed_out_socket_count_(0),
       max_sockets_(max_sockets),
       max_sockets_per_group_(max_sockets_per_group),
+      use_cleanup_timer_(g_cleanup_timer_enabled),
       unused_idle_socket_timeout_(unused_idle_socket_timeout),
       used_idle_socket_timeout_(used_idle_socket_timeout),
       connect_job_factory_(connect_job_factory),
@@ -219,6 +228,10 @@ int ClientSocketPoolBaseHelper::RequestSocket(
   CHECK(request->callback());
   CHECK(request->handle());
 
+  // Cleanup any timed-out idle sockets if no timer is used.
+  if (!use_cleanup_timer_)
+    CleanupIdleSockets(false);
+
   request->net_log().BeginEvent(NetLog::TYPE_SOCKET_POOL, NULL);
   Group* group = GetOrCreateGroup(group_name);
 
@@ -240,6 +253,10 @@ void ClientSocketPoolBaseHelper::RequestSockets(
   DCHECK(!request.callback());
   DCHECK(!request.handle());
 
+  // Cleanup any timed out idle sockets if no timer is used.
+  if (!use_cleanup_timer_)
+    CleanupIdleSockets(false);
+
   if (num_sockets > max_sockets_per_group_) {
     num_sockets = max_sockets_per_group_;
   }
@@ -664,8 +681,7 @@ void ClientSocketPoolBaseHelper::EnableConnectBackupJobs() {
 
 void ClientSocketPoolBaseHelper::IncrementIdleCount() {
   if (++idle_socket_count_ == 1)
-    timer_.Start(TimeDelta::FromSeconds(kCleanupInterval), this,
-                 &ClientSocketPoolBaseHelper::OnCleanupTimerFired);
+    StartIdleSocketTimer();
 }
 
 void ClientSocketPoolBaseHelper::DecrementIdleCount() {
@@ -673,6 +689,23 @@ void ClientSocketPoolBaseHelper::DecrementIdleCount() {
     timer_.Stop();
 }
 
+// static
+bool ClientSocketPoolBaseHelper::cleanup_timer_enabled() {
+  return g_cleanup_timer_enabled;
+}
+
+// static
+bool ClientSocketPoolBaseHelper::set_cleanup_timer_enabled(bool enabled) {
+  bool old_value = g_cleanup_timer_enabled;
+  g_cleanup_timer_enabled = enabled;
+  return old_value;
+}
+
+void ClientSocketPoolBaseHelper::StartIdleSocketTimer() {
+  timer_.Start(TimeDelta::FromSeconds(kCleanupInterval), this,
+               &ClientSocketPoolBaseHelper::OnCleanupTimerFired);
+}
+
 void ClientSocketPoolBaseHelper::ReleaseSocket(const std::string& group_name,
                                                ClientSocket* socket,
                                                int id) {
@@ -863,8 +896,7 @@ void ClientSocketPoolBaseHelper::ProcessPendingRequest(
       RemoveGroup(group_name);
 
     request->net_log().EndEventWithNetErrorCode(NetLog::TYPE_SOCKET_POOL, rv);
-    InvokeUserCallbackLater(
-        request->handle(), request->callback(), rv);
+    InvokeUserCallbackLater(request->handle(), request->callback(), rv);
   }
 }
 
diff --git a/net/socket/client_socket_pool_base.h b/net/socket/client_socket_pool_base.h
index b5c4c04..bd30a8b 100644
--- a/net/socket/client_socket_pool_base.h
+++ b/net/socket/client_socket_pool_base.h
@@ -282,6 +282,14 @@ class ClientSocketPoolBaseHelper
 
   bool HasGroup(const std::string& group_name) const;
 
+  // Called to enable/disable cleaning up idle sockets. When enabled,
+  // idle sockets that have been around for longer than a period defined
+  // by kCleanupInterval are cleaned up using a timer. Otherwise they are
+  // closed next time client makes a request. This may reduce network
+  // activity and power consumption.
+  static bool cleanup_timer_enabled();
+  static bool set_cleanup_timer_enabled(bool enabled);
+
   // Closes all idle sockets if |force| is true. Else, only closes idle
   // sockets that timed out or can't be reused. Made public for testing.
   void CleanupIdleSockets(bool force);
@@ -431,6 +439,9 @@ class ClientSocketPoolBaseHelper
   void IncrementIdleCount();
   void DecrementIdleCount();
 
+  // Start cleanup timer for idle sockets.
+  void StartIdleSocketTimer();
+
   // Scans the group map for groups which have an available socket slot and
   // at least one pending request. Returns true if any groups are stalled, and
   // if so, fills |group| and |group_name| with data of the stalled group
@@ -541,6 +552,9 @@ class ClientSocketPoolBaseHelper
   // The maximum number of sockets kept per group.
   const int max_sockets_per_group_;
 
+  // Whether to use timer to cleanup idle sockets.
+  bool use_cleanup_timer_;
+
   // The time to wait until closing idle sockets.
   const base::TimeDelta unused_idle_socket_timeout_;
   const base::TimeDelta used_idle_socket_timeout_;
 
diff --git a/net/socket/client_socket_pool_base_unittest.cc b/net/socket/client_socket_pool_base_unittest.cc
index eb83289..2fa5a7c 100644
--- a/net/socket/client_socket_pool_base_unittest.cc
+++ b/net/socket/client_socket_pool_base_unittest.cc
@@ -534,11 +534,15 @@ class ClientSocketPoolBaseTest : public testing::Test {
     connect_backup_jobs_enabled_ =
         internal::ClientSocketPoolBaseHelper::connect_backup_jobs_enabled();
     internal::ClientSocketPoolBaseHelper::set_connect_backup_jobs_enabled(true);
+    cleanup_timer_enabled_ =
+        internal::ClientSocketPoolBaseHelper::cleanup_timer_enabled();
   }
 
   virtual ~ClientSocketPoolBaseTest() {
     internal::ClientSocketPoolBaseHelper::set_connect_backup_jobs_enabled(
        connect_backup_jobs_enabled_);
+    internal::ClientSocketPoolBaseHelper::set_cleanup_timer_enabled(
+        cleanup_timer_enabled_);
   }
 
   void CreatePool(int max_sockets, int max_sockets_per_group) {
@@ -589,6 +593,7 @@ class ClientSocketPoolBaseTest : public testing::Test {
   size_t completion_count() const { return test_base_.completion_count(); }
 
   bool connect_backup_jobs_enabled_;
+  bool cleanup_timer_enabled_;
   MockClientSocketFactory client_socket_factory_;
   TestConnectJobFactory* connect_job_factory_;
   scoped_refptr params_;
@@ -1861,6 +1866,83 @@ TEST_F(ClientSocketPoolBaseTest, AdditionalErrorStateAsynchronous) {
   EXPECT_FALSE(handle.ssl_error_response_info().headers.get() == NULL);
 }
 
+TEST_F(ClientSocketPoolBaseTest, DisableCleanupTimer) {
+  // Disable cleanup timer.
+  internal::ClientSocketPoolBaseHelper::set_cleanup_timer_enabled(false);
+
+  CreatePoolWithIdleTimeouts(
+      kDefaultMaxSockets, kDefaultMaxSocketsPerGroup,
+      base::TimeDelta::FromMilliseconds(10),  // Time out unused sockets
+      base::TimeDelta::FromMilliseconds(10));  // Time out used sockets
+
+  connect_job_factory_->set_job_type(TestConnectJob::kMockPendingJob);
+
+  // Startup two mock pending connect jobs, which will sit in the MessageLoop.
+
+  ClientSocketHandle handle;
+  TestOldCompletionCallback callback;
+  int rv = handle.Init("a",
+                       params_,
+                       LOWEST,
+                       &callback,
+                       pool_.get(),
+                       BoundNetLog());
+  EXPECT_EQ(ERR_IO_PENDING, rv);
+  EXPECT_EQ(LOAD_STATE_CONNECTING, pool_->GetLoadState("a", &handle));
+
+  ClientSocketHandle handle2;
+  TestOldCompletionCallback callback2;
+  rv = handle2.Init("a",
+                    params_,
+                    LOWEST,
+                    &callback2,
+                    pool_.get(),
+                    BoundNetLog());
+  EXPECT_EQ(ERR_IO_PENDING, rv);
+  EXPECT_EQ(LOAD_STATE_CONNECTING, pool_->GetLoadState("a", &handle2));
+
+  // Cancel one of the requests. Wait for the other, which will get the first
+  // job. Release the socket. Run the loop again to make sure the second
+  // socket is sitting idle and the first one is released (since ReleaseSocket()
+  // just posts a DoReleaseSocket() task).
+
+  handle.Reset();
+  EXPECT_EQ(OK, callback2.WaitForResult());
+  // Use the socket.
+  EXPECT_EQ(1, handle2.socket()->Write(NULL, 1, NULL));
+  handle2.Reset();
+
+  // The idle socket timeout value was set to 10 milliseconds. Wait 20
+  // milliseconds so the sockets timeout.
+  base::PlatformThread::Sleep(20);
+  MessageLoop::current()->RunAllPending();
+
+  ASSERT_EQ(2, pool_->IdleSocketCount());
+
+  // Request a new socket. This should cleanup the unused and timed out ones.
+  // A new socket will be created rather than reusing the idle one.
+  CapturingBoundNetLog log(CapturingNetLog::kUnbounded);
+  rv = handle.Init("a",
+                   params_,
+                   LOWEST,
+                   &callback,
+                   pool_.get(),
+                   log.bound());
+  EXPECT_EQ(ERR_IO_PENDING, rv);
+  EXPECT_EQ(OK, callback.WaitForResult());
+  EXPECT_FALSE(handle.is_reused());
+
+  // Make sure the idle socket is closed.
+  ASSERT_TRUE(pool_->HasGroup("a"));
+  EXPECT_EQ(0, pool_->IdleSocketCountInGroup("a"));
+  EXPECT_EQ(1, pool_->NumActiveSocketsInGroup("a"));
+
+  net::CapturingNetLog::EntryList entries;
+  log.GetEntries(&entries);
+  EXPECT_FALSE(LogContainsEntryWithType(
+      entries, 1, NetLog::TYPE_SOCKET_POOL_REUSED_AN_EXISTING_SOCKET));
+}
+
 TEST_F(ClientSocketPoolBaseTest, CleanupTimedOutIdleSockets) {
   CreatePoolWithIdleTimeouts(
       kDefaultMaxSockets, kDefaultMaxSocketsPerGroup,
--
cgit v1.1
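
The patch never shows the two cleanup strategies side by side, so here is a minimal, self-contained sketch of the technique it backports: when the periodic cleanup timer is disabled, timed-out idle sockets are swept lazily at the start of the next socket request instead of on a timer, which lets the process stay quiescent between requests. This is not Chromium code; IdlePool, IdleEntry, the 10-second timeout, and the fake Connect() are hypothetical stand-ins for ClientSocketPoolBaseHelper's internals.

// Illustrative sketch only; hypothetical names throughout.
#include <chrono>
#include <list>

using Clock = std::chrono::steady_clock;

struct IdleEntry {
  int fd;                   // stand-in for a pooled socket
  Clock::time_point since;  // when the socket went idle
};

class IdlePool {
 public:
  explicit IdlePool(bool use_cleanup_timer)
      : use_cleanup_timer_(use_cleanup_timer) {}

  // With the timer disabled, every request first sweeps expired idle
  // sockets, mirroring the CleanupIdleSockets(false) call the patch adds
  // to RequestSocket()/RequestSockets().
  int RequestSocket() {
    if (!use_cleanup_timer_)
      CleanupIdleSockets(false /* force */);
    if (!idle_.empty()) {
      int fd = idle_.front().fd;
      idle_.pop_front();
      return fd;       // reuse a still-fresh idle socket
    }
    return Connect();  // otherwise open a new connection
  }

  void ReleaseSocket(int fd) { idle_.push_back({fd, Clock::now()}); }

 private:
  void CleanupIdleSockets(bool force) {
    const auto timeout = std::chrono::seconds(10);  // hypothetical idle timeout
    const auto now = Clock::now();
    idle_.remove_if([&](const IdleEntry& e) {
      return force || now - e.since >= timeout;
    });
  }

  int Connect() { return next_fd_++; }  // placeholder for a real connect()

  bool use_cleanup_timer_;
  std::list<IdleEntry> idle_;
  int next_fd_ = 3;
};

The trade-off in this deferred scheme is that expired idle sockets may linger until the next request arrives, which is what the commit message accepts in exchange for fewer timer wakeups and, on Android, lower power use.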