path: root/net/socket/client_socket_pool_base.h
author    willchan@chromium.org <willchan@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>    2010-06-10 21:30:54 +0000
committer willchan@chromium.org <willchan@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>    2010-06-10 21:30:54 +0000
commit 43a21b827aef4bb36eaa4f555696e7d64abc1885 (patch)
tree   69d1b40344998b2c84fba498db81c75ee228b090 /net/socket/client_socket_pool_base.h
parent a804fa4829632d13c40296b06864b99b00b3d844 (diff)
Reland my close on idle socket change (r43882+r44150).
I reverted it the first time because it was suspected of causing a hang on the
IO thread. A different CL caused that. When I relanded it previously, the fix
for the hang on the IO thread broke this (it changed an assertion). I've now
deleted that assertion.

BUG=32817
Review URL: http://codereview.chromium.org/2716004

git-svn-id: svn://svn.chromium.org/chrome/trunk/src@49444 0039d316-1c4b-4281-b951-d872f2087c98
Diffstat (limited to 'net/socket/client_socket_pool_base.h')
-rw-r--r--  net/socket/client_socket_pool_base.h  21
1 file changed, 16 insertions, 5 deletions
diff --git a/net/socket/client_socket_pool_base.h b/net/socket/client_socket_pool_base.h
index a037b0e..3e1de22 100644
--- a/net/socket/client_socket_pool_base.h
+++ b/net/socket/client_socket_pool_base.h
@@ -237,7 +237,7 @@ class ClientSocketPoolBaseHelper
     return connect_job_factory_->ConnectionTimeout();
   }
 
-  void enable_backup_jobs() { backup_jobs_enabled_ = true; }
+  void EnableBackupJobs() { backup_jobs_enabled_ = true; }
 
  private:
   friend class base::RefCounted<ClientSocketPoolBaseHelper>;
@@ -403,6 +403,13 @@ class ClientSocketPoolBaseHelper
   // Called when the backup socket timer fires.
   void OnBackupSocketTimerFired(const std::string& group_name);
 
+  // Closes one idle socket. Picks the first one encountered.
+  // TODO(willchan): Consider a better algorithm for doing this. Perhaps we
+  // should keep an ordered list of idle sockets, and close them in order.
+  // Requires maintaining more state. It's not clear if it's worth it since
+  // I'm not sure if we hit this situation often.
+  void CloseOneIdleSocket();
+
   GroupMap group_map_;
 
   // Timer used to periodically prune idle sockets that timed out or can't be
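For readers unfamiliar with this file, here is a minimal sketch of what the new CloseOneIdleSocket() could look like given the comment above ("picks the first one encountered"). The Group::idle_sockets layout, the IdleSocket::socket member, and the DecrementIdleCount() bookkeeping call are assumptions made for illustration, not necessarily the definitions used in client_socket_pool_base.cc.

void ClientSocketPoolBaseHelper::CloseOneIdleSocket() {
  for (GroupMap::iterator i = group_map_.begin(); i != group_map_.end(); ++i) {
    Group& group = i->second;
    if (group.idle_sockets.empty())
      continue;
    // Close the first idle socket encountered, then stop.
    std::deque<IdleSocket>::iterator j = group.idle_sockets.begin();
    delete j->socket;
    group.idle_sockets.erase(j);
    DecrementIdleCount();  // Assumed bookkeeping helper for the pool-wide idle count.
    return;
  }
}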
@@ -442,9 +449,13 @@ class ClientSocketPoolBaseHelper
   // |max_sockets_per_group_| limit. So choosing the next request involves
   // selecting the highest priority request across *all* groups.
   //
-  // Since reaching the maximum number of sockets is an edge case, we make note
-  // of when it happens, and thus avoid doing the slower "scan all groups"
-  // in the common case.
+  // |may_have_stalled_group_| is not conclusive, since when we cancel pending
+  // requests, we may reach the situation where we have the maximum number of
+  // sockets, but no request is stalled because of the global socket limit
+  // (although some requests may be blocked on the socket per group limit).
+  // We don't strictly maintain |may_have_stalled_group_|, since that would
+  // require a linear search through all groups in |group_map_| to see if one
+  // of them is stalled.
   bool may_have_stalled_group_;
 
   const scoped_ptr<ConnectJobFactory> connect_job_factory_;
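To make the comment concrete, here is a hedged sketch of how |may_have_stalled_group_| might gate the slow path when a socket slot frees up: only when the flag is set do we pay for the linear scan across all groups. The OnAvailableSocketSlot() signature and the FindTopStalledGroup()/ProcessPendingRequest() helpers are illustrative names, not necessarily the ones used in this class.

void ClientSocketPoolBaseHelper::OnAvailableSocketSlot(
    const std::string& group_name) {
  if (may_have_stalled_group_) {
    // Slow path: scan every group for the highest priority stalled request.
    std::string top_group_name;
    if (FindTopStalledGroup(&top_group_name)) {
      ProcessPendingRequest(top_group_name);
    } else {
      // Nothing was actually stalled on the global limit; clear the flag.
      may_have_stalled_group_ = false;
    }
  } else {
    // Common case: only the group that just freed a socket can make progress.
    ProcessPendingRequest(group_name);
  }
}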
@@ -594,7 +605,7 @@ class ClientSocketPoolBase {
     return histograms_;
   }
 
-  void enable_backup_jobs() { helper_->enable_backup_jobs(); }
+  void EnableBackupJobs() { helper_->EnableBackupJobs(); }
 
   void Flush() { helper_->Flush(); }
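As a usage note, the rename from enable_backup_jobs() to EnableBackupJobs() changes call sites as sketched below; presumably the CamelCase form reflects that this is now treated as a regular method rather than a trivial accessor. ConfigurePool() and MySocketParams are placeholder names, not part of this change.

// Before this change: pool->enable_backup_jobs();
void ConfigurePool(ClientSocketPoolBase<MySocketParams>* pool) {
  pool->EnableBackupJobs();
}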