Diffstat (limited to 'net')
-rw-r--r--  net/socket/client_socket_pool_base.cc           32
-rw-r--r--  net/socket/client_socket_pool_base.h              9
-rw-r--r--  net/socket/client_socket_pool_base_unittest.cc   57
3 files changed, 95 insertions(+), 3 deletions(-)
diff --git a/net/socket/client_socket_pool_base.cc b/net/socket/client_socket_pool_base.cc
index 2428afa..0f13c0f 100644
--- a/net/socket/client_socket_pool_base.cc
+++ b/net/socket/client_socket_pool_base.cc
@@ -352,6 +352,7 @@ bool ClientSocketPoolBaseHelper::HasGroup(const std::string& group_name) const {

void ClientSocketPoolBaseHelper::CloseIdleSockets() {
  CleanupIdleSockets(true);
+  DCHECK_EQ(0, idle_socket_count_);
}

int ClientSocketPoolBaseHelper::IdleSocketCountInGroup(
@@ -689,7 +690,9 @@ void ClientSocketPoolBaseHelper::OnIPAddressChanged() {

void ClientSocketPoolBaseHelper::Flush() {
  pool_generation_number_++;
+  CancelAllConnectJobs();
  CloseIdleSockets();
+  AbortAllRequests();
}

void ClientSocketPoolBaseHelper::RemoveConnectJob(const ConnectJob* job,
@@ -787,6 +790,35 @@ void ClientSocketPoolBaseHelper::CancelAllConnectJobs() {

    // Delete group if no longer needed.
    if (group->IsEmpty()) {
+      // RemoveGroup() will call .erase() which will invalidate the iterator,
+      // but i will already have been incremented to a valid iterator before
+      // RemoveGroup() is called.
+      RemoveGroup(i++);
+    } else {
+      ++i;
+    }
+  }
+  DCHECK_EQ(0, connecting_socket_count_);
+}
+
+void ClientSocketPoolBaseHelper::AbortAllRequests() {
+  for (GroupMap::iterator i = group_map_.begin(); i != group_map_.end();) {
+    Group* group = i->second;
+
+    RequestQueue pending_requests;
+    pending_requests.swap(*group->mutable_pending_requests());
+    for (RequestQueue::iterator it2 = pending_requests.begin();
+         it2 != pending_requests.end(); ++it2) {
+      const Request* request = *it2;
+      InvokeUserCallbackLater(
+          request->handle(), request->callback(), ERR_ABORTED);
+    }
+
+    // Delete group if no longer needed.
+    if (group->IsEmpty()) {
+      // RemoveGroup() will call .erase() which will invalidate the iterator,
+      // but i will already have been incremented to a valid iterator before
+      // RemoveGroup() is called.
      RemoveGroup(i++);
    } else {
      ++i;
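Both loops above walk |group_map_| with the post-increment erase idiom described in the added comments: the iterator is advanced before the element it points to is removed, so the loop never touches an invalidated iterator. A minimal standalone sketch of the same idiom, using a plain std::map and an illustrative RemoveEmptyGroups() helper rather than the pool's own GroupMap/RemoveGroup():

#include <cassert>
#include <map>
#include <string>

// Erase entries that match a condition while iterating.  |i++| hands the
// current position to erase() and advances |i| past it first, so erase()
// only invalidates an iterator we no longer hold.
void RemoveEmptyGroups(std::map<std::string, int>* groups) {
  for (std::map<std::string, int>::iterator i = groups->begin();
       i != groups->end();) {
    if (i->second == 0) {
      groups->erase(i++);  // analogous to RemoveGroup(i++) above
    } else {
      ++i;
    }
  }
}

int main() {
  std::map<std::string, int> groups;
  groups["a"] = 0;
  groups["b"] = 2;
  RemoveEmptyGroups(&groups);
  assert(groups.size() == 1 && groups.count("b") == 1);
  return 0;
}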
diff --git a/net/socket/client_socket_pool_base.h b/net/socket/client_socket_pool_base.h
index 032ce38..2540fde 100644
--- a/net/socket/client_socket_pool_base.h
+++ b/net/socket/client_socket_pool_base.h
@@ -400,11 +400,14 @@ class ClientSocketPoolBaseHelper
  // whether or not the socket has previously been used.
  void AddIdleSocket(ClientSocket* socket, bool used, Group* group);

-  // Iterates through |connect_job_map_|, canceling all ConnectJobs.
-  // Afterwards, it iterates through all groups and deletes them if they are no
-  // longer needed.
+  // Iterates through |group_map_|, canceling all ConnectJobs and deleting
+  // groups if they are no longer needed.
  void CancelAllConnectJobs();

+  // Iterates through |group_map_|, posting ERR_ABORTED callbacks for all
+  // requests, and then deleting groups if they are no longer needed.
+  void AbortAllRequests();
+
  // Returns true if we can't create any more sockets due to the total limit.
  bool ReachedMaxSocketsLimit() const;
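AbortAllRequests() also swaps each group's pending-request queue into a local container before notifying anyone, and the ERR_ABORTED notifications are posted via InvokeUserCallbackLater() rather than run inline. That is what lets a callback safely re-enter the pool, as the new unit test below does. A rough sketch of the swap-before-notify pattern, with illustrative stand-in types (not the pool's real Request/RequestQueue) and callbacks modeled as direct writes for brevity:

#include <cassert>
#include <deque>

// Stand-in for a queued request; the real pool stores Request* objects that
// carry a handle and a completion callback.
struct FakeRequest {
  int result;
};

// Move the queue aside first: anything the "callbacks" do to |queue| while
// we iterate (such as enqueueing new requests) cannot disturb |local|.
void AbortAll(std::deque<FakeRequest*>* queue, int error) {
  std::deque<FakeRequest*> local;
  local.swap(*queue);  // |queue| is now empty and safe to refill
  for (std::deque<FakeRequest*>::iterator it = local.begin();
       it != local.end(); ++it) {
    (*it)->result = error;  // the real code posts the user callback instead
  }
}

int main() {
  FakeRequest a = { 0 };
  FakeRequest b = { 0 };
  std::deque<FakeRequest*> pending;
  pending.push_back(&a);
  pending.push_back(&b);
  AbortAll(&pending, -3 /* stand-in value for net::ERR_ABORTED */);
  assert(a.result == -3 && b.result == -3 && pending.empty());
  return 0;
}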
diff --git a/net/socket/client_socket_pool_base_unittest.cc b/net/socket/client_socket_pool_base_unittest.cc
index 71074a9..006d4e81 100644
--- a/net/socket/client_socket_pool_base_unittest.cc
+++ b/net/socket/client_socket_pool_base_unittest.cc
@@ -1933,6 +1933,63 @@ TEST_F(ClientSocketPoolBaseTest, DoNotReuseSocketAfterFlush) {
  EXPECT_EQ(ClientSocketHandle::UNUSED, handle.reuse_type());
}

+class ConnectWithinCallback : public CallbackRunner< Tuple1<int> > {
+ public:
+  ConnectWithinCallback(
+      const std::string& group_name,
+      const scoped_refptr<TestSocketParams>& params,
+      const scoped_refptr<TestClientSocketPool>& pool)
+      : group_name_(group_name), params_(params), pool_(pool) {}
+
+  ~ConnectWithinCallback() {}
+
+  virtual void RunWithParams(const Tuple1<int>& params) {
+    callback_.RunWithParams(params);
+    EXPECT_EQ(ERR_IO_PENDING,
+              handle_.Init(group_name_,
+                           params_,
+                           kDefaultPriority,
+                           &nested_callback_,
+                           pool_,
+                           BoundNetLog()));
+  }
+
+  int WaitForResult() {
+    return callback_.WaitForResult();
+  }
+
+  int WaitForNestedResult() {
+    return nested_callback_.WaitForResult();
+  }
+
+ private:
+  const std::string group_name_;
+  const scoped_refptr<TestSocketParams> params_;
+  const scoped_refptr<TestClientSocketPool> pool_;
+  ClientSocketHandle handle_;
+  TestCompletionCallback callback_;
+  TestCompletionCallback nested_callback_;
+};
+
+TEST_F(ClientSocketPoolBaseTest, AbortAllRequestsOnFlush) {
+  CreatePool(kDefaultMaxSockets, kDefaultMaxSocketsPerGroup);
+
+  // First job will be waiting until it gets aborted.
+  connect_job_factory_->set_job_type(TestConnectJob::kMockWaitingJob);
+
+  ClientSocketHandle handle;
+  ConnectWithinCallback callback("a", params_, pool_);
+  EXPECT_EQ(ERR_IO_PENDING, handle.Init("a", params_, kDefaultPriority,
+                                        &callback, pool_, BoundNetLog()));
+
+  // Second job will be started during the first callback, and will
+  // asynchronously complete with OK.
+  connect_job_factory_->set_job_type(TestConnectJob::kMockPendingJob);
+  pool_->Flush();
+  EXPECT_EQ(ERR_ABORTED, callback.WaitForResult());
+  EXPECT_EQ(OK, callback.WaitForNestedResult());
+}
+
// Cancel a pending socket request while we're at max sockets,
// and verify that the backup socket firing doesn't cause a crash.
TEST_F(ClientSocketPoolBaseTest, BackupSocketCancelAtMaxSockets) {
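The new AbortAllRequestsOnFlush test doubles as a reentrancy check: ConnectWithinCallback issues a fresh handle_.Init() from inside the ERR_ABORTED callback, which only works because the pending-request queue has already been swapped out and the callbacks are posted rather than invoked during Flush(). Assuming the usual net_unittests target, the test can be run on its own with a gtest filter, e.g.:

./net_unittests --gtest_filter=ClientSocketPoolBaseTest.AbortAllRequestsOnFlush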