author     ukai@chromium.org <ukai@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>  2009-11-13 02:30:38 +0000
committer  ukai@chromium.org <ukai@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>  2009-11-13 02:30:38 +0000
commit     c6f27df9b6d00653f079a03bf805aa7645ab3b22 (patch)
tree       290f394d8d4e9eae89abfece8d0529439cb25b61
parent     6a37279aca0ed7e894894042de4b226f46af6bf6 (diff)
Plumb LoadLog into SocketStream
BUG=none
TEST=none
Review URL: http://codereview.chromium.org/385003

git-svn-id: svn://svn.chromium.org/chrome/trunk/src@31881 0039d316-1c4b-4281-b951-d872f2087c98
-rw-r--r--  net/base/load_log_event_type_list.h                     14
-rw-r--r--  net/net.gyp                                              5
-rw-r--r--  net/socket_stream/socket_stream.cc                      62
-rw-r--r--  net/socket_stream/socket_stream.h                       12
-rw-r--r--  net/socket_stream/socket_stream_unittest.cc             12
-rw-r--r--  net/url_request/request_tracker.h                      124
-rw-r--r--  net/url_request/request_tracker_unittest.cc (renamed from net/url_request/url_request_tracker_unittest.cc)  86
-rw-r--r--  net/url_request/url_request.cc                          13
-rw-r--r--  net/url_request/url_request.h                           11
-rw-r--r--  net/url_request/url_request_context.h                   17
-rw-r--r--  net/url_request/url_request_tracker.cc                  85
-rw-r--r--  net/url_request/url_request_tracker.h                   76
-rw-r--r--  net/url_request/url_request_view_net_internals_job.cc   61
13 files changed, 362 insertions, 216 deletions
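
In outline, the patch gives each SocketStream a bounded LoadLog, brackets the connect phase with begin/end events, and hands the log to the proxy-resolution, host-resolution, and socket Connect() calls that previously received NULL. A condensed sketch of that plumbing, pulled from the socket_stream.cc hunks below (elisions marked with "..."; not complete code):

    SocketStream::SocketStream(const GURL& url, Delegate* delegate)
        : load_log_(new net::LoadLog(kMaxNumLoadLogEntries)),  // capped at 50 entries
          url_(url) /* ... */ {
    }

    void SocketStream::Connect() {
      next_state_ = STATE_RESOLVE_PROXY;
      // Opens the CONNECT phase; closed in DidEstablishConnection() on
      // success, or in DoLoop() when a connect-phase state fails.
      LoadLog::BeginEvent(load_log_, LoadLog::TYPE_SOCKET_STREAM_CONNECT);
      // ...
    }

    int SocketStream::DoTcpConnect() {
      // ...
      return socket_->Connect(&io_callback_, load_log_);  // was NULL before
    }

    int SocketStream::DidEstablishConnection() {
      // ...
      LoadLog::EndEvent(load_log_, LoadLog::TYPE_SOCKET_STREAM_CONNECT);
      return OK;
    }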
diff --git a/net/base/load_log_event_type_list.h b/net/base/load_log_event_type_list.h
index da15647..e5af830 100644
--- a/net/base/load_log_event_type_list.h
+++ b/net/base/load_log_event_type_list.h
@@ -157,3 +157,17 @@ EVENT_TYPE(HTTP_TRANSACTION_READ_BODY)
// Measures the time taken to read the response out of the socket before
// restarting for authentication, on keep alive connections.
EVENT_TYPE(HTTP_TRANSACTION_DRAIN_BODY_FOR_AUTH_RESTART)
+
+// ------------------------------------------------------------------------
+// SocketStream
+// ------------------------------------------------------------------------
+
+// Measures the time between SocketStream::Connect() and
+// SocketStream::DidEstablishConnection()
+EVENT_TYPE(SOCKET_STREAM_CONNECT)
+
+// A message sent on the SocketStream.
+EVENT_TYPE(SOCKET_STREAM_SENT)
+
+// A message received on the SocketStream.
+EVENT_TYPE(SOCKET_STREAM_RECEIVED)
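
Nothing in this list declares an enum by itself; the entries are X-macros. A consumer defines EVENT_TYPE, includes the list, then undefines it, which is how names like LoadLog::TYPE_SOCKET_STREAM_CONNECT used elsewhere in this patch come to exist. A minimal sketch of the presumed consumer (the real one lives in net/base/load_log.h):

    // Illustrative consumer of the event type list, assuming the usual
    // X-macro expansion; shown for orientation, not part of this patch.
    enum EventType {
    #define EVENT_TYPE(label) TYPE_ ## label,
    #include "net/base/load_log_event_type_list.h"
    #undef EVENT_TYPE
    };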
diff --git a/net/net.gyp b/net/net.gyp
index 9678a3a..0dbd0ab 100644
--- a/net/net.gyp
+++ b/net/net.gyp
@@ -439,6 +439,7 @@
'socket_stream/socket_stream_throttle.h',
'third_party/parseftp/ParseFTPList.cpp',
'third_party/parseftp/ParseFTPList.h',
+ 'url_request/request_tracker.h',
'url_request/url_request.cc',
'url_request/url_request.h',
'url_request/url_request_about_job.cc',
@@ -477,8 +478,6 @@
'url_request/url_request_status.h',
'url_request/url_request_test_job.cc',
'url_request/url_request_test_job.h',
- 'url_request/url_request_tracker.cc',
- 'url_request/url_request_tracker.h',
'url_request/url_request_view_net_internals_job.cc',
'url_request/url_request_view_net_internals_job.h',
'url_request/view_cache_helper.cc',
@@ -652,7 +651,7 @@
'socket/tcp_client_socket_unittest.cc',
'socket/tcp_pinger_unittest.cc',
'socket_stream/socket_stream_unittest.cc',
- 'url_request/url_request_tracker_unittest.cc',
+ 'url_request/request_tracker_unittest.cc',
'url_request/url_request_unittest.cc',
'url_request/url_request_unittest.h',
'websockets/websocket_throttle_unittest.cc',
diff --git a/net/socket_stream/socket_stream.cc b/net/socket_stream/socket_stream.cc
index 8ca8489..6a5285c 100644
--- a/net/socket_stream/socket_stream.cc
+++ b/net/socket_stream/socket_stream.cc
@@ -29,6 +29,7 @@
#include "net/socket_stream/socket_stream_throttle.h"
#include "net/url_request/url_request.h"
+static const int kMaxNumLoadLogEntries = 50;
static const int kMaxPendingSendAllowed = 32768; // 32 kilobytes.
static const int kReadBufferSize = 4096;
@@ -39,7 +40,8 @@ void SocketStream::ResponseHeaders::Realloc(size_t new_size) {
}
SocketStream::SocketStream(const GURL& url, Delegate* delegate)
- : url_(url),
+ : load_log_(new net::LoadLog(kMaxNumLoadLogEntries)),
+ url_(url),
delegate_(delegate),
max_pending_send_allowed_(kMaxPendingSendAllowed),
next_state_(STATE_NONE),
@@ -60,7 +62,9 @@ SocketStream::SocketStream(const GURL& url, Delegate* delegate)
throttle_(
SocketStreamThrottle::GetSocketStreamThrottleForScheme(
url.scheme())),
- metrics_(new SocketStreamMetrics(url)) {
+ metrics_(new SocketStreamMetrics(url)),
+ ALLOW_THIS_IN_INITIALIZER_LIST(
+ request_tracker_node_(this)) {
DCHECK(MessageLoop::current()) <<
"The current MessageLoop must exist";
DCHECK_EQ(MessageLoop::TYPE_IO, MessageLoop::current()->type()) <<
@@ -70,6 +74,7 @@ SocketStream::SocketStream(const GURL& url, Delegate* delegate)
}
SocketStream::~SocketStream() {
+ set_context(NULL);
DCHECK(!delegate_);
}
@@ -86,8 +91,20 @@ void SocketStream::SetUserData(const void* key, UserData* data) {
}
void SocketStream::set_context(URLRequestContext* context) {
+ scoped_refptr<URLRequestContext> prev_context = context_;
+
context_ = context;
- host_resolver_ = context_->host_resolver();
+
+ if (prev_context != context) {
+ if (prev_context)
+ prev_context->socket_stream_tracker()->Remove(this);
+ if (context)
+ context->socket_stream_tracker()->Add(this);
+ }
+
+ if (context_)
+ host_resolver_ = context_->host_resolver();
+
}
void SocketStream::Connect() {
@@ -102,6 +119,7 @@ void SocketStream::Connect() {
// Open a connection asynchronously, so that delegate won't be called
// back before returning Connect().
next_state_ = STATE_RESOLVE_PROXY;
+ LoadLog::BeginEvent(load_log_, LoadLog::TYPE_SOCKET_STREAM_CONNECT);
MessageLoop::current()->PostTask(
FROM_HERE,
NewRunnableMethod(this, &SocketStream::DoLoop, OK));
@@ -187,6 +205,7 @@ void SocketStream::DetachDelegate() {
if (!delegate_)
return;
delegate_ = NULL;
+ LoadLog::AddEvent(load_log_, LoadLog::TYPE_CANCELLED);
Close();
}
@@ -195,7 +214,9 @@ void SocketStream::Finish(int result) {
"The current MessageLoop must exist";
DCHECK_EQ(MessageLoop::TYPE_IO, MessageLoop::current()->type()) <<
"The current MessageLoop must be TYPE_IO";
- DCHECK_LT(result, 0);
+ DCHECK_LE(result, OK);
+ if (result == OK)
+ result = ERR_CONNECTION_CLOSED;
DCHECK_EQ(next_state_, STATE_NONE);
DLOG(INFO) << "Finish result=" << net::ErrorToString(result);
if (delegate_)
@@ -234,6 +255,7 @@ int SocketStream::DidEstablishConnection() {
next_state_ = STATE_READ_WRITE;
metrics_->OnConnected();
+ LoadLog::EndEvent(load_log_, LoadLog::TYPE_SOCKET_STREAM_CONNECT);
if (delegate_)
delegate_->OnConnected(this, max_pending_send_allowed_);
@@ -243,6 +265,7 @@ int SocketStream::DidEstablishConnection() {
int SocketStream::DidReceiveData(int result) {
DCHECK(read_buf_);
DCHECK_GT(result, 0);
+ LoadLog::AddEvent(load_log_, LoadLog::TYPE_SOCKET_STREAM_RECEIVED);
int len = result;
metrics_->OnRead(len);
result = throttle_->OnRead(this, read_buf_->data(), len, &io_callback_);
@@ -256,6 +279,7 @@ int SocketStream::DidReceiveData(int result) {
int SocketStream::DidSendData(int result) {
DCHECK_GT(result, 0);
+ LoadLog::AddEvent(load_log_, LoadLog::TYPE_SOCKET_STREAM_SENT);
int len = result;
metrics_->OnWrite(len);
result = throttle_->OnWrite(this, current_write_buf_->data(), len,
@@ -364,8 +388,6 @@ void SocketStream::DoLoop(int result) {
break;
case STATE_CLOSE:
DCHECK_LE(result, OK);
- if (result == OK)
- result = ERR_CONNECTION_CLOSED;
Finish(result);
return;
default:
@@ -373,6 +395,12 @@ void SocketStream::DoLoop(int result) {
Finish(result);
return;
}
+ // If a real error occurred before the connection was established, the
+ // stream is closing, so end the CONNECT event in the load log.
+ if (state != STATE_READ_WRITE && result < ERR_IO_PENDING) {
+ DCHECK_EQ(next_state_, STATE_CLOSE);
+ LoadLog::EndEvent(load_log_, LoadLog::TYPE_SOCKET_STREAM_CONNECT);
+ }
} while (result != ERR_IO_PENDING);
}
@@ -381,7 +409,7 @@ int SocketStream::DoResolveProxy() {
next_state_ = STATE_RESOLVE_PROXY_COMPLETE;
return proxy_service()->ResolveProxy(
- url_, &proxy_info_, &io_callback_, &pac_request_, NULL);
+ url_, &proxy_info_, &io_callback_, &pac_request_, load_log_);
}
int SocketStream::DoResolveProxyComplete(int result) {
@@ -424,7 +452,8 @@ int SocketStream::DoResolveHost() {
DCHECK(host_resolver_.get());
resolver_.reset(new SingleRequestHostResolver(host_resolver_.get()));
- return resolver_->Resolve(resolve_info, &addresses_, &io_callback_, NULL);
+ return resolver_->Resolve(resolve_info, &addresses_, &io_callback_,
+ load_log_);
}
int SocketStream::DoResolveHostComplete(int result) {
@@ -444,9 +473,8 @@ int SocketStream::DoTcpConnect() {
next_state_ = STATE_TCP_CONNECT_COMPLETE;
DCHECK(factory_);
socket_.reset(factory_->CreateTCPClientSocket(addresses_));
- // TODO(willchan): Plumb LoadLog into SocketStream.
metrics_->OnStartConnection();
- return socket_->Connect(&io_callback_, NULL);
+ return socket_->Connect(&io_callback_, load_log_);
}
int SocketStream::DoTcpConnectComplete(int result) {
@@ -654,9 +682,8 @@ int SocketStream::DoSOCKSConnect() {
else
s = new SOCKSClientSocket(s, req_info, host_resolver_.get());
socket_.reset(s);
- // TODO(willchan): Plumb LoadLog into SocketStream.
metrics_->OnSOCKSProxy();
- return socket_->Connect(&io_callback_, NULL);
+ return socket_->Connect(&io_callback_, load_log_);
}
int SocketStream::DoSOCKSConnectComplete(int result) {
@@ -667,6 +694,8 @@ int SocketStream::DoSOCKSConnectComplete(int result) {
next_state_ = STATE_SSL_CONNECT;
else
result = DidEstablishConnection();
+ } else {
+ next_state_ = STATE_CLOSE;
}
return result;
}
@@ -676,9 +705,8 @@ int SocketStream::DoSSLConnect() {
socket_.reset(factory_->CreateSSLClientSocket(
socket_.release(), url_.HostNoBrackets(), ssl_config_));
next_state_ = STATE_SSL_CONNECT_COMPLETE;
- // TODO(willchan): Plumb LoadLog into SocketStream.
metrics_->OnSSLConnection();
- return socket_->Connect(&io_callback_, NULL);
+ return socket_->Connect(&io_callback_, load_log_);
}
int SocketStream::DoSSLConnectComplete(int result) {
@@ -848,4 +876,10 @@ ProxyService* SocketStream::proxy_service() const {
return context_->proxy_service();
}
+void SocketStream::GetInfoForTracker(
+ RequestTracker<SocketStream>::RecentRequestInfo* info) const {
+ info->original_url = url_;
+ info->load_log = load_log_;
+}
+
} // namespace net
diff --git a/net/socket_stream/socket_stream.h b/net/socket_stream/socket_stream.h
index ad06be0..1892022 100644
--- a/net/socket_stream/socket_stream.h
+++ b/net/socket_stream/socket_stream.h
@@ -22,6 +22,7 @@
#include "net/http/http_auth_handler.h"
#include "net/proxy/proxy_service.h"
#include "net/socket/tcp_client_socket.h"
+#include "net/url_request/request_tracker.h"
#include "net/url_request/url_request_context.h"
namespace net {
@@ -29,6 +30,7 @@ namespace net {
class AuthChallengeInfo;
class ClientSocketFactory;
class HostResolver;
+class LoadLog;
class SSLConfigService;
class SingleRequestHostResolver;
class SocketStreamMetrics;
@@ -105,6 +107,8 @@ class SocketStream : public base::RefCountedThreadSafe<SocketStream> {
URLRequestContext* context() const { return context_.get(); }
void set_context(URLRequestContext* context);
+ LoadLog* load_log() const { return load_log_; }
+
// Opens the connection on the IO thread.
// Once the connection is established, calls delegate's OnConnected.
void Connect();
@@ -195,6 +199,7 @@ class SocketStream : public base::RefCountedThreadSafe<SocketStream> {
};
typedef std::deque< scoped_refptr<IOBufferWithSize> > PendingDataQueue;
+ friend class RequestTracker<SocketStream>;
friend class base::RefCountedThreadSafe<SocketStream>;
~SocketStream();
@@ -246,6 +251,11 @@ class SocketStream : public base::RefCountedThreadSafe<SocketStream> {
SSLConfigService* ssl_config_service() const;
ProxyService* proxy_service() const;
+ void GetInfoForTracker(
+ RequestTracker<SocketStream>::RecentRequestInfo *info) const;
+
+ scoped_refptr<LoadLog> load_log_;
+
GURL url_;
Delegate* delegate_;
int max_pending_send_allowed_;
@@ -306,6 +316,8 @@ class SocketStream : public base::RefCountedThreadSafe<SocketStream> {
scoped_ptr<SocketStreamMetrics> metrics_;
+ RequestTracker<SocketStream>::Node request_tracker_node_;
+
DISALLOW_COPY_AND_ASSIGN(SocketStream);
};
diff --git a/net/socket_stream/socket_stream_unittest.cc b/net/socket_stream/socket_stream_unittest.cc
index fa44afb..3f0997c 100644
--- a/net/socket_stream/socket_stream_unittest.cc
+++ b/net/socket_stream/socket_stream_unittest.cc
@@ -5,6 +5,8 @@
#include <string>
#include <vector>
+#include "net/base/load_log.h"
+#include "net/base/load_log_unittest.h"
#include "net/base/mock_host_resolver.h"
#include "net/base/test_completion_callback.h"
#include "net/socket/socket_test_util.h"
@@ -205,6 +207,16 @@ TEST_F(SocketStreamTest, BasicAuthProxy) {
EXPECT_EQ(SocketStreamEvent::EVENT_AUTH_REQUIRED, events[0].event_type);
EXPECT_EQ(SocketStreamEvent::EVENT_CONNECTED, events[1].event_type);
EXPECT_EQ(SocketStreamEvent::EVENT_CLOSE, events[2].event_type);
+
+ // The first and last entries of the LoadLog should be for
+ // SOCKET_STREAM_CONNECT.
+ ExpectLogContains(socket_stream->load_log(), 0,
+ LoadLog::TYPE_SOCKET_STREAM_CONNECT,
+ LoadLog::PHASE_BEGIN);
+ ExpectLogContains(socket_stream->load_log(),
+ socket_stream->load_log()->events().size() - 1,
+ LoadLog::TYPE_SOCKET_STREAM_CONNECT,
+ LoadLog::PHASE_END);
}
} // namespace net
diff --git a/net/url_request/request_tracker.h b/net/url_request/request_tracker.h
new file mode 100644
index 0000000..f4e2425
--- /dev/null
+++ b/net/url_request/request_tracker.h
@@ -0,0 +1,124 @@
+// Copyright (c) 2006-2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef NET_URL_REQUEST_REQUEST_TRACKER_H_
+#define NET_URL_REQUEST_REQUEST_TRACKER_H_
+
+#include <vector>
+
+#include "base/ref_counted.h"
+#include "base/linked_list.h"
+#include "base/logging.h"
+#include "googleurl/src/gurl.h"
+#include "net/base/load_log.h"
+
+// Class to track all of the live instances of Request associated with a
+// particular URLRequestContext. It keeps a circular queue of the LoadLogs
+// for recently deceased requests.
+template<typename Request>
+class RequestTracker {
+ public:
+ struct RecentRequestInfo {
+ GURL original_url;
+ scoped_refptr<net::LoadLog> load_log;
+ };
+
+ // Helper class to make Request insertable into a base::LinkedList,
+ // without making the public interface expose base::LinkNode.
+ class Node : public base::LinkNode<Node> {
+ public:
+ Node(Request* request) : request_(request) {}
+ ~Node() {}
+
+ Request* request() const { return request_; }
+
+ private:
+ Request* request_;
+ };
+
+ typedef std::vector<RecentRequestInfo> RecentRequestInfoList;
+
+ // The maximum number of entries for |graveyard_|.
+ static const size_t kMaxGraveyardSize;
+
+ // The maximum size of URLs to stuff into RecentRequestInfo.
+ static const size_t kMaxGraveyardURLSize;
+
+ RequestTracker() : next_graveyard_index_(0) {}
+ ~RequestTracker() {}
+
+ // Returns a list of Requests that are alive.
+ std::vector<Request*> GetLiveRequests() {
+ std::vector<Request*> list;
+ for (base::LinkNode<Node>* node = live_instances_.head();
+ node != live_instances_.end();
+ node = node->next()) {
+ Request* request = node->value()->request();
+ list.push_back(request);
+ }
+ return list;
+ }
+
+ // Clears the circular buffer of RecentRequestInfos.
+ void ClearRecentlyDeceased() {
+ next_graveyard_index_ = 0;
+ graveyard_.clear();
+ }
+
+ // Returns a list of recently completed Requests.
+ const RecentRequestInfoList GetRecentlyDeceased() {
+ RecentRequestInfoList list;
+
+ // Copy the items from |graveyard_| (our circular queue of recently
+ // deceased request infos) into a vector, ordered from oldest to newest.
+ for (size_t i = 0; i < graveyard_.size(); ++i) {
+ size_t index = (next_graveyard_index_ + i) % graveyard_.size();
+ list.push_back(graveyard_[index]);
+ }
+ return list;
+ }
+
+ void Add(Request* request) {
+ live_instances_.Append(&request->request_tracker_node_);
+ }
+ void Remove(Request* request) {
+ // Remove from |live_instances_|.
+ request->request_tracker_node_.RemoveFromList();
+
+ RecentRequestInfo info;
+ request->GetInfoForTracker(&info);
+ // Paranoia check: truncate |info.original_url| if it is really big.
+ const std::string& spec = info.original_url.possibly_invalid_spec();
+ if (spec.size() > kMaxGraveyardURLSize)
+ info.original_url = GURL(spec.substr(0, kMaxGraveyardURLSize));
+ // Add into |graveyard_|.
+ InsertIntoGraveyard(info);
+ }
+
+ private:
+ void InsertIntoGraveyard(const RecentRequestInfo& info) {
+ if (graveyard_.size() < kMaxGraveyardSize) {
+ // Still growing to maximum capacity.
+ DCHECK_EQ(next_graveyard_index_, graveyard_.size());
+ graveyard_.push_back(info);
+ } else {
+ // At maximum capacity, overwrite the oldest entry.
+ graveyard_[next_graveyard_index_] = info;
+ }
+ next_graveyard_index_ = (next_graveyard_index_ + 1) % kMaxGraveyardSize;
+ }
+
+ base::LinkedList<Node> live_instances_;
+
+ size_t next_graveyard_index_;
+ RecentRequestInfoList graveyard_;
+};
+
+template<typename Request>
+const size_t RequestTracker<Request>::kMaxGraveyardSize = 25;
+
+template<typename Request>
+const size_t RequestTracker<Request>::kMaxGraveyardURLSize = 1000;
+
+#endif // NET_URL_REQUEST_REQUEST_TRACKER_H_
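
The graveyard above is a fixed-capacity ring buffer: it grows with push_back() until kMaxGraveyardSize, then overwrites the slot at next_graveyard_index_, and GetRecentlyDeceased() reads starting from that index so the copy comes out oldest to newest. A self-contained sketch of the same technique using only standard-library types (RingGraveyard and its member names are illustrative, not part of this patch):

    #include <cstddef>
    #include <string>
    #include <vector>

    // Fixed-capacity ring buffer mirroring the graveyard logic above.
    class RingGraveyard {
     public:
      explicit RingGraveyard(size_t capacity) : capacity_(capacity), next_(0) {}

      void Insert(const std::string& info) {
        if (entries_.size() < capacity_) {
          entries_.push_back(info);   // still growing to maximum capacity
        } else {
          entries_[next_] = info;     // full: overwrite the oldest entry
        }
        next_ = (next_ + 1) % capacity_;
      }

      // Returns entries oldest first: the slot at |next_| is the one that
      // would be overwritten next, i.e. the oldest surviving entry.
      std::vector<std::string> OldestFirst() const {
        std::vector<std::string> out;
        for (size_t i = 0; i < entries_.size(); ++i)
          out.push_back(entries_[(next_ + i) % entries_.size()]);
        return out;
      }

     private:
      const size_t capacity_;
      size_t next_;
      std::vector<std::string> entries_;
    };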
diff --git a/net/url_request/url_request_tracker_unittest.cc b/net/url_request/request_tracker_unittest.cc
index 53196ac..c3c41f4 100644
--- a/net/url_request/url_request_tracker_unittest.cc
+++ b/net/url_request/request_tracker_unittest.cc
@@ -2,24 +2,57 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "net/url_request/url_request_tracker.h"
+#include "net/url_request/request_tracker.h"
+#include "base/compiler_specific.h"
#include "base/string_util.h"
-#include "net/url_request/url_request.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace {
-TEST(URLRequestTrackerTest, Basic) {
- URLRequestTracker tracker;
+static const int kMaxNumLoadLogEntries = 1;
+
+class TestRequest {
+ public:
+ explicit TestRequest(const GURL& url)
+ : url_(url),
+ load_log_(new net::LoadLog(kMaxNumLoadLogEntries)),
+ ALLOW_THIS_IN_INITIALIZER_LIST(request_tracker_node_(this)) {}
+ ~TestRequest() {}
+
+ // This method is used in RequestTrackerTest::Basic test.
+ const GURL& original_url() const { return url_; }
+
+ private:
+ // RequestTracker<T> will access GetInfoForTracker() and
+ // |request_tracker_node_|.
+ friend class RequestTracker<TestRequest>;
+
+ void GetInfoForTracker(
+ RequestTracker<TestRequest>::RecentRequestInfo *info) const {
+ info->original_url = url_;
+ info->load_log = load_log_;
+ }
+
+ const GURL url_;
+ scoped_refptr<net::LoadLog> load_log_;
+
+ RequestTracker<TestRequest>::Node request_tracker_node_;
+
+ DISALLOW_COPY_AND_ASSIGN(TestRequest);
+};
+
+
+TEST(RequestTrackerTest, Basic) {
+ RequestTracker<TestRequest> tracker;
EXPECT_EQ(0u, tracker.GetLiveRequests().size());
EXPECT_EQ(0u, tracker.GetRecentlyDeceased().size());
- URLRequest req1(GURL("http://req1"), NULL);
- URLRequest req2(GURL("http://req2"), NULL);
- URLRequest req3(GURL("http://req3"), NULL);
- URLRequest req4(GURL("http://req4"), NULL);
- URLRequest req5(GURL("http://req5"), NULL);
+ TestRequest req1(GURL("http://req1"));
+ TestRequest req2(GURL("http://req2"));
+ TestRequest req3(GURL("http://req3"));
+ TestRequest req4(GURL("http://req4"));
+ TestRequest req5(GURL("http://req5"));
tracker.Add(&req1);
tracker.Add(&req2);
@@ -27,7 +60,7 @@ TEST(URLRequestTrackerTest, Basic) {
tracker.Add(&req4);
tracker.Add(&req5);
- std::vector<URLRequest*> live_reqs = tracker.GetLiveRequests();
+ std::vector<TestRequest*> live_reqs = tracker.GetLiveRequests();
ASSERT_EQ(5u, live_reqs.size());
EXPECT_EQ(GURL("http://req1"), live_reqs[0]->original_url());
@@ -49,40 +82,43 @@ TEST(URLRequestTrackerTest, Basic) {
EXPECT_EQ(GURL("http://req4"), live_reqs[1]->original_url());
}
-TEST(URLRequestTrackerTest, GraveyardBounded) {
- URLRequestTracker tracker;
+TEST(RequestTrackerTest, GraveyardBounded) {
+ RequestTracker<TestRequest> tracker;
EXPECT_EQ(0u, tracker.GetLiveRequests().size());
EXPECT_EQ(0u, tracker.GetRecentlyDeceased().size());
// Add twice as many requests as will fit in the graveyard.
- for (size_t i = 0; i < URLRequestTracker::kMaxGraveyardSize * 2; ++i) {
- URLRequest req(GURL(StringPrintf("http://req%d", i).c_str()), NULL);
+ for (size_t i = 0;
+ i < RequestTracker<TestRequest>::kMaxGraveyardSize * 2;
+ ++i) {
+ TestRequest req(GURL(StringPrintf("http://req%d", i).c_str()));
tracker.Add(&req);
tracker.Remove(&req);
}
// Check that only the last |kMaxGraveyardSize| requests are in-memory.
- URLRequestTracker::RecentRequestInfoList recent_reqs =
+ RequestTracker<TestRequest>::RecentRequestInfoList recent_reqs =
tracker.GetRecentlyDeceased();
- ASSERT_EQ(URLRequestTracker::kMaxGraveyardSize, recent_reqs.size());
+ ASSERT_EQ(RequestTracker<TestRequest>::kMaxGraveyardSize, recent_reqs.size());
- for (size_t i = 0; i < URLRequestTracker::kMaxGraveyardSize; ++i) {
- size_t req_number = i + URLRequestTracker::kMaxGraveyardSize;
+ for (size_t i = 0; i < RequestTracker<TestRequest>::kMaxGraveyardSize; ++i) {
+ size_t req_number = i + RequestTracker<TestRequest>::kMaxGraveyardSize;
GURL url(StringPrintf("http://req%d", req_number).c_str());
EXPECT_EQ(url, recent_reqs[i].original_url);
}
}
// Check that very long URLs are truncated.
-TEST(URLRequestTrackerTest, GraveyardURLBounded) {
- URLRequestTracker tracker;
+TEST(RequestTrackerTest, GraveyardURLBounded) {
+ RequestTracker<TestRequest> tracker;
std::string big_url_spec("http://");
- big_url_spec.resize(2 * URLRequestTracker::kMaxGraveyardURLSize, 'x');
+ big_url_spec.resize(2 * RequestTracker<TestRequest>::kMaxGraveyardURLSize,
+ 'x');
GURL big_url(big_url_spec);
- URLRequest req(big_url, NULL);
+ TestRequest req(big_url);
tracker.Add(&req);
tracker.Remove(&req);
@@ -90,20 +126,20 @@ TEST(URLRequestTrackerTest, GraveyardURLBounded) {
ASSERT_EQ(1u, tracker.GetRecentlyDeceased().size());
// The +1 is because GURL canonicalizes with a trailing '/' ... maybe
// we should just save the std::string rather than the GURL.
- EXPECT_EQ(URLRequestTracker::kMaxGraveyardURLSize + 1,
+ EXPECT_EQ(RequestTracker<TestRequest>::kMaxGraveyardURLSize + 1,
tracker.GetRecentlyDeceased()[0].original_url.spec().size());
}
// Test that it doesn't fail if the URL was invalid. http://crbug.com/21423.
TEST(URLRequestTrackerTest, TrackingInvalidURL) {
- URLRequestTracker tracker;
+ RequestTracker<TestRequest> tracker;
EXPECT_EQ(0u, tracker.GetLiveRequests().size());
EXPECT_EQ(0u, tracker.GetRecentlyDeceased().size());
GURL invalid_url("xabc");
EXPECT_FALSE(invalid_url.is_valid());
- URLRequest req(invalid_url, NULL);
+ TestRequest req(invalid_url);
tracker.Add(&req);
tracker.Remove(&req);
diff --git a/net/url_request/url_request.cc b/net/url_request/url_request.cc
index 96fb051..9577a4a 100644
--- a/net/url_request/url_request.cc
+++ b/net/url_request/url_request.cc
@@ -49,7 +49,7 @@ URLRequest::URLRequest(const GURL& url, Delegate* delegate)
redirect_limit_(kMaxRedirects),
final_upload_progress_(0),
priority_(0),
- ALLOW_THIS_IN_INITIALIZER_LIST(url_request_tracker_node_(this)) {
+ ALLOW_THIS_IN_INITIALIZER_LIST(request_tracker_node_(this)) {
SIMPLE_STATS_COUNTER("URLRequestCount");
// Sanity check out environment.
@@ -495,9 +495,9 @@ void URLRequest::set_context(URLRequestContext* context) {
// If the context this request belongs to has changed, update the tracker(s).
if (prev_context != context) {
if (prev_context)
- prev_context->request_tracker()->Remove(this);
+ prev_context->url_request_tracker()->Remove(this);
if (context)
- context->request_tracker()->Add(this);
+ context->url_request_tracker()->Add(this);
}
}
@@ -519,3 +519,10 @@ URLRequest::UserData* URLRequest::GetUserData(const void* key) const {
void URLRequest::SetUserData(const void* key, UserData* data) {
user_data_[key] = linked_ptr<UserData>(data);
}
+
+void URLRequest::GetInfoForTracker(
+ RequestTracker<URLRequest>::RecentRequestInfo* info) const {
+ DCHECK(info);
+ info->original_url = original_url_;
+ info->load_log = load_log_;
+}
diff --git a/net/url_request/url_request.h b/net/url_request/url_request.h
index e6aaf47..bd07767 100644
--- a/net/url_request/url_request.h
+++ b/net/url_request/url_request.h
@@ -19,8 +19,8 @@
#include "net/base/load_log.h"
#include "net/base/load_states.h"
#include "net/http/http_response_info.h"
+#include "net/url_request/request_tracker.h"
#include "net/url_request/url_request_status.h"
-#include "net/url_request/url_request_tracker.h"
namespace base {
class Time;
@@ -528,7 +528,7 @@ class URLRequest {
private:
friend class URLRequestJob;
- friend class URLRequestTracker;
+ friend class RequestTracker<URLRequest>;
void StartJob(URLRequestJob* job);
@@ -550,6 +550,11 @@ class URLRequest {
// Origin).
static std::string StripPostSpecificHeaders(const std::string& headers);
+ // Gets the goodies out of this that we want to show the user later on the
+ // chrome://net-internals/ page.
+ void GetInfoForTracker(
+ RequestTracker<URLRequest>::RecentRequestInfo *info) const;
+
// Contextual information used for this request (can be NULL). This contains
// most of the dependencies which are shared between requests (disk cache,
// cookie store, socket pool, etc.)
@@ -603,7 +608,7 @@ class URLRequest {
// this to determine which URLRequest to allocate sockets to first.
int priority_;
- URLRequestTracker::Node url_request_tracker_node_;
+ RequestTracker<URLRequest>::Node request_tracker_node_;
base::LeakTracker<URLRequest> leak_tracker_;
DISALLOW_COPY_AND_ASSIGN(URLRequest);
diff --git a/net/url_request/url_request_context.h b/net/url_request/url_request_context.h
index 67b378a..f02bc43 100644
--- a/net/url_request/url_request_context.h
+++ b/net/url_request/url_request_context.h
@@ -19,11 +19,12 @@
#include "net/base/strict_transport_security_state.h"
#include "net/ftp/ftp_auth_cache.h"
#include "net/proxy/proxy_service.h"
-#include "net/url_request/url_request_tracker.h"
+#include "net/url_request/request_tracker.h"
namespace net {
class FtpTransactionFactory;
class HttpTransactionFactory;
+class SocketStream;
}
class URLRequest;
@@ -81,7 +82,14 @@ class URLRequestContext :
const std::string& accept_language() const { return accept_language_; }
// Gets the tracker for URLRequests associated with this context.
- URLRequestTracker* request_tracker() { return &request_tracker_; }
+ RequestTracker<URLRequest>* url_request_tracker() {
+ return &url_request_tracker_;
+ }
+
+ // Gets the tracker for SocketStreams associated with this context.
+ RequestTracker<net::SocketStream>* socket_stream_tracker() {
+ return &socket_stream_tracker_;
+ }
// Gets the UA string to use for the given URL. Pass an invalid URL (such as
// GURL()) to get the default UA string. Subclasses should override this
@@ -135,7 +143,10 @@ class URLRequestContext :
std::string referrer_charset_;
// Tracks the requests associated with this context.
- URLRequestTracker request_tracker_;
+ RequestTracker<URLRequest> url_request_tracker_;
+
+ // Tracks the socket streams associated with this context.
+ RequestTracker<net::SocketStream> socket_stream_tracker_;
private:
DISALLOW_COPY_AND_ASSIGN(URLRequestContext);
diff --git a/net/url_request/url_request_tracker.cc b/net/url_request/url_request_tracker.cc
deleted file mode 100644
index 14e5bc3..0000000
--- a/net/url_request/url_request_tracker.cc
+++ /dev/null
@@ -1,85 +0,0 @@
-// Copyright (c) 2006-2009 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "net/url_request/url_request_tracker.h"
-
-#include "base/logging.h"
-#include "net/url_request/url_request.h"
-
-const size_t URLRequestTracker::kMaxGraveyardSize = 25;
-const size_t URLRequestTracker::kMaxGraveyardURLSize = 1000;
-
-URLRequestTracker::URLRequestTracker() : next_graveyard_index_(0) {}
-
-URLRequestTracker::~URLRequestTracker() {}
-
-std::vector<URLRequest*> URLRequestTracker::GetLiveRequests() {
- std::vector<URLRequest*> list;
- for (base::LinkNode<Node>* node = live_instances_.head();
- node != live_instances_.end();
- node = node->next()) {
- URLRequest* url_request = node->value()->url_request();
- list.push_back(url_request);
- }
- return list;
-}
-
-void URLRequestTracker::ClearRecentlyDeceased() {
- next_graveyard_index_ = 0;
- graveyard_.clear();
-}
-
-const URLRequestTracker::RecentRequestInfoList
-URLRequestTracker::GetRecentlyDeceased() {
- RecentRequestInfoList list;
-
- // Copy the items from |graveyard_| (our circular queue of recently
- // deceased request infos) into a vector, ordered from oldest to
- // newest.
- for (size_t i = 0; i < graveyard_.size(); ++i) {
- size_t index = (next_graveyard_index_ + i) % graveyard_.size();
- list.push_back(graveyard_[index]);
- }
- return list;
-}
-
-void URLRequestTracker::Add(URLRequest* url_request) {
- live_instances_.Append(&url_request->url_request_tracker_node_);
-}
-
-void URLRequestTracker::Remove(URLRequest* url_request) {
- // Remove from |live_instances_|.
- url_request->url_request_tracker_node_.RemoveFromList();
-
- // Add into |graveyard_|.
- InsertIntoGraveyard(ExtractInfo(url_request));
-}
-
-// static
-const URLRequestTracker::RecentRequestInfo
-URLRequestTracker::ExtractInfo(URLRequest* url_request) {
- RecentRequestInfo info;
- info.original_url = url_request->original_url();
- info.load_log = url_request->load_log();
-
- // Paranoia check: truncate |info.original_url| if it is really big.
- const std::string& spec = info.original_url.possibly_invalid_spec();
- if (spec.size() > kMaxGraveyardURLSize)
- info.original_url = GURL(spec.substr(0, kMaxGraveyardURLSize));
- return info;
-}
-
-void URLRequestTracker::InsertIntoGraveyard(
- const RecentRequestInfo& info) {
- if (graveyard_.size() < kMaxGraveyardSize) {
- // Still growing to maximum capacity.
- DCHECK_EQ(next_graveyard_index_, graveyard_.size());
- graveyard_.push_back(info);
- } else {
- // At maximum capacity, overwrite the oldest entry.
- graveyard_[next_graveyard_index_] = info;
- }
-
- next_graveyard_index_ = (next_graveyard_index_ + 1) % kMaxGraveyardSize;
-}
diff --git a/net/url_request/url_request_tracker.h b/net/url_request/url_request_tracker.h
deleted file mode 100644
index 36d05d1..0000000
--- a/net/url_request/url_request_tracker.h
+++ /dev/null
@@ -1,76 +0,0 @@
-// Copyright (c) 2006-2009 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef NET_URL_REQUEST_URL_REQUEST_TRACKER_H_
-#define NET_URL_REQUEST_URL_REQUEST_TRACKER_H_
-
-#include <vector>
-
-#include "base/ref_counted.h"
-#include "base/linked_list.h"
-#include "googleurl/src/gurl.h"
-#include "net/base/load_log.h"
-
-class URLRequest;
-
-// Class to track all of the live instances of URLRequest associated with a
-// particular URLRequestContext. It keep a circular queue of the LoadLogs
-// for recently deceased requests.
-class URLRequestTracker {
- public:
- struct RecentRequestInfo {
- GURL original_url;
- scoped_refptr<net::LoadLog> load_log;
- };
-
- // Helper class to make URLRequest insertable into a base::LinkedList,
- // without making the public interface expose base::LinkNode.
- class Node : public base::LinkNode<Node> {
- public:
- Node(URLRequest* url_request) : url_request_(url_request) {}
- ~Node() {}
-
- URLRequest* url_request() const { return url_request_; }
-
- private:
- URLRequest* url_request_;
- };
-
- typedef std::vector<RecentRequestInfo> RecentRequestInfoList;
-
- // The maximum number of entries for |graveyard_|.
- static const size_t kMaxGraveyardSize;
-
- // The maximum size of URLs to stuff into RecentRequestInfo.
- static const size_t kMaxGraveyardURLSize;
-
- URLRequestTracker();
- ~URLRequestTracker();
-
- // Returns a list of URLRequests that are alive.
- std::vector<URLRequest*> GetLiveRequests();
-
- // Clears the circular buffer of RecentRequestInfos.
- void ClearRecentlyDeceased();
-
- // Returns a list of recently completed URLRequests.
- const RecentRequestInfoList GetRecentlyDeceased();
-
- void Add(URLRequest* url_request);
- void Remove(URLRequest* url_request);
-
- private:
- // Copy the goodies out of |url_request| that we want to show the
- // user later on the about:net-internal page.
- static const RecentRequestInfo ExtractInfo(URLRequest* url_request);
-
- void InsertIntoGraveyard(const RecentRequestInfo& info);
-
- base::LinkedList<Node> live_instances_;
-
- size_t next_graveyard_index_;
- RecentRequestInfoList graveyard_;
-};
-
-#endif // NET_URL_REQUEST_URL_REQUEST_TRACKER_H_
diff --git a/net/url_request/url_request_view_net_internals_job.cc b/net/url_request/url_request_view_net_internals_job.cc
index ad97b63..b98e9ef 100644
--- a/net/url_request/url_request_view_net_internals_job.cc
+++ b/net/url_request/url_request_view_net_internals_job.cc
@@ -20,6 +20,7 @@
#include "net/base/net_errors.h"
#include "net/base/net_util.h"
#include "net/proxy/proxy_service.h"
+#include "net/socket_stream/socket_stream.h"
#include "net/url_request/url_request.h"
#include "net/url_request/url_request_context.h"
#include "net/url_request/view_cache_helper.h"
@@ -330,7 +331,7 @@ class URLRequestLiveSubSection : public SubSection {
virtual void OutputBody(URLRequestContext* context, std::string* out) {
std::vector<URLRequest*> requests =
- context->request_tracker()->GetLiveRequests();
+ context->url_request_tracker()->GetLiveRequests();
out->append("<ol>");
for (size_t i = 0; i < requests.size(); ++i) {
@@ -351,8 +352,8 @@ class URLRequestRecentSubSection : public SubSection {
}
virtual void OutputBody(URLRequestContext* context, std::string* out) {
- URLRequestTracker::RecentRequestInfoList recent =
- context->request_tracker()->GetRecentlyDeceased();
+ RequestTracker<URLRequest>::RecentRequestInfoList recent =
+ context->url_request_tracker()->GetRecentlyDeceased();
out->append("<ol>");
for (size_t i = 0; i < recent.size(); ++i) {
@@ -400,6 +401,58 @@ class HttpCacheSection : public SubSection {
}
};
+class SocketStreamLiveSubSection : public SubSection {
+ public:
+ SocketStreamLiveSubSection(SubSection* parent)
+ : SubSection(parent, "live", "Live SocketStreams") {
+ }
+
+ virtual void OutputBody(URLRequestContext* context, std::string* out) {
+ std::vector<net::SocketStream*> sockets =
+ context->socket_stream_tracker()->GetLiveRequests();
+
+ out->append("<ol>");
+ for (size_t i = 0; i < sockets.size(); ++i) {
+ // Reverse the list order, so we display from most recent to oldest.
+ size_t index = sockets.size() - i - 1;
+ OutputURLAndLoadLog(sockets[index]->url(),
+ sockets[index]->load_log(),
+ out);
+ }
+ out->append("</ol>");
+ }
+};
+
+class SocketStreamRecentSubSection : public SubSection {
+ public:
+ SocketStreamRecentSubSection(SubSection* parent)
+ : SubSection(parent, "recent", "Recently completed SocketStreams") {
+ }
+
+ virtual void OutputBody(URLRequestContext* context, std::string* out) {
+ RequestTracker<net::SocketStream>::RecentRequestInfoList recent =
+ context->socket_stream_tracker()->GetRecentlyDeceased();
+
+ out->append("<ol>");
+ for (size_t i = 0; i < recent.size(); ++i) {
+ // Reverse the list order, so we display from most recent to oldest.
+ size_t index = recent.size() - i - 1;
+ OutputURLAndLoadLog(recent[index].original_url,
+ recent[index].load_log, out);
+ }
+ out->append("</ol>");
+ }
+};
+
+class SocketStreamSubSection : public SubSection {
+ public:
+ SocketStreamSubSection(SubSection* parent)
+ : SubSection(parent, "socketstream", "SocketStream") {
+ AddSubSection(new SocketStreamLiveSubSection(this));
+ AddSubSection(new SocketStreamRecentSubSection(this));
+ }
+};
+
class AllSubSections : public SubSection {
public:
AllSubSections() : SubSection(NULL, "", "") {
@@ -407,6 +460,7 @@ class AllSubSections : public SubSection {
AddSubSection(new HostResolverSubSection(this));
AddSubSection(new URLRequestSubSection(this));
AddSubSection(new HttpCacheSection(this));
+ AddSubSection(new SocketStreamSubSection(this));
}
};
@@ -480,4 +534,3 @@ bool URLRequestViewNetInternalsJob::GetData(std::string* mime_type,
return true;
}
-