author     eroman@chromium.org <eroman@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>  2010-03-16 07:03:53 +0000
committer  eroman@chromium.org <eroman@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>  2010-03-16 07:03:53 +0000
commit     9e743cddfd631038fe6f1cdde050e18d61319ec6 (patch)
tree       7ef974e43b23f570433fe819bcd07966165c517f /net/url_request
parent     2e7aff66fe443c29b2fc14a776dca5512b0b4729 (diff)
Generalize the net module's LoadLog facility from a passive container to an event stream (NetLog).

This makes it possible to associate a single NetLog with a URLRequestContext, and then attach observers to that log to watch the stream of events. This changelist attempts the most direct translation, so there will be subsequent iterations to clean things up. The user-visible behavior should remain unchanged.

BUG=37421
Review URL: http://codereview.chromium.org/848006

git-svn-id: svn://svn.chromium.org/chrome/trunk/src@41689 0039d316-1c4b-4281-b951-d872f2087c98
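For orientation, here is a minimal sketch of the new event-stream pattern, assembled strictly from the APIs visible in the hunks below (URLRequestContext::net_log(), net::BoundNetLog::Make(), and the paired Begin/End event calls); the observer-registration side of NetLog is not part of this diff, so it is omitted:

    // Each URLRequestContext now carries a single NetLog (may be NULL).
    net::NetLog* net_log = context->net_log();

    // A request binds itself to that log with a per-request source type...
    net::BoundNetLog bound =
        net::BoundNetLog::Make(net_log, net::NetLog::SOURCE_URL_REQUEST);

    // ...and emits paired events into the stream as it progresses.
    bound.BeginEvent(net::NetLog::TYPE_URL_REQUEST_START);
    // (request runs)
    bound.EndEvent(net::NetLog::TYPE_URL_REQUEST_START);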
Diffstat (limited to 'net/url_request')
-rw-r--r--  net/url_request/request_tracker.h            | 192
-rw-r--r--  net/url_request/request_tracker_unittest.cc  | 266
-rw-r--r--  net/url_request/url_request.cc               |  39
-rw-r--r--  net/url_request/url_request.h                |  14
-rw-r--r--  net/url_request/url_request_context.h        |  26
-rw-r--r--  net/url_request/url_request_http_job.cc      |   2
-rw-r--r--  net/url_request/url_request_new_ftp_job.cc   |   2
-rw-r--r--  net/url_request/url_request_unittest.cc      |  15
8 files changed, 31 insertions, 525 deletions
diff --git a/net/url_request/request_tracker.h b/net/url_request/request_tracker.h
deleted file mode 100644
index 3202e60..0000000
--- a/net/url_request/request_tracker.h
+++ /dev/null
@@ -1,192 +0,0 @@
-// Copyright (c) 2006-2009 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef NET_URL_REQUEST_REQUEST_TRACKER_H_
-#define NET_URL_REQUEST_REQUEST_TRACKER_H_
-
-#include <vector>
-
-#include "base/ref_counted.h"
-#include "base/linked_list.h"
-#include "base/logging.h"
-#include "googleurl/src/gurl.h"
-#include "net/base/load_log.h"
-
-// Class to track all of the live instances of Request associated with a
-// particular URLRequestContext. It keeps a circular queue of the LoadLogs
-// for recently deceased requests.
-template<typename Request>
-class RequestTracker {
- public:
- struct RecentRequestInfo {
- GURL original_url;
- scoped_refptr<net::LoadLog> load_log;
- };
-
- // Helper class to make Request insertable into a base::LinkedList,
- // without making the public interface expose base::LinkNode.
- class Node : public base::LinkNode<Node> {
- public:
- Node(Request* request) : request_(request) {}
- ~Node() {}
-
- Request* request() const { return request_; }
-
- private:
- Request* request_;
- };
-
- typedef std::vector<RecentRequestInfo> RecentRequestInfoList;
- typedef bool (*RecentRequestsFilterFunc)(const GURL&);
-
- // The maximum number of entries for |graveyard_|, when in bounded mode.
- static const size_t kMaxGraveyardSize;
-
- // The maximum size of URLs to stuff into RecentRequestInfo, when in bounded
- // mode.
- static const size_t kMaxGraveyardURLSize;
-
- // The maximum number of entries to use for LoadLogs when in bounded mode.
- static const size_t kBoundedLoadLogMaxEntries;
-
- RequestTracker()
- : next_graveyard_index_(0),
- graveyard_filter_func_(NULL),
- is_unbounded_(false) {
- }
-
- ~RequestTracker() {}
-
- // Returns a list of Requests that are alive.
- std::vector<Request*> GetLiveRequests() {
- std::vector<Request*> list;
- for (base::LinkNode<Node>* node = live_instances_.head();
- node != live_instances_.end();
- node = node->next()) {
- Request* request = node->value()->request();
- list.push_back(request);
- }
- return list;
- }
-
- // Clears the circular buffer of RecentRequestInfos.
- void ClearRecentlyDeceased() {
- next_graveyard_index_ = 0;
- graveyard_.clear();
- }
-
- // Returns a list of recently completed Requests.
- const RecentRequestInfoList GetRecentlyDeceased() {
- RecentRequestInfoList list;
-
- // Copy the items from |graveyard_| (our circular queue of recently
- // deceased request infos) into a vector, ordered from oldest to newest.
- for (size_t i = 0; i < graveyard_.size(); ++i) {
- size_t index = (next_graveyard_index_ + i) % graveyard_.size();
- list.push_back(graveyard_[index]);
- }
- return list;
- }
-
- void Add(Request* request) {
- live_instances_.Append(&request->request_tracker_node_);
- }
-
- void Remove(Request* request) {
- // Remove from |live_instances_|.
- request->request_tracker_node_.RemoveFromList();
-
- RecentRequestInfo info;
- request->GetInfoForTracker(&info);
-
- if (!is_unbounded_) {
- // Paranoia check: truncate |info.original_url| if it is really big.
- const std::string& spec = info.original_url.possibly_invalid_spec();
- if (spec.size() > kMaxGraveyardURLSize)
- info.original_url = GURL(spec.substr(0, kMaxGraveyardURLSize));
- }
-
- if (ShouldInsertIntoGraveyard(info)) {
- // Add into |graveyard_|.
- InsertIntoGraveyard(info);
- }
- }
-
- // This function lets you exclude requests from being saved to the graveyard.
- // The graveyard is a circular buffer of the most recently completed
-  // requests. Pass NULL to turn off filtering. Otherwise pass in a function
-  // that returns false to exclude a request, and true to keep it.
- void SetGraveyardFilter(RecentRequestsFilterFunc filter_func) {
- graveyard_filter_func_ = filter_func;
- }
-
- bool IsUnbounded() const {
- return is_unbounded_;
- }
-
- void SetUnbounded(bool unbounded) {
- // No change.
- if (is_unbounded_ == unbounded)
- return;
-
- // If we are going from unbounded to bounded, we need to trim the
- // graveyard. For simplicity we will simply clear it.
- if (is_unbounded_ && !unbounded)
- ClearRecentlyDeceased();
-
- is_unbounded_ = unbounded;
- }
-
- // Creates a LoadLog using the unbounded/bounded constraints that
- // apply to this tracker.
- net::LoadLog* CreateLoadLog() {
- if (IsUnbounded())
- return new net::LoadLog(net::LoadLog::kUnbounded);
- return new net::LoadLog(kBoundedLoadLogMaxEntries);
- }
-
- private:
- bool ShouldInsertIntoGraveyard(const RecentRequestInfo& info) {
- if (!graveyard_filter_func_)
- return true;
- return graveyard_filter_func_(info.original_url);
- }
-
- void InsertIntoGraveyard(const RecentRequestInfo& info) {
- if (is_unbounded_) {
- graveyard_.push_back(info);
- return;
- }
-
- // Otherwise enforce a bound on the graveyard size, by treating it as a
- // circular buffer.
- if (graveyard_.size() < kMaxGraveyardSize) {
- // Still growing to maximum capacity.
- DCHECK_EQ(next_graveyard_index_, graveyard_.size());
- graveyard_.push_back(info);
- } else {
-      // At maximum capacity, overwrite the oldest entry.
- graveyard_[next_graveyard_index_] = info;
- }
- next_graveyard_index_ = (next_graveyard_index_ + 1) % kMaxGraveyardSize;
- }
-
- base::LinkedList<Node> live_instances_;
-
- size_t next_graveyard_index_;
- RecentRequestInfoList graveyard_;
- RecentRequestsFilterFunc graveyard_filter_func_;
- bool is_unbounded_;
-};
-
-template<typename Request>
-const size_t RequestTracker<Request>::kMaxGraveyardSize = 25;
-
-template<typename Request>
-const size_t RequestTracker<Request>::kMaxGraveyardURLSize = 1000;
-
-template<typename Request>
-const size_t RequestTracker<Request>::kBoundedLoadLogMaxEntries = 50;
-
-#endif // NET_URL_REQUEST_REQUEST_TRACKER_H_
diff --git a/net/url_request/request_tracker_unittest.cc b/net/url_request/request_tracker_unittest.cc
deleted file mode 100644
index 633c923..0000000
--- a/net/url_request/request_tracker_unittest.cc
+++ /dev/null
@@ -1,266 +0,0 @@
-// Copyright (c) 2006-2009 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "net/url_request/request_tracker.h"
-
-#include "base/compiler_specific.h"
-#include "base/format_macros.h"
-#include "base/string_util.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace {
-
-static const int kMaxNumLoadLogEntries = 1;
-
-class TestRequest {
- public:
- explicit TestRequest(const GURL& url)
- : url_(url),
- load_log_(new net::LoadLog(kMaxNumLoadLogEntries)),
- ALLOW_THIS_IN_INITIALIZER_LIST(request_tracker_node_(this)) {}
- ~TestRequest() {}
-
-  // This method is used in the RequestTrackerTest.BasicBounded test.
- const GURL& original_url() const { return url_; }
-
- private:
-  // RequestTracker<T> will access GetInfoForTracker() and
- // |request_tracker_node_|.
- friend class RequestTracker<TestRequest>;
-
- void GetInfoForTracker(
- RequestTracker<TestRequest>::RecentRequestInfo* info) const {
- info->original_url = url_;
- info->load_log = load_log_;
- }
-
- const GURL url_;
- scoped_refptr<net::LoadLog> load_log_;
-
- RequestTracker<TestRequest>::Node request_tracker_node_;
-
- DISALLOW_COPY_AND_ASSIGN(TestRequest);
-};
-
-
-TEST(RequestTrackerTest, BasicBounded) {
- RequestTracker<TestRequest> tracker;
- EXPECT_FALSE(tracker.IsUnbounded());
- EXPECT_EQ(0u, tracker.GetLiveRequests().size());
- EXPECT_EQ(0u, tracker.GetRecentlyDeceased().size());
-
- TestRequest req1(GURL("http://req1"));
- TestRequest req2(GURL("http://req2"));
- TestRequest req3(GURL("http://req3"));
- TestRequest req4(GURL("http://req4"));
- TestRequest req5(GURL("http://req5"));
-
- tracker.Add(&req1);
- tracker.Add(&req2);
- tracker.Add(&req3);
- tracker.Add(&req4);
- tracker.Add(&req5);
-
- std::vector<TestRequest*> live_reqs = tracker.GetLiveRequests();
-
- ASSERT_EQ(5u, live_reqs.size());
- EXPECT_EQ(GURL("http://req1"), live_reqs[0]->original_url());
- EXPECT_EQ(GURL("http://req2"), live_reqs[1]->original_url());
- EXPECT_EQ(GURL("http://req3"), live_reqs[2]->original_url());
- EXPECT_EQ(GURL("http://req4"), live_reqs[3]->original_url());
- EXPECT_EQ(GURL("http://req5"), live_reqs[4]->original_url());
-
- tracker.Remove(&req1);
- tracker.Remove(&req5);
- tracker.Remove(&req3);
-
- ASSERT_EQ(3u, tracker.GetRecentlyDeceased().size());
-
- live_reqs = tracker.GetLiveRequests();
-
- ASSERT_EQ(2u, live_reqs.size());
- EXPECT_EQ(GURL("http://req2"), live_reqs[0]->original_url());
- EXPECT_EQ(GURL("http://req4"), live_reqs[1]->original_url());
-}
-
-TEST(RequestTrackerTest, GraveyardBounded) {
- RequestTracker<TestRequest> tracker;
- EXPECT_FALSE(tracker.IsUnbounded());
- EXPECT_EQ(0u, tracker.GetLiveRequests().size());
- EXPECT_EQ(0u, tracker.GetRecentlyDeceased().size());
-
- // Add twice as many requests as will fit in the graveyard.
- for (size_t i = 0;
- i < RequestTracker<TestRequest>::kMaxGraveyardSize * 2;
- ++i) {
- TestRequest req(GURL(StringPrintf("http://req%" PRIuS, i).c_str()));
- tracker.Add(&req);
- tracker.Remove(&req);
- }
-
- // Check that only the last |kMaxGraveyardSize| requests are in-memory.
-
- RequestTracker<TestRequest>::RecentRequestInfoList recent_reqs =
- tracker.GetRecentlyDeceased();
-
- ASSERT_EQ(RequestTracker<TestRequest>::kMaxGraveyardSize, recent_reqs.size());
-
- for (size_t i = 0; i < RequestTracker<TestRequest>::kMaxGraveyardSize; ++i) {
- size_t req_number = i + RequestTracker<TestRequest>::kMaxGraveyardSize;
- GURL url(StringPrintf("http://req%" PRIuS, req_number).c_str());
- EXPECT_EQ(url, recent_reqs[i].original_url);
- }
-}
-
-TEST(RequestTrackerTest, GraveyardUnbounded) {
- RequestTracker<TestRequest> tracker;
- EXPECT_FALSE(tracker.IsUnbounded());
- EXPECT_EQ(0u, tracker.GetLiveRequests().size());
- EXPECT_EQ(0u, tracker.GetRecentlyDeceased().size());
-
- tracker.SetUnbounded(true);
-
- EXPECT_TRUE(tracker.IsUnbounded());
-
- // Add twice as many requests as would fit in the bounded graveyard.
-
- size_t kMaxSize = RequestTracker<TestRequest>::kMaxGraveyardSize * 2;
- for (size_t i = 0; i < kMaxSize; ++i) {
- TestRequest req(GURL(StringPrintf("http://req%" PRIuS, i).c_str()));
- tracker.Add(&req);
- tracker.Remove(&req);
- }
-
- // Check that all of them got saved.
-
- RequestTracker<TestRequest>::RecentRequestInfoList recent_reqs =
- tracker.GetRecentlyDeceased();
-
- ASSERT_EQ(kMaxSize, recent_reqs.size());
-
- for (size_t i = 0; i < kMaxSize; ++i) {
- GURL url(StringPrintf("http://req%" PRIuS, i).c_str());
- EXPECT_EQ(url, recent_reqs[i].original_url);
- }
-}
-
-// Check that very long URLs are truncated.
-TEST(RequestTrackerTest, GraveyardURLBounded) {
- RequestTracker<TestRequest> tracker;
- EXPECT_FALSE(tracker.IsUnbounded());
-
- std::string big_url_spec("http://");
- big_url_spec.resize(2 * RequestTracker<TestRequest>::kMaxGraveyardURLSize,
- 'x');
- GURL big_url(big_url_spec);
- TestRequest req(big_url);
-
- tracker.Add(&req);
- tracker.Remove(&req);
-
- ASSERT_EQ(1u, tracker.GetRecentlyDeceased().size());
- // The +1 is because GURL canonicalizes with a trailing '/' ... maybe
- // we should just save the std::string rather than the GURL.
- EXPECT_EQ(RequestTracker<TestRequest>::kMaxGraveyardURLSize + 1,
- tracker.GetRecentlyDeceased()[0].original_url.spec().size());
-}
-
-// Test that tracking doesn't fail if the URL is invalid. http://crbug.com/21423.
-TEST(URLRequestTrackerTest, TrackingInvalidURL) {
- RequestTracker<TestRequest> tracker;
- EXPECT_FALSE(tracker.IsUnbounded());
-
- EXPECT_EQ(0u, tracker.GetLiveRequests().size());
- EXPECT_EQ(0u, tracker.GetRecentlyDeceased().size());
-
- GURL invalid_url("xabc");
- EXPECT_FALSE(invalid_url.is_valid());
- TestRequest req(invalid_url);
-
- tracker.Add(&req);
- tracker.Remove(&req);
-
-  // Check that the invalid URL made it into the graveyard.
- ASSERT_EQ(1u, tracker.GetRecentlyDeceased().size());
- EXPECT_FALSE(tracker.GetRecentlyDeceased()[0].original_url.is_valid());
-}
-
-bool ShouldRequestBeAddedToGraveyard(const GURL& url) {
- return !url.SchemeIs("chrome") && !url.SchemeIs("data");
-}
-
-// Check that we can exclude "chrome://" URLs and "data:" URLs from being
-// saved into the recent requests list (graveyard), by using a filter.
-TEST(RequestTrackerTest, GraveyardCanBeFiltered) {
- RequestTracker<TestRequest> tracker;
- EXPECT_FALSE(tracker.IsUnbounded());
-
- tracker.SetGraveyardFilter(ShouldRequestBeAddedToGraveyard);
-
- // This will be excluded.
- GURL url1("chrome://dontcare/");
- TestRequest req1(url1);
- tracker.Add(&req1);
- tracker.Remove(&req1);
-
-  // This will be added to the graveyard.
- GURL url2("chrome2://dontcare/");
- TestRequest req2(url2);
- tracker.Add(&req2);
- tracker.Remove(&req2);
-
-  // This will be added to the graveyard.
- GURL url3("http://foo/");
- TestRequest req3(url3);
- tracker.Add(&req3);
- tracker.Remove(&req3);
-
-  // This will be excluded.
- GURL url4("data:sup");
- TestRequest req4(url4);
- tracker.Add(&req4);
- tracker.Remove(&req4);
-
- ASSERT_EQ(2u, tracker.GetRecentlyDeceased().size());
- EXPECT_EQ(url2, tracker.GetRecentlyDeceased()[0].original_url);
- EXPECT_EQ(url3, tracker.GetRecentlyDeceased()[1].original_url);
-}
-
-// Convert an unbounded tracker back to being bounded.
-TEST(RequestTrackerTest, ConvertUnboundedToBounded) {
- RequestTracker<TestRequest> tracker;
- EXPECT_FALSE(tracker.IsUnbounded());
- EXPECT_EQ(0u, tracker.GetLiveRequests().size());
- EXPECT_EQ(0u, tracker.GetRecentlyDeceased().size());
-
- tracker.SetUnbounded(true);
- EXPECT_TRUE(tracker.IsUnbounded());
-
- // Add twice as many requests as would fit in the bounded graveyard.
-
- size_t kMaxSize = RequestTracker<TestRequest>::kMaxGraveyardSize * 2;
- for (size_t i = 0; i < kMaxSize; ++i) {
- TestRequest req(GURL(StringPrintf("http://req%" PRIuS, i).c_str()));
- tracker.Add(&req);
- tracker.Remove(&req);
- }
-
- // Check that all of them got saved.
- ASSERT_EQ(kMaxSize, tracker.GetRecentlyDeceased().size());
-
- // Now make the tracker bounded, and add more entries to its graveyard.
- tracker.SetUnbounded(false);
-
- kMaxSize = RequestTracker<TestRequest>::kMaxGraveyardSize;
- for (size_t i = 0; i < kMaxSize; ++i) {
- TestRequest req(GURL(StringPrintf("http://req%" PRIuS, i).c_str()));
- tracker.Add(&req);
- tracker.Remove(&req);
- }
-
- // We should only have kMaxGraveyardSize entries now.
- ASSERT_EQ(kMaxSize, tracker.GetRecentlyDeceased().size());
-}
-
-} // namespace
diff --git a/net/url_request/url_request.cc b/net/url_request/url_request.cc
index 5c4566e..a44792d 100644
--- a/net/url_request/url_request.cc
+++ b/net/url_request/url_request.cc
@@ -9,8 +9,8 @@
#include "base/singleton.h"
#include "base/stats_counters.h"
#include "net/base/load_flags.h"
-#include "net/base/load_log.h"
#include "net/base/net_errors.h"
+#include "net/base/net_log.h"
#include "net/base/ssl_cert_request_info.h"
#include "net/base/upload_data.h"
#include "net/http/http_response_headers.h"
@@ -44,8 +44,7 @@ URLRequest::URLRequest(const GURL& url, Delegate* delegate)
enable_profiling_(false),
redirect_limit_(kMaxRedirects),
final_upload_progress_(0),
- priority_(net::LOWEST),
- ALLOW_THIS_IN_INITIALIZER_LIST(request_tracker_node_(this)) {
+ priority_(net::LOWEST) {
SIMPLE_STATS_COUNTER("URLRequestCount");
// Sanity check our environment.
@@ -256,7 +255,7 @@ void URLRequest::StartJob(URLRequestJob* job) {
DCHECK(!is_pending_);
DCHECK(!job_);
- net::LoadLog::BeginEvent(load_log_, net::LoadLog::TYPE_URL_REQUEST_START);
+ net_log_.BeginEvent(net::NetLog::TYPE_URL_REQUEST_START);
job_ = job;
job_->SetExtraRequestHeaders(extra_request_headers_);
@@ -363,9 +362,9 @@ void URLRequest::ReceivedRedirect(const GURL& location, bool* defer_redirect) {
void URLRequest::ResponseStarted() {
if (!status_.is_success())
- net::LoadLog::AddErrorCode(load_log_, status_.os_error());
+ net_log_.AddErrorCode(status_.os_error());
- net::LoadLog::EndEvent(load_log_, net::LoadLog::TYPE_URL_REQUEST_START);
+ net_log_.EndEvent(net::NetLog::TYPE_URL_REQUEST_START);
URLRequestJob* job = GetJobManager()->MaybeInterceptResponse(this);
if (job) {
@@ -438,8 +437,8 @@ std::string URLRequest::StripPostSpecificHeaders(const std::string& headers) {
}
int URLRequest::Redirect(const GURL& location, int http_status_code) {
- if (net::LoadLog::IsUnbounded(load_log_)) {
- net::LoadLog::AddString(load_log_, StringPrintf("Redirected (%d) to %s",
+ if (net_log_.HasListener()) {
+ net_log_.AddString(StringPrintf("Redirected (%d) to %s",
http_status_code, location.spec().c_str()));
}
if (redirect_limit_ <= 0) {
@@ -504,18 +503,17 @@ void URLRequest::set_context(URLRequestContext* context) {
context_ = context;
- // If the context this request belongs to has changed, update the tracker(s).
+ // If the context this request belongs to has changed, update the tracker.
if (prev_context != context) {
- if (prev_context)
- prev_context->url_request_tracker()->Remove(this);
+ net_log_.EndEvent(net::NetLog::TYPE_REQUEST_ALIVE);
+ net_log_ = net::BoundNetLog();
+
if (context) {
- if (!load_log_) {
- // Create the LoadLog -- we waited until now to create it so we know
- // what constraints the URLRequestContext is enforcing on log levels.
- load_log_ = context->url_request_tracker()->CreateLoadLog();
- }
+ net_log_ = net::BoundNetLog::Make(context->net_log(),
+ net::NetLog::SOURCE_URL_REQUEST);
- context->url_request_tracker()->Add(this);
+ net_log_.BeginEventWithString(net::NetLog::TYPE_REQUEST_ALIVE,
+ original_url_.possibly_invalid_spec());
}
}
}
@@ -538,10 +536,3 @@ URLRequest::UserData* URLRequest::GetUserData(const void* key) const {
void URLRequest::SetUserData(const void* key, UserData* data) {
user_data_[key] = linked_ptr<UserData>(data);
}
-
-void URLRequest::GetInfoForTracker(
- RequestTracker<URLRequest>::RecentRequestInfo* info) const {
- DCHECK(info);
- info->original_url = original_url_;
- info->load_log = load_log_;
-}
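The set_context() hunk above carries the core lifetime change: the per-request BoundNetLog replaces the tracker registration. Reassembled for readability (the capture of |prev_context| happens just above the hunk and is paraphrased in a comment), the post-patch logic reads roughly:

    void URLRequest::set_context(URLRequestContext* context) {
      // (Not shown in the hunk: the old context is saved as |prev_context|
      // before |context_| is reassigned.)
      context_ = context;

      // If the context this request belongs to has changed, update the tracker.
      if (prev_context != context) {
        // Close out the log bound to the previous context and detach from it.
        net_log_.EndEvent(net::NetLog::TYPE_REQUEST_ALIVE);
        net_log_ = net::BoundNetLog();

        if (context) {
          // Bind to the new context's NetLog and mark this request as alive,
          // recording the original URL as the event's string payload.
          net_log_ = net::BoundNetLog::Make(context->net_log(),
                                            net::NetLog::SOURCE_URL_REQUEST);
          net_log_.BeginEventWithString(net::NetLog::TYPE_REQUEST_ALIVE,
                                        original_url_.possibly_invalid_spec());
        }
      }
    }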
diff --git a/net/url_request/url_request.h b/net/url_request/url_request.h
index c8bc2bf..81d2436 100644
--- a/net/url_request/url_request.h
+++ b/net/url_request/url_request.h
@@ -16,11 +16,10 @@
#include "base/ref_counted.h"
#include "base/scoped_ptr.h"
#include "googleurl/src/gurl.h"
-#include "net/base/load_log.h"
#include "net/base/load_states.h"
+#include "net/base/net_log.h"
#include "net/base/request_priority.h"
#include "net/http/http_response_info.h"
-#include "net/url_request/request_tracker.h"
#include "net/url_request/url_request_status.h"
namespace base {
@@ -510,7 +509,7 @@ class URLRequest {
URLRequestContext* context();
void set_context(URLRequestContext* context);
- net::LoadLog* load_log() { return load_log_; }
+ const net::BoundNetLog& net_log() const { return net_log_; }
// Returns the expected content size if available
int64 GetExpectedContentSize() const;
@@ -551,7 +550,6 @@ class URLRequest {
private:
friend class URLRequestJob;
- friend class RequestTracker<URLRequest>;
void StartJob(URLRequestJob* job);
@@ -573,18 +571,13 @@ class URLRequest {
// Origin).
static std::string StripPostSpecificHeaders(const std::string& headers);
- // Gets the goodies out of this that we want to show the user later on the
- // chrome://net-internals/ page.
- void GetInfoForTracker(
- RequestTracker<URLRequest>::RecentRequestInfo* info) const;
-
// Contextual information used for this request (can be NULL). This contains
// most of the dependencies which are shared between requests (disk cache,
// cookie store, socket pool, etc.)
scoped_refptr<URLRequestContext> context_;
// Tracks the time spent in various load states throughout this request.
- scoped_refptr<net::LoadLog> load_log_;
+ net::BoundNetLog net_log_;
scoped_refptr<URLRequestJob> job_;
scoped_refptr<net::UploadData> upload_;
@@ -631,7 +624,6 @@ class URLRequest {
// this to determine which URLRequest to allocate sockets to first.
net::RequestPriority priority_;
- RequestTracker<URLRequest>::Node request_tracker_node_;
base::LeakTracker<URLRequest> leak_tracker_;
DISALLOW_COPY_AND_ASSIGN(URLRequest);
diff --git a/net/url_request/url_request_context.h b/net/url_request/url_request_context.h
index fb2608f..070942f 100644
--- a/net/url_request/url_request_context.h
+++ b/net/url_request/url_request_context.h
@@ -14,11 +14,11 @@
#include "base/string_util.h"
#include "net/base/cookie_store.h"
#include "net/base/host_resolver.h"
+#include "net/base/net_log.h"
#include "net/base/ssl_config_service.h"
#include "net/base/transport_security_state.h"
#include "net/ftp/ftp_auth_cache.h"
#include "net/proxy/proxy_service.h"
-#include "net/url_request/request_tracker.h"
namespace net {
class CookiePolicy;
@@ -34,12 +34,17 @@ class URLRequestContext :
public base::RefCountedThreadSafe<URLRequestContext> {
public:
URLRequestContext()
- : http_transaction_factory_(NULL),
+ : net_log_(NULL),
+ http_transaction_factory_(NULL),
ftp_transaction_factory_(NULL),
cookie_policy_(NULL),
transport_security_state_(NULL) {
}
+ net::NetLog* net_log() const {
+ return net_log_;
+ }
+
net::HostResolver* host_resolver() const {
return host_resolver_;
}
@@ -90,16 +95,6 @@ class URLRequestContext :
// Gets the value of 'Accept-Language' header field.
const std::string& accept_language() const { return accept_language_; }
- // Gets the tracker for URLRequests associated with this context.
- RequestTracker<URLRequest>* url_request_tracker() {
- return &url_request_tracker_;
- }
-
- // Gets the tracker for SocketStreams associated with this context.
- RequestTracker<net::SocketStream>* socket_stream_tracker() {
- return &socket_stream_tracker_;
- }
-
// Gets the UA string to use for the given URL. Pass an invalid URL (such as
// GURL()) to get the default UA string. Subclasses should override this
// method to provide a UA string.
@@ -135,6 +130,7 @@ class URLRequestContext :
// The following members are expected to be initialized and owned by
// subclasses.
+ net::NetLog* net_log_;
scoped_refptr<net::HostResolver> host_resolver_;
scoped_refptr<net::ProxyService> proxy_service_;
scoped_refptr<net::SSLConfigService> ssl_config_service_;
@@ -152,12 +148,6 @@ class URLRequestContext :
// filename for file download.
std::string referrer_charset_;
- // Tracks the requests associated with this context.
- RequestTracker<URLRequest> url_request_tracker_;
-
-  // Tracks the socket streams associated with this context.
- RequestTracker<net::SocketStream> socket_stream_tracker_;
-
private:
DISALLOW_COPY_AND_ASSIGN(URLRequestContext);
};
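Unlike the ref-counted members around it, |net_log_| is a raw pointer that a subclass is expected to initialize and own. A hypothetical subclass (MyRequestContext is illustrative, not part of this change) would wire it up roughly like this:

    class MyRequestContext : public URLRequestContext {
     public:
      explicit MyRequestContext(net::NetLog* net_log) {
        // Raw pointer assignment: the NetLog must outlive this context.
        net_log_ = net_log;
      }
    };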
diff --git a/net/url_request/url_request_http_job.cc b/net/url_request/url_request_http_job.cc
index 8f2412e..783ee42 100644
--- a/net/url_request/url_request_http_job.cc
+++ b/net/url_request/url_request_http_job.cc
@@ -617,7 +617,7 @@ void URLRequestHttpJob::StartTransaction() {
&transaction_);
if (rv == net::OK) {
rv = transaction_->Start(
- &request_info_, &start_callback_, request_->load_log());
+ &request_info_, &start_callback_, request_->net_log());
}
}
diff --git a/net/url_request/url_request_new_ftp_job.cc b/net/url_request/url_request_new_ftp_job.cc
index 171eed9..1af7eda 100644
--- a/net/url_request/url_request_new_ftp_job.cc
+++ b/net/url_request/url_request_new_ftp_job.cc
@@ -225,7 +225,7 @@ void URLRequestNewFtpJob::StartTransaction() {
int rv;
if (transaction_.get()) {
rv = transaction_->Start(
- &request_info_, &start_callback_, request_->load_log());
+ &request_info_, &start_callback_, request_->net_log());
if (rv == net::ERR_IO_PENDING)
return;
} else {
diff --git a/net/url_request/url_request_unittest.cc b/net/url_request/url_request_unittest.cc
index a1f0cf5e..dc39667 100644
--- a/net/url_request/url_request_unittest.cc
+++ b/net/url_request/url_request_unittest.cc
@@ -26,8 +26,8 @@
#include "net/base/cookie_monster.h"
#include "net/base/cookie_policy.h"
#include "net/base/load_flags.h"
-#include "net/base/load_log.h"
-#include "net/base/load_log_unittest.h"
+#include "net/base/net_log.h"
+#include "net/base/net_log_unittest.h"
#include "net/base/net_errors.h"
#include "net/base/net_module.h"
#include "net/base/net_util.h"
@@ -217,16 +217,7 @@ TEST_F(URLRequestTestHTTP, GetTest_NoCache) {
EXPECT_FALSE(d.received_data_before_response());
EXPECT_NE(0, d.bytes_received());
- // The first part of the log will be for URL_REQUEST_START.
- // After that, there should be an HTTP_TRANSACTION_READ_BODY
- EXPECT_TRUE(net::LogContainsBeginEvent(
- *r.load_log(), 0, net::LoadLog::TYPE_URL_REQUEST_START));
- EXPECT_TRUE(net::LogContainsEndEvent(
- *r.load_log(), -3, net::LoadLog::TYPE_URL_REQUEST_START));
- EXPECT_TRUE(net::LogContainsBeginEvent(
- *r.load_log(), -2, net::LoadLog::TYPE_HTTP_TRANSACTION_READ_BODY));
- EXPECT_TRUE(net::LogContainsEndEvent(
- *r.load_log(), -1, net::LoadLog::TYPE_HTTP_TRANSACTION_READ_BODY));
+ // TODO(eroman): Add back the NetLog tests...
}
}