author     eroman@chromium.org <eroman@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>  2009-10-22 20:06:52 +0000
committer  eroman@chromium.org <eroman@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>  2009-10-22 20:06:52 +0000
commit     7bffd08c27787a806bd89529e694ffa3f90883cb (patch)
tree       e144b438606c783dc81506534ec5805b00739f44 /net/url_request
parent     8d1c406dc8ae48e357ea315df4e9c965eb8a811a (diff)
download   chromium_src-7bffd08c27787a806bd89529e694ffa3f90883cb.zip
           chromium_src-7bffd08c27787a806bd89529e694ffa3f90883cb.tar.gz
           chromium_src-7bffd08c27787a806bd89529e694ffa3f90883cb.tar.bz2
Change the request tracking done by about:net-internals to be per-context rather than global across all contexts.
Before, there was a singleton "request tracker" (URLRequest::InstanceTracker) that kept track of all outstanding requests and recently completed ones.
Now, each URLRequestContext gets its own "request tracker" (URLRequestTracker) to track the requests associated with that context.
This change limits the lifetime of information relating to incognito windows. Before, you could see the recent requests issued by incognito windows even after the last incognito window was closed (by loading about:net-internals in a non-incognito window).
Now you can only see incognito request information by loading "about:net-internals" within an incognito tab. And once the last incognito tab is closed, the OTR context is destroyed, which in turn destroys any profiling information that was being stored.
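In outline, the new ownership model looks like the following standalone sketch (a simplified illustration, not the actual Chromium classes: Tracker, Context, and Remember() are invented stand-ins for URLRequestTracker, URLRequestContext, and InsertIntoGraveyard() in this patch). Each context owns its tracker by value, and the tracker's "graveyard" of completed requests is a fixed-size circular buffer that overwrites its oldest entry:

  #include <cstddef>
  #include <iostream>
  #include <string>
  #include <vector>

  class Tracker {
   public:
    static constexpr size_t kMaxGraveyardSize = 25;

    // Record a finished request: once the graveyard is full, the oldest
    // entry is overwritten (same scheme as InsertIntoGraveyard() in the
    // patch below).
    void Remember(const std::string& url) {
      if (graveyard_.size() < kMaxGraveyardSize) {
        graveyard_.push_back(url);      // Still growing to capacity.
      } else {
        graveyard_[next_index_] = url;  // Overwrite the oldest entry.
      }
      next_index_ = (next_index_ + 1) % kMaxGraveyardSize;
    }

    // Snapshot ordered oldest-to-newest (mirrors GetRecentlyDeceased()).
    std::vector<std::string> Recent() const {
      std::vector<std::string> out;
      for (size_t i = 0; i < graveyard_.size(); ++i)
        out.push_back(graveyard_[(next_index_ + i) % graveyard_.size()]);
      return out;
    }

   private:
    size_t next_index_ = 0;
    std::vector<std::string> graveyard_;
  };

  // The context owns the tracker, so destroying the context (e.g. the OTR
  // context dying when the last incognito window closes) destroys the
  // recorded history with it.
  struct Context {
    Tracker tracker;
  };

  int main() {
    {
      Context otr;  // Stands in for the off-the-record URLRequestContext.
      otr.tracker.Remember("http://private-request/");
      std::cout << otr.tracker.Recent().size() << " recent request(s)\n";
    }  // |otr| is destroyed here, and its request history with it.
  }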
BUG=24630
Review URL: http://codereview.chromium.org/295050
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@29804 0039d316-1c4b-4281-b951-d872f2087c98
Diffstat (limited to 'net/url_request')
-rw-r--r--  net/url_request/url_request.cc                          118
-rw-r--r--  net/url_request/url_request.h                            73
-rw-r--r--  net/url_request/url_request_context.h                     7
-rw-r--r--  net/url_request/url_request_tracker.cc                   85
-rw-r--r--  net/url_request/url_request_tracker.h                    76
-rw-r--r--  net/url_request/url_request_tracker_unittest.cc         116
-rw-r--r--  net/url_request/url_request_unittest.cc                 107
-rw-r--r--  net/url_request/url_request_view_net_internals_job.cc    17
8 files changed, 306 insertions, 293 deletions
diff --git a/net/url_request/url_request.cc b/net/url_request/url_request.cc
index 0919a7b..ad13b6f 100644
--- a/net/url_request/url_request.cc
+++ b/net/url_request/url_request.cc
@@ -8,7 +8,6 @@
 #include "base/message_loop.h"
 #include "base/singleton.h"
 #include "base/stats_counters.h"
-#include "base/string_util.h"
 #include "net/base/load_flags.h"
 #include "net/base/load_log.h"
 #include "net/base/net_errors.h"
@@ -33,97 +32,6 @@ static URLRequestJobManager* GetJobManager() {
 }
 
 ///////////////////////////////////////////////////////////////////////////////
-// URLRequest::InstanceTracker
-
-const size_t URLRequest::InstanceTracker::kMaxGraveyardSize = 25;
-const size_t URLRequest::InstanceTracker::kMaxGraveyardURLSize = 1000;
-
-URLRequest::InstanceTracker::~InstanceTracker() {
-  base::LeakTracker<URLRequest>::CheckForLeaks();
-
-  // Only check in Debug mode, because this is triggered too often.
-  // See http://crbug.com/21199, http://crbug.com/18372
-  DCHECK_EQ(0u, GetLiveRequests().size());
-}
-
-// static
-URLRequest::InstanceTracker* URLRequest::InstanceTracker::Get() {
-  return Singleton<InstanceTracker>::get();
-}
-
-std::vector<URLRequest*> URLRequest::InstanceTracker::GetLiveRequests() {
-  std::vector<URLRequest*> list;
-  for (base::LinkNode<InstanceTrackerNode>* node = live_instances_.head();
-       node != live_instances_.end();
-       node = node->next()) {
-    URLRequest* url_request = node->value()->url_request();
-    list.push_back(url_request);
-  }
-  return list;
-}
-
-void URLRequest::InstanceTracker::ClearRecentlyDeceased() {
-  next_graveyard_index_ = 0;
-  graveyard_.clear();
-}
-
-const URLRequest::InstanceTracker::RecentRequestInfoList
-URLRequest::InstanceTracker::GetRecentlyDeceased() {
-  RecentRequestInfoList list;
-
-  // Copy the items from |graveyard_| (our circular queue of recently
-  // deceased request infos) into a vector, ordered from oldest to
-  // newest.
-  for (size_t i = 0; i < graveyard_.size(); ++i) {
-    size_t index = (next_graveyard_index_ + i) % graveyard_.size();
-    list.push_back(graveyard_[index]);
-  }
-  return list;
-}
-
-URLRequest::InstanceTracker::InstanceTracker() : next_graveyard_index_(0) {}
-
-void URLRequest::InstanceTracker::Add(InstanceTrackerNode* node) {
-  live_instances_.Append(node);
-}
-
-void URLRequest::InstanceTracker::Remove(InstanceTrackerNode* node) {
-  // Remove from |live_instances_|.
-  node->RemoveFromList();
-
-  // Add into |graveyard_|.
-  InsertIntoGraveyard(ExtractInfo(node->url_request()));
-}
-
-// static
-const URLRequest::InstanceTracker::RecentRequestInfo
-URLRequest::InstanceTracker::ExtractInfo(URLRequest* url_request) {
-  RecentRequestInfo info;
-  info.original_url = url_request->original_url();
-  info.load_log = url_request->load_log();
-
-  // Paranoia check: truncate |info.original_url| if it is really big.
-  const std::string& spec = info.original_url.possibly_invalid_spec();
-  if (spec.size() > kMaxGraveyardURLSize)
-    info.original_url = GURL(spec.substr(0, kMaxGraveyardURLSize));
-  return info;
-}
-
-void URLRequest::InstanceTracker::InsertIntoGraveyard(
-    const RecentRequestInfo& info) {
-  if (graveyard_.size() < kMaxGraveyardSize) {
-    // Still growing to maximum capacity.
-    DCHECK_EQ(next_graveyard_index_, graveyard_.size());
-    graveyard_.push_back(info);
-  } else {
-    // At maximum capacity, overwrite the oldest entry.
-    graveyard_[next_graveyard_index_] = info;
-  }
-
-  next_graveyard_index_ = (next_graveyard_index_ + 1) % kMaxGraveyardSize;
-}
-
-///////////////////////////////////////////////////////////////////////////////
 // URLRequest
 
 URLRequest::URLRequest(const GURL& url, Delegate* delegate)
@@ -138,7 +46,7 @@ URLRequest::URLRequest(const GURL& url, Delegate* delegate)
       redirect_limit_(kMaxRedirects),
       final_upload_progress_(0),
       priority_(0),
-      ALLOW_THIS_IN_INITIALIZER_LIST(instance_tracker_node_(this)) {
+      ALLOW_THIS_IN_INITIALIZER_LIST(url_request_tracker_node_(this)) {
   SIMPLE_STATS_COUNTER("URLRequestCount");
 
   // Sanity check out environment.
@@ -153,6 +61,8 @@ URLRequest::~URLRequest() {
 
   if (job_)
     OrphanJob();
+
+  set_context(NULL);
 }
 
 // static
@@ -343,18 +253,6 @@ void URLRequest::Start() {
 }
 
 ///////////////////////////////////////////////////////////////////////////////
-// URLRequest::InstanceTrackerNode
-
-URLRequest::InstanceTrackerNode::
-InstanceTrackerNode(URLRequest* url_request) : url_request_(url_request) {
-  InstanceTracker::Get()->Add(this);
-}
-
-URLRequest::InstanceTrackerNode::~InstanceTrackerNode() {
-  InstanceTracker::Get()->Remove(this);
-}
-
-///////////////////////////////////////////////////////////////////////////////
 
 void URLRequest::StartJob(URLRequestJob* job) {
   DCHECK(!is_pending_);
@@ -587,7 +485,17 @@ URLRequestContext* URLRequest::context() {
 }
 
 void URLRequest::set_context(URLRequestContext* context) {
+  scoped_refptr<URLRequestContext> prev_context = context_;
+
   context_ = context;
+
+  // If the context this request belongs to has changed, update the tracker(s).
+  if (prev_context != context) {
+    if (prev_context)
+      prev_context->request_tracker()->Remove(this);
+    if (context)
+      context->request_tracker()->Add(this);
+  }
 }
 
 int64 URLRequest::GetExpectedContentSize() const {
diff --git a/net/url_request/url_request.h b/net/url_request/url_request.h
index d91d684..e6aaf47 100644
--- a/net/url_request/url_request.h
+++ b/net/url_request/url_request.h
@@ -20,6 +20,7 @@
 #include "net/base/load_states.h"
 #include "net/http/http_response_info.h"
 #include "net/url_request/url_request_status.h"
+#include "net/url_request/url_request_tracker.h"
 
 namespace base {
 class Time;
@@ -203,8 +204,6 @@ class URLRequest {
     virtual void OnReadCompleted(URLRequest* request, int bytes_read) = 0;
   };
 
-  class InstanceTracker;
-
   // Initialize an URL request.
   URLRequest(const GURL& url, Delegate* delegate);
 
@@ -529,19 +528,7 @@ class URLRequest {
 
  private:
   friend class URLRequestJob;
-
-  // Helper class to make URLRequest insertable into a base::LinkedList,
-  // without making the public interface expose base::LinkNode.
-  class InstanceTrackerNode : public base::LinkNode<InstanceTrackerNode> {
-   public:
-    InstanceTrackerNode(URLRequest* url_request);
-    ~InstanceTrackerNode();
-
-    URLRequest* url_request() const { return url_request_; }
-
-   private:
-    URLRequest* url_request_;
-  };
+  friend class URLRequestTracker;
 
   void StartJob(URLRequestJob* job);
 
@@ -616,64 +603,10 @@ class URLRequest {
   // this to determine which URLRequest to allocate sockets to first.
   int priority_;
 
-  InstanceTrackerNode instance_tracker_node_;
+  URLRequestTracker::Node url_request_tracker_node_;
 
   base::LeakTracker<URLRequest> leak_tracker_;
 
   DISALLOW_COPY_AND_ASSIGN(URLRequest);
 };
 
-// ----------------------------------------------------------------------
-// Singleton to track all of the live instances of URLRequest, and
-// keep a circular queue of the LoadLogs for recently deceased requests.
-//
-class URLRequest::InstanceTracker {
- public:
-  struct RecentRequestInfo {
-    GURL original_url;
-    scoped_refptr<net::LoadLog> load_log;
-  };
-
-  typedef std::vector<RecentRequestInfo> RecentRequestInfoList;
-
-  // The maximum number of entries for |graveyard_|.
-  static const size_t kMaxGraveyardSize;
-
-  // The maximum size of URLs to stuff into RecentRequestInfo.
-  static const size_t kMaxGraveyardURLSize;
-
-  ~InstanceTracker();
-
-  // Returns the singleton instance of InstanceTracker.
-  static InstanceTracker* Get();
-
-  // Returns a list of URLRequests that are alive.
-  std::vector<URLRequest*> GetLiveRequests();
-
-  // Clears the circular buffer of RecentRequestInfos.
-  void ClearRecentlyDeceased();
-
-  // Returns a list of recently completed URLRequests.
-  const RecentRequestInfoList GetRecentlyDeceased();
-
- private:
-  friend class URLRequest;
-  friend struct DefaultSingletonTraits<InstanceTracker>;
-
-  InstanceTracker();
-
-  void Add(InstanceTrackerNode* node);
-  void Remove(InstanceTrackerNode* node);
-
-  // Copy the goodies out of |url_request| that we want to show the
-  // user later on the about:net-internal page.
-  static const RecentRequestInfo ExtractInfo(URLRequest* url_request);
-
-  void InsertIntoGraveyard(const RecentRequestInfo& info);
-
-  base::LinkedList<InstanceTrackerNode> live_instances_;
-
-  size_t next_graveyard_index_;
-  RecentRequestInfoList graveyard_;
-};
-
 #endif  // NET_URL_REQUEST_URL_REQUEST_H_
diff --git a/net/url_request/url_request_context.h b/net/url_request/url_request_context.h
index a1cc345..67b378a 100644
--- a/net/url_request/url_request_context.h
+++ b/net/url_request/url_request_context.h
@@ -19,6 +19,7 @@
 #include "net/base/strict_transport_security_state.h"
 #include "net/ftp/ftp_auth_cache.h"
 #include "net/proxy/proxy_service.h"
+#include "net/url_request/url_request_tracker.h"
 
 namespace net {
 class FtpTransactionFactory;
@@ -79,6 +80,9 @@ class URLRequestContext :
   // Gets the value of 'Accept-Language' header field.
   const std::string& accept_language() const { return accept_language_; }
 
+  // Gets the tracker for URLRequests associated with this context.
+  URLRequestTracker* request_tracker() { return &request_tracker_; }
+
   // Gets the UA string to use for the given URL. Pass an invalid URL (such as
   // GURL()) to get the default UA string. Subclasses should override this
   // method to provide a UA string.
@@ -130,6 +134,9 @@ class URLRequestContext :
   // filename for file download.
   std::string referrer_charset_;
 
+  // Tracks the requests associated with this context.
+  URLRequestTracker request_tracker_;
+
  private:
   DISALLOW_COPY_AND_ASSIGN(URLRequestContext);
 };
diff --git a/net/url_request/url_request_tracker.cc b/net/url_request/url_request_tracker.cc
new file mode 100644
index 0000000..14e5bc3
--- /dev/null
+++ b/net/url_request/url_request_tracker.cc
@@ -0,0 +1,85 @@
+// Copyright (c) 2006-2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+ +#include "net/url_request/url_request_tracker.h" + +#include "base/logging.h" +#include "net/url_request/url_request.h" + +const size_t URLRequestTracker::kMaxGraveyardSize = 25; +const size_t URLRequestTracker::kMaxGraveyardURLSize = 1000; + +URLRequestTracker::URLRequestTracker() : next_graveyard_index_(0) {} + +URLRequestTracker::~URLRequestTracker() {} + +std::vector<URLRequest*> URLRequestTracker::GetLiveRequests() { + std::vector<URLRequest*> list; + for (base::LinkNode<Node>* node = live_instances_.head(); + node != live_instances_.end(); + node = node->next()) { + URLRequest* url_request = node->value()->url_request(); + list.push_back(url_request); + } + return list; +} + +void URLRequestTracker::ClearRecentlyDeceased() { + next_graveyard_index_ = 0; + graveyard_.clear(); +} + +const URLRequestTracker::RecentRequestInfoList +URLRequestTracker::GetRecentlyDeceased() { + RecentRequestInfoList list; + + // Copy the items from |graveyard_| (our circular queue of recently + // deceased request infos) into a vector, ordered from oldest to + // newest. + for (size_t i = 0; i < graveyard_.size(); ++i) { + size_t index = (next_graveyard_index_ + i) % graveyard_.size(); + list.push_back(graveyard_[index]); + } + return list; +} + +void URLRequestTracker::Add(URLRequest* url_request) { + live_instances_.Append(&url_request->url_request_tracker_node_); +} + +void URLRequestTracker::Remove(URLRequest* url_request) { + // Remove from |live_instances_|. + url_request->url_request_tracker_node_.RemoveFromList(); + + // Add into |graveyard_|. + InsertIntoGraveyard(ExtractInfo(url_request)); +} + +// static +const URLRequestTracker::RecentRequestInfo +URLRequestTracker::ExtractInfo(URLRequest* url_request) { + RecentRequestInfo info; + info.original_url = url_request->original_url(); + info.load_log = url_request->load_log(); + + // Paranoia check: truncate |info.original_url| if it is really big. + const std::string& spec = info.original_url.possibly_invalid_spec(); + if (spec.size() > kMaxGraveyardURLSize) + info.original_url = GURL(spec.substr(0, kMaxGraveyardURLSize)); + return info; +} + +void URLRequestTracker::InsertIntoGraveyard( + const RecentRequestInfo& info) { + if (graveyard_.size() < kMaxGraveyardSize) { + // Still growing to maximum capacity. + DCHECK_EQ(next_graveyard_index_, graveyard_.size()); + graveyard_.push_back(info); + } else { + // At maximum capacity, overwrite the oldest entry. + graveyard_[next_graveyard_index_] = info; + } + + next_graveyard_index_ = (next_graveyard_index_ + 1) % kMaxGraveyardSize; +} diff --git a/net/url_request/url_request_tracker.h b/net/url_request/url_request_tracker.h new file mode 100644 index 0000000..36d05d1 --- /dev/null +++ b/net/url_request/url_request_tracker.h @@ -0,0 +1,76 @@ +// Copyright (c) 2006-2009 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef NET_URL_REQUEST_URL_REQUEST_TRACKER_H_ +#define NET_URL_REQUEST_URL_REQUEST_TRACKER_H_ + +#include <vector> + +#include "base/ref_counted.h" +#include "base/linked_list.h" +#include "googleurl/src/gurl.h" +#include "net/base/load_log.h" + +class URLRequest; + +// Class to track all of the live instances of URLRequest associated with a +// particular URLRequestContext. It keep a circular queue of the LoadLogs +// for recently deceased requests. 
+class URLRequestTracker {
+ public:
+  struct RecentRequestInfo {
+    GURL original_url;
+    scoped_refptr<net::LoadLog> load_log;
+  };
+
+  // Helper class to make URLRequest insertable into a base::LinkedList,
+  // without making the public interface expose base::LinkNode.
+  class Node : public base::LinkNode<Node> {
+   public:
+    Node(URLRequest* url_request) : url_request_(url_request) {}
+    ~Node() {}
+
+    URLRequest* url_request() const { return url_request_; }
+
+   private:
+    URLRequest* url_request_;
+  };
+
+  typedef std::vector<RecentRequestInfo> RecentRequestInfoList;
+
+  // The maximum number of entries for |graveyard_|.
+  static const size_t kMaxGraveyardSize;
+
+  // The maximum size of URLs to stuff into RecentRequestInfo.
+  static const size_t kMaxGraveyardURLSize;
+
+  URLRequestTracker();
+  ~URLRequestTracker();
+
+  // Returns a list of URLRequests that are alive.
+  std::vector<URLRequest*> GetLiveRequests();
+
+  // Clears the circular buffer of RecentRequestInfos.
+  void ClearRecentlyDeceased();
+
+  // Returns a list of recently completed URLRequests.
+  const RecentRequestInfoList GetRecentlyDeceased();
+
+  void Add(URLRequest* url_request);
+  void Remove(URLRequest* url_request);
+
+ private:
+  // Copy the goodies out of |url_request| that we want to show the
+  // user later on the about:net-internal page.
+  static const RecentRequestInfo ExtractInfo(URLRequest* url_request);
+
+  void InsertIntoGraveyard(const RecentRequestInfo& info);
+
+  base::LinkedList<Node> live_instances_;
+
+  size_t next_graveyard_index_;
+  RecentRequestInfoList graveyard_;
+};
+
+#endif  // NET_URL_REQUEST_URL_REQUEST_TRACKER_H_
diff --git a/net/url_request/url_request_tracker_unittest.cc b/net/url_request/url_request_tracker_unittest.cc
new file mode 100644
index 0000000..53196ac
--- /dev/null
+++ b/net/url_request/url_request_tracker_unittest.cc
@@ -0,0 +1,116 @@
+// Copyright (c) 2006-2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+ +#include "net/url_request/url_request_tracker.h" + +#include "base/string_util.h" +#include "net/url_request/url_request.h" +#include "testing/gtest/include/gtest/gtest.h" + +namespace { + +TEST(URLRequestTrackerTest, Basic) { + URLRequestTracker tracker; + EXPECT_EQ(0u, tracker.GetLiveRequests().size()); + EXPECT_EQ(0u, tracker.GetRecentlyDeceased().size()); + + URLRequest req1(GURL("http://req1"), NULL); + URLRequest req2(GURL("http://req2"), NULL); + URLRequest req3(GURL("http://req3"), NULL); + URLRequest req4(GURL("http://req4"), NULL); + URLRequest req5(GURL("http://req5"), NULL); + + tracker.Add(&req1); + tracker.Add(&req2); + tracker.Add(&req3); + tracker.Add(&req4); + tracker.Add(&req5); + + std::vector<URLRequest*> live_reqs = tracker.GetLiveRequests(); + + ASSERT_EQ(5u, live_reqs.size()); + EXPECT_EQ(GURL("http://req1"), live_reqs[0]->original_url()); + EXPECT_EQ(GURL("http://req2"), live_reqs[1]->original_url()); + EXPECT_EQ(GURL("http://req3"), live_reqs[2]->original_url()); + EXPECT_EQ(GURL("http://req4"), live_reqs[3]->original_url()); + EXPECT_EQ(GURL("http://req5"), live_reqs[4]->original_url()); + + tracker.Remove(&req1); + tracker.Remove(&req5); + tracker.Remove(&req3); + + ASSERT_EQ(3u, tracker.GetRecentlyDeceased().size()); + + live_reqs = tracker.GetLiveRequests(); + + ASSERT_EQ(2u, live_reqs.size()); + EXPECT_EQ(GURL("http://req2"), live_reqs[0]->original_url()); + EXPECT_EQ(GURL("http://req4"), live_reqs[1]->original_url()); +} + +TEST(URLRequestTrackerTest, GraveyardBounded) { + URLRequestTracker tracker; + EXPECT_EQ(0u, tracker.GetLiveRequests().size()); + EXPECT_EQ(0u, tracker.GetRecentlyDeceased().size()); + + // Add twice as many requests as will fit in the graveyard. + for (size_t i = 0; i < URLRequestTracker::kMaxGraveyardSize * 2; ++i) { + URLRequest req(GURL(StringPrintf("http://req%d", i).c_str()), NULL); + tracker.Add(&req); + tracker.Remove(&req); + } + + // Check that only the last |kMaxGraveyardSize| requests are in-memory. + + URLRequestTracker::RecentRequestInfoList recent_reqs = + tracker.GetRecentlyDeceased(); + + ASSERT_EQ(URLRequestTracker::kMaxGraveyardSize, recent_reqs.size()); + + for (size_t i = 0; i < URLRequestTracker::kMaxGraveyardSize; ++i) { + size_t req_number = i + URLRequestTracker::kMaxGraveyardSize; + GURL url(StringPrintf("http://req%d", req_number).c_str()); + EXPECT_EQ(url, recent_reqs[i].original_url); + } +} + +// Check that very long URLs are truncated. +TEST(URLRequestTrackerTest, GraveyardURLBounded) { + URLRequestTracker tracker; + + std::string big_url_spec("http://"); + big_url_spec.resize(2 * URLRequestTracker::kMaxGraveyardURLSize, 'x'); + GURL big_url(big_url_spec); + URLRequest req(big_url, NULL); + + tracker.Add(&req); + tracker.Remove(&req); + + ASSERT_EQ(1u, tracker.GetRecentlyDeceased().size()); + // The +1 is because GURL canonicalizes with a trailing '/' ... maybe + // we should just save the std::string rather than the GURL. + EXPECT_EQ(URLRequestTracker::kMaxGraveyardURLSize + 1, + tracker.GetRecentlyDeceased()[0].original_url.spec().size()); +} + +// Test the doesn't fail if the URL was invalid. http://crbug.com/21423. 
+TEST(URLRequestTrackerTest, TrackingInvalidURL) {
+  URLRequestTracker tracker;
+
+  EXPECT_EQ(0u, tracker.GetLiveRequests().size());
+  EXPECT_EQ(0u, tracker.GetRecentlyDeceased().size());
+
+  GURL invalid_url("xabc");
+  EXPECT_FALSE(invalid_url.is_valid());
+  URLRequest req(invalid_url, NULL);
+
+  tracker.Add(&req);
+  tracker.Remove(&req);
+
+  // Check that the invalid URL made it into graveyard.
+  ASSERT_EQ(1u, tracker.GetRecentlyDeceased().size());
+  EXPECT_FALSE(tracker.GetRecentlyDeceased()[0].original_url.is_valid());
+}
+
+}  // namespace
diff --git a/net/url_request/url_request_unittest.cc b/net/url_request/url_request_unittest.cc
index 21fd22b..0693ac1 100644
--- a/net/url_request/url_request_unittest.cc
+++ b/net/url_request/url_request_unittest.cc
@@ -120,10 +120,6 @@ scoped_refptr<net::UploadData> CreateSimpleUploadData(const char* data) {
 
 // Inherit PlatformTest since we require the autorelease pool on Mac OS X.
 class URLRequestTest : public PlatformTest {
- public:
-  ~URLRequestTest() {
-    EXPECT_EQ(0u, URLRequest::InstanceTracker::Get()->GetLiveRequests().size());
-  }
 };
 
 class URLRequestTestHTTP : public URLRequestTest {
@@ -234,109 +230,6 @@ TEST_F(URLRequestTestHTTP, GetTest) {
   }
 }
 
-// Test the instance tracking functionality of URLRequest.
-TEST_F(URLRequestTest, Tracking) {
-  URLRequest::InstanceTracker::Get()->ClearRecentlyDeceased();
-  EXPECT_EQ(0u, URLRequest::InstanceTracker::Get()->GetLiveRequests().size());
-  EXPECT_EQ(0u,
-            URLRequest::InstanceTracker::Get()->GetRecentlyDeceased().size());
-
-  {
-    URLRequest req1(GURL("http://req1"), NULL);
-    URLRequest req2(GURL("http://req2"), NULL);
-    URLRequest req3(GURL("http://req3"), NULL);
-
-    std::vector<URLRequest*> live_reqs =
-        URLRequest::InstanceTracker::Get()->GetLiveRequests();
-    ASSERT_EQ(3u, live_reqs.size());
-    EXPECT_EQ(GURL("http://req1"), live_reqs[0]->original_url());
-    EXPECT_EQ(GURL("http://req2"), live_reqs[1]->original_url());
-    EXPECT_EQ(GURL("http://req3"), live_reqs[2]->original_url());
-  }
-
-  EXPECT_EQ(0u, URLRequest::InstanceTracker::Get()->GetLiveRequests().size());
-
-  URLRequest::InstanceTracker::RecentRequestInfoList recent_reqs =
-      URLRequest::InstanceTracker::Get()->GetRecentlyDeceased();
-
-  // Note that the order is reversed from definition order, because
-  // this matches the destructor order.
-  ASSERT_EQ(3u, recent_reqs.size());
-  EXPECT_EQ(GURL("http://req3"), recent_reqs[0].original_url);
-  EXPECT_EQ(GURL("http://req2"), recent_reqs[1].original_url);
-  EXPECT_EQ(GURL("http://req1"), recent_reqs[2].original_url);
-}
-
-// Test the instance tracking functionality of URLRequest.
-TEST_F(URLRequestTest, TrackingGraveyardBounded) {
-  URLRequest::InstanceTracker::Get()->ClearRecentlyDeceased();
-  EXPECT_EQ(0u, URLRequest::InstanceTracker::Get()->GetLiveRequests().size());
-  EXPECT_EQ(0u,
-            URLRequest::InstanceTracker::Get()->GetRecentlyDeceased().size());
-
-  const size_t kMaxGraveyardSize =
-      URLRequest::InstanceTracker::kMaxGraveyardSize;
-  const size_t kMaxURLLen = URLRequest::InstanceTracker::kMaxGraveyardURLSize;
-
-  // Add twice as many requests as will fit in the graveyard.
-  for (size_t i = 0; i < kMaxGraveyardSize * 2; ++i)
-    URLRequest req(GURL(StringPrintf("http://req%d", i).c_str()), NULL);
-
-  // Check that only the last |kMaxGraveyardSize| requests are in-memory.
-
-  URLRequest::InstanceTracker::RecentRequestInfoList recent_reqs =
-      URLRequest::InstanceTracker::Get()->GetRecentlyDeceased();
-
-  ASSERT_EQ(kMaxGraveyardSize, recent_reqs.size());
-
-  for (size_t i = 0; i < kMaxGraveyardSize; ++i) {
-    size_t req_number = i + kMaxGraveyardSize;
-    GURL url(StringPrintf("http://req%d", req_number).c_str());
-    EXPECT_EQ(url, recent_reqs[i].original_url);
-  }
-
-  URLRequest::InstanceTracker::Get()->ClearRecentlyDeceased();
-  EXPECT_EQ(0u,
-            URLRequest::InstanceTracker::Get()->GetRecentlyDeceased().size());
-
-  // Check that very long URLs are truncated.
-  std::string big_url_spec("http://");
-  big_url_spec.resize(2 * kMaxURLLen, 'x');
-  GURL big_url(big_url_spec);
-  {
-    URLRequest req(big_url, NULL);
-  }
-  ASSERT_EQ(1u,
-            URLRequest::InstanceTracker::Get()->GetRecentlyDeceased().size());
-  // The +1 is because GURL canonicalizes with a trailing '/' ... maybe
-  // we should just save the std::string rather than the GURL.
-  EXPECT_EQ(kMaxURLLen + 1,
-            URLRequest::InstanceTracker::Get()->GetRecentlyDeceased()[0]
-                .original_url.spec().size());
-}
-
-// Test the instance tracking functionality of URLRequest does not
-// fail if the URL was invalid. http://crbug.com/21423.
-TEST_F(URLRequestTest, TrackingInvalidURL) {
-  URLRequest::InstanceTracker::Get()->ClearRecentlyDeceased();
-  EXPECT_EQ(0u, URLRequest::InstanceTracker::Get()->GetLiveRequests().size());
-  EXPECT_EQ(0u,
-            URLRequest::InstanceTracker::Get()->GetRecentlyDeceased().size());
-
-  {
-    GURL invalid_url("xabc");
-    EXPECT_FALSE(invalid_url.is_valid());
-    URLRequest req(invalid_url, NULL);
-  }
-
-  // Check that the invalid URL made it into graveyard.
-  URLRequest::InstanceTracker::RecentRequestInfoList recent_reqs =
-      URLRequest::InstanceTracker::Get()->GetRecentlyDeceased();
-
-  ASSERT_EQ(1u, recent_reqs.size());
-  EXPECT_FALSE(recent_reqs[0].original_url.is_valid());
-}
-
 TEST_F(URLRequestTest, QuitTest) {
   // Don't use shared server here because we order it to quit.
   // It would impact other tests.
diff --git a/net/url_request/url_request_view_net_internals_job.cc b/net/url_request/url_request_view_net_internals_job.cc
index 5cadc075..92fcc14 100644
--- a/net/url_request/url_request_view_net_internals_job.cc
+++ b/net/url_request/url_request_view_net_internals_job.cc
@@ -323,11 +323,9 @@ class URLRequestLiveSubSection : public SubSection {
       : SubSection(parent, "outstanding", "Outstanding requests") {
   }
 
-  virtual void OutputBody(URLRequestContext* /*context*/, std::string* out) {
-    URLRequest::InstanceTracker* tracker = URLRequest::InstanceTracker::Get();
-
-    // Note that these are the requests across ALL contexts.
-    std::vector<URLRequest*> requests = tracker->GetLiveRequests();
+  virtual void OutputBody(URLRequestContext* context, std::string* out) {
+    std::vector<URLRequest*> requests =
+        context->request_tracker()->GetLiveRequests();
 
     out->append("<ol>");
     for (size_t i = 0; i < requests.size(); ++i) {
@@ -347,12 +345,9 @@ class URLRequestRecentSubSection : public SubSection {
       : SubSection(parent, "recent", "Recently completed requests") {
   }
 
-  virtual void OutputBody(URLRequestContext* /*context*/, std::string* out) {
-    URLRequest::InstanceTracker* tracker = URLRequest::InstanceTracker::Get();
-
-    // Note that these are the recently completed requests across ALL contexts.
-    URLRequest::InstanceTracker::RecentRequestInfoList recent =
-        tracker->GetRecentlyDeceased();
+  virtual void OutputBody(URLRequestContext* context, std::string* out) {
+    URLRequestTracker::RecentRequestInfoList recent =
+        context->request_tracker()->GetRecentlyDeceased();
 
     out->append("<ol>");
     for (size_t i = 0; i < recent.size(); ++i) {