author     eroman@chromium.org <eroman@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>  2009-12-21 20:03:19 +0000
committer  eroman@chromium.org <eroman@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>  2009-12-21 20:03:19 +0000
commit     c1173dd1715877a08f641c4f94ff709de51ada58 (patch)
tree       3a9eb057baa02f54dd863788562bbb6ea765bc45 /net/url_request
parent     4460d9ceda27029cd1474141ec9f114a5b4eadef (diff)
Add the ability to enable full logging for LoadLogs.
This works by clicking a button in about:net-internals to turn full logging on or off.
Right now full logging means:
- Instead of keeping info for just the most recent 25 requests, keep it for all requests.
- Instead of limiting each request to 50 log entries, keep all log entries.
- Instead of saving only the first 1000 bytes of each request URL, save the entire URL.
In the future full logging will be expanded to include other log events, and also string messages (so it can mirror what was sent to LOG(INFO) / LOG(WARNING)).
BUG=27552
Review URL: http://codereview.chromium.org/507055
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@35098 0039d316-1c4b-4281-b951-d872f2087c98
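
The bounded mode described in the commit message treats the request graveyard as a fixed-size circular buffer, while unbounded ("full") mode appends without limit, and switching back from unbounded to bounded clears the graveyard rather than trimming it. The following is a minimal standalone sketch of that policy, not the Chromium code itself; GraveyardSketch and all of its names are hypothetical.

#include <cstddef>
#include <iostream>
#include <string>
#include <vector>

class GraveyardSketch {
 public:
  static constexpr size_t kMaxSize = 25;  // mirrors kMaxGraveyardSize

  void SetUnbounded(bool unbounded) {
    // Going from unbounded back to bounded trims by clearing everything,
    // as the patch's SetUnbounded() does.
    if (is_unbounded_ && !unbounded) {
      entries_.clear();
      next_index_ = 0;
    }
    is_unbounded_ = unbounded;
  }

  void Insert(const std::string& url) {
    if (is_unbounded_) {
      entries_.push_back(url);  // full logging: keep everything
      return;
    }
    if (entries_.size() < kMaxSize) {
      entries_.push_back(url);  // still growing to maximum capacity
    } else {
      entries_[next_index_] = url;  // overwrite the oldest entry
    }
    next_index_ = (next_index_ + 1) % kMaxSize;
  }

  size_t size() const { return entries_.size(); }

 private:
  std::vector<std::string> entries_;
  size_t next_index_ = 0;
  bool is_unbounded_ = false;
};

int main() {
  GraveyardSketch g;
  for (int i = 0; i < 50; ++i)
    g.Insert("http://req" + std::to_string(i));
  std::cout << g.size() << "\n";  // 25: bounded mode caps the buffer

  g.SetUnbounded(true);
  for (int i = 0; i < 50; ++i)
    g.Insert("http://req" + std::to_string(i));
  std::cout << g.size() << "\n";  // 75: unbounded mode kept all 50 more

  g.SetUnbounded(false);
  std::cout << g.size() << "\n";  // 0: rebinding cleared the graveyard
}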
Diffstat (limited to 'net/url_request')
-rw-r--r--  net/url_request/request_tracker.h                      |  62
-rw-r--r--  net/url_request/request_tracker_unittest.cc            |  75
-rw-r--r--  net/url_request/url_request.cc                         |  15
-rw-r--r--  net/url_request/url_request_view_net_internals_job.cc  |  58

4 files changed, 196 insertions, 14 deletions
diff --git a/net/url_request/request_tracker.h b/net/url_request/request_tracker.h
index abfdf4b..3202e60 100644
--- a/net/url_request/request_tracker.h
+++ b/net/url_request/request_tracker.h
@@ -40,13 +40,22 @@ class RequestTracker {
   typedef std::vector<RecentRequestInfo> RecentRequestInfoList;
   typedef bool (*RecentRequestsFilterFunc)(const GURL&);
 
-  // The maximum number of entries for |graveyard_|.
+  // The maximum number of entries for |graveyard_|, when in bounded mode.
   static const size_t kMaxGraveyardSize;
 
-  // The maximum size of URLs to stuff into RecentRequestInfo.
+  // The maximum size of URLs to stuff into RecentRequestInfo, when in bounded
+  // mode.
   static const size_t kMaxGraveyardURLSize;
 
-  RequestTracker() : next_graveyard_index_(0), graveyard_filter_func_(NULL) {}
+  // The maximum number of entries to use for LoadLogs when in bounded mode.
+  static const size_t kBoundedLoadLogMaxEntries;
+
+  RequestTracker()
+      : next_graveyard_index_(0),
+        graveyard_filter_func_(NULL),
+        is_unbounded_(false) {
+  }
+
   ~RequestTracker() {}
 
   // Returns a list of Requests that are alive.
@@ -90,10 +99,13 @@ class RequestTracker {
     RecentRequestInfo info;
     request->GetInfoForTracker(&info);
-    // Paranoia check: truncate |info.original_url| if it is really big.
-    const std::string& spec = info.original_url.possibly_invalid_spec();
-    if (spec.size() > kMaxGraveyardURLSize)
-      info.original_url = GURL(spec.substr(0, kMaxGraveyardURLSize));
+
+    if (!is_unbounded_) {
+      // Paranoia check: truncate |info.original_url| if it is really big.
+      const std::string& spec = info.original_url.possibly_invalid_spec();
+      if (spec.size() > kMaxGraveyardURLSize)
+        info.original_url = GURL(spec.substr(0, kMaxGraveyardURLSize));
+    }
 
     if (ShouldInsertIntoGraveyard(info)) {
       // Add into |graveyard_|.
@@ -109,6 +121,31 @@ class RequestTracker {
     graveyard_filter_func_ = filter_func;
   }
 
+  bool IsUnbounded() const {
+    return is_unbounded_;
+  }
+
+  void SetUnbounded(bool unbounded) {
+    // No change.
+    if (is_unbounded_ == unbounded)
+      return;
+
+    // If we are going from unbounded to bounded, we need to trim the
+    // graveyard. For simplicity we will simply clear it.
+    if (is_unbounded_ && !unbounded)
+      ClearRecentlyDeceased();
+
+    is_unbounded_ = unbounded;
+  }
+
+  // Creates a LoadLog using the unbounded/bounded constraints that
+  // apply to this tracker.
+  net::LoadLog* CreateLoadLog() {
+    if (IsUnbounded())
+      return new net::LoadLog(net::LoadLog::kUnbounded);
+    return new net::LoadLog(kBoundedLoadLogMaxEntries);
+  }
+
  private:
   bool ShouldInsertIntoGraveyard(const RecentRequestInfo& info) {
     if (!graveyard_filter_func_)
       return true;
@@ -117,6 +154,13 @@ class RequestTracker {
   }
 
   void InsertIntoGraveyard(const RecentRequestInfo& info) {
+    if (is_unbounded_) {
+      graveyard_.push_back(info);
+      return;
+    }
+
+    // Otherwise enforce a bound on the graveyard size, by treating it as a
+    // circular buffer.
     if (graveyard_.size() < kMaxGraveyardSize) {
       // Still growing to maximum capacity.
       DCHECK_EQ(next_graveyard_index_, graveyard_.size());
@@ -133,6 +177,7 @@ class RequestTracker {
   size_t next_graveyard_index_;
   RecentRequestInfoList graveyard_;
   RecentRequestsFilterFunc graveyard_filter_func_;
+  bool is_unbounded_;
 };
 
 template<typename Request>
@@ -141,4 +186,7 @@ const size_t RequestTracker<Request>::kMaxGraveyardSize = 25;
 template<typename Request>
 const size_t RequestTracker<Request>::kMaxGraveyardURLSize = 1000;
 
+template<typename Request>
+const size_t RequestTracker<Request>::kBoundedLoadLogMaxEntries = 50;
+
 #endif  // NET_URL_REQUEST_REQUEST_TRACKER_H_
diff --git a/net/url_request/request_tracker_unittest.cc b/net/url_request/request_tracker_unittest.cc
index e603129..d31587c 100644
--- a/net/url_request/request_tracker_unittest.cc
+++ b/net/url_request/request_tracker_unittest.cc
@@ -44,8 +44,9 @@ class TestRequest {
 };
 
-TEST(RequestTrackerTest, Basic) {
+TEST(RequestTrackerTest, BasicBounded) {
   RequestTracker<TestRequest> tracker;
+  EXPECT_FALSE(tracker.IsUnbounded());
   EXPECT_EQ(0u, tracker.GetLiveRequests().size());
   EXPECT_EQ(0u, tracker.GetRecentlyDeceased().size());
 
@@ -85,6 +86,7 @@ TEST(RequestTrackerTest, Basic) {
 
 TEST(RequestTrackerTest, GraveyardBounded) {
   RequestTracker<TestRequest> tracker;
+  EXPECT_FALSE(tracker.IsUnbounded());
   EXPECT_EQ(0u, tracker.GetLiveRequests().size());
   EXPECT_EQ(0u, tracker.GetRecentlyDeceased().size());
 
@@ -111,9 +113,42 @@ TEST(RequestTrackerTest, GraveyardBounded) {
   }
 }
 
+TEST(RequestTrackerTest, GraveyardUnbounded) {
+  RequestTracker<TestRequest> tracker;
+  EXPECT_FALSE(tracker.IsUnbounded());
+  EXPECT_EQ(0u, tracker.GetLiveRequests().size());
+  EXPECT_EQ(0u, tracker.GetRecentlyDeceased().size());
+
+  tracker.SetUnbounded(true);
+
+  EXPECT_TRUE(tracker.IsUnbounded());
+
+  // Add twice as many requests as would fit in the bounded graveyard.
+
+  size_t kMaxSize = RequestTracker<TestRequest>::kMaxGraveyardSize * 2;
+  for (size_t i = 0; i < kMaxSize; ++i) {
+    TestRequest req(GURL(StringPrintf("http://req%" PRIuS, i).c_str()));
+    tracker.Add(&req);
+    tracker.Remove(&req);
+  }
+
+  // Check that all of them got saved.
+
+  RequestTracker<TestRequest>::RecentRequestInfoList recent_reqs =
+      tracker.GetRecentlyDeceased();
+
+  ASSERT_EQ(kMaxSize, recent_reqs.size());
+
+  for (size_t i = 0; i < kMaxSize; ++i) {
+    GURL url(StringPrintf("http://req%" PRIuS, i).c_str());
+    EXPECT_EQ(url, recent_reqs[i].original_url);
+  }
+}
+
 // Check that very long URLs are truncated.
 TEST(RequestTrackerTest, GraveyardURLBounded) {
   RequestTracker<TestRequest> tracker;
+  EXPECT_FALSE(tracker.IsUnbounded());
 
   std::string big_url_spec("http://");
   big_url_spec.resize(2 * RequestTracker<TestRequest>::kMaxGraveyardURLSize,
@@ -134,6 +169,7 @@ TEST(RequestTrackerTest, GraveyardURLBounded) {
 // Test the doesn't fail if the URL was invalid. http://crbug.com/21423.
 TEST(URLRequestTrackerTest, TrackingInvalidURL) {
   RequestTracker<TestRequest> tracker;
+  EXPECT_FALSE(tracker.IsUnbounded());
 
   EXPECT_EQ(0u, tracker.GetLiveRequests().size());
   EXPECT_EQ(0u, tracker.GetRecentlyDeceased().size());
@@ -158,6 +194,7 @@ bool ShouldRequestBeAddedToGraveyard(const GURL& url) {
 // saved into the recent requests list (graveyard), by using a filter.
 TEST(RequestTrackerTest, GraveyardCanBeFiltered) {
   RequestTracker<TestRequest> tracker;
+  EXPECT_FALSE(tracker.IsUnbounded());
 
   tracker.SetGraveyardFilter(ShouldRequestBeAddedToGraveyard);
 
@@ -188,4 +225,40 @@ TEST(RequestTrackerTest, GraveyardCanBeFiltered) {
             tracker.GetRecentlyDeceased()[1].original_url.spec());
 }
 
+// Convert an unbounded tracker back to being bounded.
+TEST(RequestTrackerTest, ConvertUnboundedToBounded) {
+  RequestTracker<TestRequest> tracker;
+  EXPECT_FALSE(tracker.IsUnbounded());
+  EXPECT_EQ(0u, tracker.GetLiveRequests().size());
+  EXPECT_EQ(0u, tracker.GetRecentlyDeceased().size());
+
+  tracker.SetUnbounded(true);
+  EXPECT_TRUE(tracker.IsUnbounded());
+
+  // Add twice as many requests as would fit in the bounded graveyard.
+
+  size_t kMaxSize = RequestTracker<TestRequest>::kMaxGraveyardSize * 2;
+  for (size_t i = 0; i < kMaxSize; ++i) {
+    TestRequest req(GURL(StringPrintf("http://req%" PRIuS, i).c_str()));
+    tracker.Add(&req);
+    tracker.Remove(&req);
+  }
+
+  // Check that all of them got saved.
+  ASSERT_EQ(kMaxSize, tracker.GetRecentlyDeceased().size());
+
+  // Now make the tracker bounded, and add more entries to its graveyard.
+  tracker.SetUnbounded(false);
+
+  kMaxSize = RequestTracker<TestRequest>::kMaxGraveyardSize;
+  for (size_t i = 0; i < kMaxSize; ++i) {
+    TestRequest req(GURL(StringPrintf("http://req%" PRIuS, i).c_str()));
+    tracker.Add(&req);
+    tracker.Remove(&req);
+  }
+
+  // We should only have kMaxGraveyardSize entries now.
+  ASSERT_EQ(kMaxSize, tracker.GetRecentlyDeceased().size());
+}
+
 }  // namespace
diff --git a/net/url_request/url_request.cc b/net/url_request/url_request.cc
index e85b055..ec2eace 100644
--- a/net/url_request/url_request.cc
+++ b/net/url_request/url_request.cc
@@ -27,9 +27,6 @@ using std::wstring;
 // Max number of http redirects to follow. Same number as gecko.
 static const int kMaxRedirects = 20;
 
-// The maximum size of the passive LoadLog associated with each request.
-static const int kMaxNumLoadLogEntries = 50;
-
 static URLRequestJobManager* GetJobManager() {
   return Singleton<URLRequestJobManager>::get();
 }
@@ -38,8 +35,7 @@ static URLRequestJobManager* GetJobManager() {
 // URLRequest
 
 URLRequest::URLRequest(const GURL& url, Delegate* delegate)
-    : load_log_(new net::LoadLog(kMaxNumLoadLogEntries)),
-      url_(url),
+    : url_(url),
       original_url_(url),
       method_("GET"),
       load_flags_(net::LOAD_NORMAL),
@@ -500,8 +496,15 @@ void URLRequest::set_context(URLRequestContext* context) {
   if (prev_context != context) {
     if (prev_context)
       prev_context->url_request_tracker()->Remove(this);
-    if (context)
+    if (context) {
+      if (!load_log_) {
+        // Create the LoadLog -- we waited until now to create it so we know
+        // what constraints the URLRequestContext is enforcing on log levels.
+        load_log_ = context->url_request_tracker()->CreateLoadLog();
+      }
+
       context->url_request_tracker()->Add(this);
+    }
   }
 }
diff --git a/net/url_request/url_request_view_net_internals_job.cc b/net/url_request/url_request_view_net_internals_job.cc
index 3c74d5b..e3759eb 100644
--- a/net/url_request/url_request_view_net_internals_job.cc
+++ b/net/url_request/url_request_view_net_internals_job.cc
@@ -480,6 +480,51 @@ bool GetViewCacheKeyFromPath(const std::string path,
   return true;
 }
 
+// Process any query strings in the request (for actions like toggling
+// full logging). As a side-effect, also append some status text to the HTML
+// describing what we did.
+void ProcessQueryStringCommands(URLRequestContext* context,
+                                const std::string& query,
+                                std::string* data) {
+  if (StartsWithASCII(query, "logging=", true)) {
+    bool enable_unbounded = StartsWithASCII(query, "logging=E", true);
+    context->url_request_tracker()->SetUnbounded(enable_unbounded);
+    context->socket_stream_tracker()->SetUnbounded(enable_unbounded);
+
+    if (enable_unbounded)
+      data->append("<i>Enabled full logging</i>\n");
+    else
+      data->append("<i>Disabled full logging, and cleared the recent "
+                   "requests</i>\n");
+
+  } else if (StartsWithASCII(query, "data=Clear", true)) {
+    context->url_request_tracker()->ClearRecentlyDeceased();
+    context->socket_stream_tracker()->ClearRecentlyDeceased();
+
+    data->append("<i>Cleared the recent request logs</i>\n");
+  }
+}
+
+// Append some HTML controls to |data| that allow the user to enable full
+// logging, and clear some of the already logged data.
+void DrawControlsHeader(URLRequestContext* context, std::string* data) {
+  bool is_full_logging_enabled =
+      context->url_request_tracker()->IsUnbounded() &&
+      context->socket_stream_tracker()->IsUnbounded();
+
+  data->append("<form action='' method=GET style='margin-bottom: 10px'>\n");
+  if (is_full_logging_enabled) {
+    data->append(
+        "<input type=submit name=logging value='Disable full logging' />");
+  } else {
+    data->append(
+        "<input type=submit name=logging value='Enable full logging' />");
+  }
+
+  data->append("<input type=submit name=data value='Clear recent requests' />");
+
+  data->append("</form>\n");
+}
+
 }  // namespace
 
 bool URLRequestViewNetInternalsJob::GetData(std::string* mime_type,
@@ -491,6 +536,16 @@ bool URLRequestViewNetInternalsJob::GetData(std::string* mime_type,
   URLRequestContext* context = request_->context();
 
   std::string details = url_format_->GetDetails(request_->url());
+  std::string query;
+
+  // Split out the query parameters.
+  std::string::size_type query_start = details.find('?');
+  if (query_start != std::string::npos) {
+    if (query_start + 1 < details.size())
+      query = details.substr(query_start + 1);
+    details = details.substr(0, query_start);
+  }
 
   data->clear();
 
   // Use a different handler for "view-cache/*" subpaths.
@@ -514,6 +569,9 @@ bool URLRequestViewNetInternalsJob::GetData(std::string* mime_type,
       "developers/design-documents/view-net-internals'>"
       "Help: how do I use this?</a></p>");
 
+  ProcessQueryStringCommands(context, query, data);
+  DrawControlsHeader(context, data);
+
   SubSection* all = Singleton<AllSubSections>::get();
   SubSection* section = all;
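
One subtlety in the ProcessQueryStringCommands change above: both submit buttons share the form field name "logging", so a button's HTML value attribute ("Enable full logging" or "Disable full logging") becomes the query value, and checking the prefix "logging=E" is enough to tell them apart. Below is a standalone sketch of that dispatch under that assumption; StartsWith is a hypothetical stand-in for Chromium's StartsWithASCII, and the hard-coded query strings are illustrative.

#include <iostream>
#include <string>

// Hypothetical stand-in for Chromium's StartsWithASCII(str, prefix, true).
static bool StartsWith(const std::string& s, const std::string& prefix) {
  return s.size() >= prefix.size() &&
         s.compare(0, prefix.size(), prefix) == 0;
}

int main() {
  // Query strings as a browser would submit them for each form button
  // (spaces in the button labels arrive encoded as '+').
  const std::string queries[] = {
      "logging=Enable+full+logging",
      "logging=Disable+full+logging",
      "data=Clear+recent+requests",
  };
  for (const std::string& query : queries) {
    if (StartsWith(query, "logging=")) {
      // Only the Enable button's value begins with 'E' after "logging=".
      bool enable_unbounded = StartsWith(query, "logging=E");
      std::cout << query << " -> SetUnbounded("
                << (enable_unbounded ? "true" : "false") << ")\n";
    } else if (StartsWith(query, "data=Clear")) {
      std::cout << query << " -> ClearRecentlyDeceased()\n";
    }
  }
}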