author     adamk@chromium.org <adamk@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>   2011-03-24 00:03:44 +0000
committer  adamk@chromium.org <adamk@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>   2011-03-24 00:03:44 +0000
commit     dd29bcd78feba27e2093d0b2cacbd6e9d8e314ef (patch)
tree       e6681274f9630a74e9406e0df1ffa9b3a95406a7
parent     1e6f58db132a6e641299f7cb2502dbf88a06b454 (diff)
Move URLRequestJob's histogram logic to URLRequestHttpJob.
While this makes some of the stats-collection code slightly less general, svn history shows that over several years it has only been used for SDCH, so the loss of generality is worth it in order to greatly simplify URLRequestJob.
BUG=none
TEST=try bots
Review URL: http://codereview.chromium.org/6713019
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@79219 0039d316-1c4b-4281-b951-d872f2087c98
-rw-r--r--  net/url_request/url_request_http_job.cc      | 278
-rw-r--r--  net/url_request/url_request_http_job.h        |  42
-rw-r--r--  net/url_request/url_request_job.cc            | 274
-rw-r--r--  net/url_request/url_request_job.h             |  83
-rw-r--r--  webkit/appcache/appcache_url_request_job.cc   |   4
-rw-r--r--  webkit/appcache/appcache_url_request_job.h    |   3
6 files changed, 324 insertions, 360 deletions
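
At a high level, the change replaces the base class's built-in packet counting with a virtual hook and read-only byte-count accessors, so the SDCH-specific bookkeeping can live entirely in URLRequestHttpJob. Below is a minimal sketch of that split, assuming simplified types and a hypothetical OnRawBytesRead() helper rather than the real Chromium read path; it illustrates the pattern, not the actual API.

// Sketch only: simplified stand-ins for the real classes.
#include <cstdint>

class URLRequestJob {
 public:
  virtual ~URLRequestJob() = default;

 protected:
  // Byte counters stay in the generic job and are exposed read-only, so
  // subclasses can histogram them without the base class knowing why.
  int64_t filter_input_byte_count() const { return filter_input_byte_count_; }
  int prefilter_bytes_read() const { return prefilter_bytes_read_; }

  // Packet-time bookkeeping becomes a hook with a no-op default; only
  // URLRequestHttpJob overrides it.
  virtual void UpdatePacketReadTimes() {}

  // Hypothetical helper standing in for the real raw-read path.
  void OnRawBytesRead(int bytes) {
    filter_input_byte_count_ += bytes;
    prefilter_bytes_read_ += bytes;
    UpdatePacketReadTimes();  // statistics hook
  }

 private:
  int64_t filter_input_byte_count_ = 0;
  int prefilter_bytes_read_ = 0;
};

class URLRequestHttpJob : public URLRequestJob {
 protected:
  // SDCH-specific timing now lives only in the HTTP job.
  void UpdatePacketReadTimes() override {
    if (!packet_timing_enabled_)
      return;
    // Approximate packet boundaries from the byte count, roughly as the
    // diff below does with kTypicalPacketSize = 1430.
    const int64_t kTypicalPacketSize = 1430;
    while (filter_input_byte_count() > bytes_observed_in_packets_)
      bytes_observed_in_packets_ += kTypicalPacketSize;
    bytes_observed_in_packets_ = filter_input_byte_count();
  }

 private:
  bool packet_timing_enabled_ = true;
  int64_t bytes_observed_in_packets_ = 0;
};

The no-op default keeps other URLRequestJob subclasses (such as the appcache job in this patch) free of any statistics obligations.
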
diff --git a/net/url_request/url_request_http_job.cc b/net/url_request/url_request_http_job.cc index 027ff89..d8f0ab4 100644 --- a/net/url_request/url_request_http_job.cc +++ b/net/url_request/url_request_http_job.cc @@ -14,12 +14,14 @@ #include "base/metrics/histogram.h" #include "base/rand_util.h" #include "base/string_util.h" +#include "base/time.h" #include "net/base/cert_status_flags.h" #include "net/base/cookie_policy.h" #include "net/base/cookie_store.h" #include "net/base/filter.h" #include "net/base/host_port_pair.h" #include "net/base/load_flags.h" +#include "net/base/mime_util.h" #include "net/base/net_errors.h" #include "net/base/net_util.h" #include "net/base/sdch_manager.h" @@ -41,6 +43,13 @@ static const char kAvailDictionaryHeader[] = "Avail-Dictionary"; +// When histogramming results related to SDCH and/or an SDCH latency test, the +// number of packets for which we need to record arrival times so as to +// calculate interpacket latencies. We currently are only looking at the +// first few packets, as we're monitoring the impact of the initial TCP +// congestion window on stalling of transmissions. +static const size_t kSdchPacketHistogramCount = 5; + namespace net { namespace { @@ -106,7 +115,7 @@ base::Time URLRequestHttpJob::HttpFilterContext::GetRequestTime() const { } bool URLRequestHttpJob::HttpFilterContext::IsCachedContent() const { - return job_->IsCachedContent(); + return job_->is_cached_content_; } bool URLRequestHttpJob::HttpFilterContext::IsDownload() const { @@ -193,6 +202,12 @@ URLRequestHttpJob::URLRequestHttpJob(URLRequest* request) sdch_test_control_(false), is_cached_content_(false), request_creation_time_(), + packet_timing_enabled_(false), + bytes_observed_in_packets_(0), + packet_times_(), + request_time_snapshot_(), + final_packet_time_(), + observed_packet_count_(0), ALLOW_THIS_IN_INITIALIZER_LIST(filter_context_(this)), ALLOW_THIS_IN_INITIALIZER_LIST(method_factory_(this)) { ResetTimer(); @@ -229,7 +244,7 @@ void URLRequestHttpJob::NotifyHeadersComplete() { if (response_info_->headers->EnumerateHeader(&iter, name, &url_text)) { // request_->url() won't be valid in the destructor, so we use an // alternate copy. - DCHECK(request_->url() == request_info_.url); + DCHECK_EQ(request_->url(), request_info_.url); // Resolve suggested URL relative to request url. sdch_dictionary_url_ = request_info_.url.Resolve(url_text); } @@ -247,6 +262,11 @@ void URLRequestHttpJob::NotifyHeadersComplete() { URLRequestJob::NotifyHeadersComplete(); } +void URLRequestHttpJob::NotifyDone(const URLRequestStatus& status) { + RecordCompressionHistograms(); + URLRequestJob::NotifyDone(status); +} + void URLRequestHttpJob::DestroyTransaction() { DCHECK(transaction_.get()); @@ -312,7 +332,7 @@ void URLRequestHttpJob::AddExtraHeaders() { // We are participating in the test (or control), and hence we'll // eventually record statistics via either SDCH_EXPERIMENT_DECODE or // SDCH_EXPERIMENT_HOLDBACK, and we'll need some packet timing data. - EnablePacketCounting(kSdchPacketHistogramCount); + packet_timing_enabled_ = true; if (base::RandDouble() < .01) { sdch_test_control_ = true; // 1% probability. advertise_sdch = false; @@ -343,7 +363,7 @@ void URLRequestHttpJob::AddExtraHeaders() { // definately employ an SDCH filter (or tentative sdch filter) when we get // a response. When done, we'll record histograms via SDCH_DECODE or // SDCH_PASSTHROUGH. Hence we need to record packet arrival times. 
- EnablePacketCounting(kSdchPacketHistogramCount); + packet_timing_enabled_ = true; } } @@ -805,10 +825,6 @@ Filter* URLRequestHttpJob::SetupFilter() const { ? Filter::Factory(encoding_types, filter_context_) : NULL; } -bool URLRequestHttpJob::IsCachedContent() const { - return is_cached_content_; -} - bool URLRequestHttpJob::IsSafeRedirect(const GURL& location) { // We only allow redirects to certain "safe" protocols. This does not // restrict redirects to externally handled protocols. Our consumer would @@ -875,7 +891,7 @@ void URLRequestHttpJob::SetAuth(const string16& username, if (proxy_auth_state_ == AUTH_STATE_NEED_AUTH) { proxy_auth_state_ = AUTH_STATE_HAVE_AUTH; } else { - DCHECK(server_auth_state_ == AUTH_STATE_NEED_AUTH); + DCHECK_EQ(server_auth_state_, AUTH_STATE_NEED_AUTH); server_auth_state_ = AUTH_STATE_HAVE_AUTH; } @@ -887,7 +903,7 @@ void URLRequestHttpJob::CancelAuth() { if (proxy_auth_state_ == AUTH_STATE_NEED_AUTH) { proxy_auth_state_ = AUTH_STATE_CANCELED; } else { - DCHECK(server_auth_state_ == AUTH_STATE_NEED_AUTH); + DCHECK_EQ(server_auth_state_, AUTH_STATE_NEED_AUTH); server_auth_state_ = AUTH_STATE_CANCELED; } @@ -993,14 +1009,14 @@ HostPortPair URLRequestHttpJob::GetSocketAddress() const { URLRequestHttpJob::~URLRequestHttpJob() { DCHECK(!sdch_test_control_ || !sdch_test_activated_); - if (!IsCachedContent()) { + if (!is_cached_content_) { if (sdch_test_control_) RecordPacketStats(FilterContext::SDCH_EXPERIMENT_HOLDBACK); if (sdch_test_activated_) RecordPacketStats(FilterContext::SDCH_EXPERIMENT_DECODE); } - // Make sure SDCH filters are told to emit histogram data while this class - // can still service the IsCachedContent() call. + // Make sure SDCH filters are told to emit histogram data while + // filter_context_ is still alive. DestroyFilters(); if (sdch_dictionary_url_.is_valid()) { @@ -1072,4 +1088,240 @@ void URLRequestHttpJob::ResetTimer() { request_creation_time_ = base::Time::Now(); } +void URLRequestHttpJob::UpdatePacketReadTimes() { + if (!packet_timing_enabled_) + return; + + if (filter_input_byte_count() <= bytes_observed_in_packets_) { + DCHECK_EQ(filter_input_byte_count(), bytes_observed_in_packets_); + return; // No new bytes have arrived. + } + + if (!bytes_observed_in_packets_) + request_time_snapshot_ = request_ ? request_->request_time() : base::Time(); + + final_packet_time_ = base::Time::Now(); + const size_t kTypicalPacketSize = 1430; + while (filter_input_byte_count() > bytes_observed_in_packets_) { + ++observed_packet_count_; + if (packet_times_.size() < kSdchPacketHistogramCount) { + packet_times_.push_back(final_packet_time_); + DCHECK_EQ(static_cast<size_t>(observed_packet_count_), + packet_times_.size()); + } + bytes_observed_in_packets_ += kTypicalPacketSize; + } + // Since packets may not be full, we'll remember the number of bytes we've + // accounted for in packets thus far. 
+ bytes_observed_in_packets_ = filter_input_byte_count(); +} + +void URLRequestHttpJob::RecordPacketStats( + FilterContext::StatisticSelector statistic) const { + if (!packet_timing_enabled_ || (final_packet_time_ == base::Time())) + return; + + base::TimeDelta duration = final_packet_time_ - request_time_snapshot_; + switch (statistic) { + case FilterContext::SDCH_DECODE: { + UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Network_Decode_Latency_F_a", duration, + base::TimeDelta::FromMilliseconds(20), + base::TimeDelta::FromMinutes(10), 100); + UMA_HISTOGRAM_COUNTS_100("Sdch3.Network_Decode_Packets_b", + static_cast<int>(observed_packet_count_)); + UMA_HISTOGRAM_CUSTOM_COUNTS("Sdch3.Network_Decode_Bytes_Processed_b", + static_cast<int>(bytes_observed_in_packets_), 500, 100000, 100); + if (packet_times_.empty()) + return; + UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Network_Decode_1st_To_Last_a", + final_packet_time_ - packet_times_[0], + base::TimeDelta::FromMilliseconds(20), + base::TimeDelta::FromMinutes(10), 100); + + DCHECK_GT(kSdchPacketHistogramCount, 4u); + if (packet_times_.size() <= 4) + return; + UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Network_Decode_1st_To_2nd_c", + packet_times_[1] - packet_times_[0], + base::TimeDelta::FromMilliseconds(1), + base::TimeDelta::FromSeconds(10), 100); + UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Network_Decode_2nd_To_3rd_c", + packet_times_[2] - packet_times_[1], + base::TimeDelta::FromMilliseconds(1), + base::TimeDelta::FromSeconds(10), 100); + UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Network_Decode_3rd_To_4th_c", + packet_times_[3] - packet_times_[2], + base::TimeDelta::FromMilliseconds(1), + base::TimeDelta::FromSeconds(10), 100); + UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Network_Decode_4th_To_5th_c", + packet_times_[4] - packet_times_[3], + base::TimeDelta::FromMilliseconds(1), + base::TimeDelta::FromSeconds(10), 100); + return; + } + case FilterContext::SDCH_PASSTHROUGH: { + // Despite advertising a dictionary, we handled non-sdch compressed + // content. 
+ UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Network_Pass-through_Latency_F_a", + duration, + base::TimeDelta::FromMilliseconds(20), + base::TimeDelta::FromMinutes(10), 100); + UMA_HISTOGRAM_COUNTS_100("Sdch3.Network_Pass-through_Packets_b", + observed_packet_count_); + if (packet_times_.empty()) + return; + UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Network_Pass-through_1st_To_Last_a", + final_packet_time_ - packet_times_[0], + base::TimeDelta::FromMilliseconds(20), + base::TimeDelta::FromMinutes(10), 100); + DCHECK_GT(kSdchPacketHistogramCount, 4u); + if (packet_times_.size() <= 4) + return; + UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Network_Pass-through_1st_To_2nd_c", + packet_times_[1] - packet_times_[0], + base::TimeDelta::FromMilliseconds(1), + base::TimeDelta::FromSeconds(10), 100); + UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Network_Pass-through_2nd_To_3rd_c", + packet_times_[2] - packet_times_[1], + base::TimeDelta::FromMilliseconds(1), + base::TimeDelta::FromSeconds(10), 100); + UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Network_Pass-through_3rd_To_4th_c", + packet_times_[3] - packet_times_[2], + base::TimeDelta::FromMilliseconds(1), + base::TimeDelta::FromSeconds(10), 100); + UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Network_Pass-through_4th_To_5th_c", + packet_times_[4] - packet_times_[3], + base::TimeDelta::FromMilliseconds(1), + base::TimeDelta::FromSeconds(10), 100); + return; + } + + case FilterContext::SDCH_EXPERIMENT_DECODE: { + UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Experiment_Decode", + duration, + base::TimeDelta::FromMilliseconds(20), + base::TimeDelta::FromMinutes(10), 100); + // We already provided interpacket histograms above in the SDCH_DECODE + // case, so we don't need them here. + return; + } + case FilterContext::SDCH_EXPERIMENT_HOLDBACK: { + UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Experiment_Holdback", + duration, + base::TimeDelta::FromMilliseconds(20), + base::TimeDelta::FromMinutes(10), 100); + UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Experiment_Holdback_1st_To_Last_a", + final_packet_time_ - packet_times_[0], + base::TimeDelta::FromMilliseconds(20), + base::TimeDelta::FromMinutes(10), 100); + + DCHECK_GT(kSdchPacketHistogramCount, 4u); + if (packet_times_.size() <= 4) + return; + UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Experiment_Holdback_1st_To_2nd_c", + packet_times_[1] - packet_times_[0], + base::TimeDelta::FromMilliseconds(1), + base::TimeDelta::FromSeconds(10), 100); + UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Experiment_Holdback_2nd_To_3rd_c", + packet_times_[2] - packet_times_[1], + base::TimeDelta::FromMilliseconds(1), + base::TimeDelta::FromSeconds(10), 100); + UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Experiment_Holdback_3rd_To_4th_c", + packet_times_[3] - packet_times_[2], + base::TimeDelta::FromMilliseconds(1), + base::TimeDelta::FromSeconds(10), 100); + UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Experiment_Holdback_4th_To_5th_c", + packet_times_[4] - packet_times_[3], + base::TimeDelta::FromMilliseconds(1), + base::TimeDelta::FromSeconds(10), 100); + return; + } + default: + NOTREACHED(); + return; + } +} + +// The common type of histogram we use for all compression-tracking histograms. +#define COMPRESSION_HISTOGRAM(name, sample) \ + do { \ + UMA_HISTOGRAM_CUSTOM_COUNTS("Net.Compress." 
name, sample, \ + 500, 1000000, 100); \ + } while(0) + +void URLRequestHttpJob::RecordCompressionHistograms() { + DCHECK(request_); + if (!request_) + return; + + if (is_cached_content_ || // Don't record cached content + !GetStatus().is_success() || // Don't record failed content + !IsCompressibleContent() || // Only record compressible content + !prefilter_bytes_read()) // Zero-byte responses aren't useful. + return; + + // Miniature requests aren't really compressible. Don't count them. + const int kMinSize = 16; + if (prefilter_bytes_read() < kMinSize) + return; + + // Only record for http or https urls. + bool is_http = request_->url().SchemeIs("http"); + bool is_https = request_->url().SchemeIs("https"); + if (!is_http && !is_https) + return; + + int compressed_B = prefilter_bytes_read(); + int decompressed_B = postfilter_bytes_read(); + bool was_filtered = HasFilter(); + + // We want to record how often downloaded resources are compressed. + // But, we recognize that different protocols may have different + // properties. So, for each request, we'll put it into one of 3 + // groups: + // a) SSL resources + // Proxies cannot tamper with compression headers with SSL. + // b) Non-SSL, loaded-via-proxy resources + // In this case, we know a proxy might have interfered. + // c) Non-SSL, loaded-without-proxy resources + // In this case, we know there was no explicit proxy. However, + // it is possible that a transparent proxy was still interfering. + // + // For each group, we record the same 3 histograms. + + if (is_https) { + if (was_filtered) { + COMPRESSION_HISTOGRAM("SSL.BytesBeforeCompression", compressed_B); + COMPRESSION_HISTOGRAM("SSL.BytesAfterCompression", decompressed_B); + } else { + COMPRESSION_HISTOGRAM("SSL.ShouldHaveBeenCompressed", decompressed_B); + } + return; + } + + if (request_->was_fetched_via_proxy()) { + if (was_filtered) { + COMPRESSION_HISTOGRAM("Proxy.BytesBeforeCompression", compressed_B); + COMPRESSION_HISTOGRAM("Proxy.BytesAfterCompression", decompressed_B); + } else { + COMPRESSION_HISTOGRAM("Proxy.ShouldHaveBeenCompressed", decompressed_B); + } + return; + } + + if (was_filtered) { + COMPRESSION_HISTOGRAM("NoProxy.BytesBeforeCompression", compressed_B); + COMPRESSION_HISTOGRAM("NoProxy.BytesAfterCompression", decompressed_B); + } else { + COMPRESSION_HISTOGRAM("NoProxy.ShouldHaveBeenCompressed", decompressed_B); + } +} + +bool URLRequestHttpJob::IsCompressibleContent() const { + std::string mime_type; + return GetMimeType(&mime_type) && + (IsSupportedJavascriptMimeType(mime_type.c_str()) || + IsSupportedNonImageMimeType(mime_type.c_str())); +} + } // namespace net diff --git a/net/url_request/url_request_http_job.h b/net/url_request/url_request_http_job.h index 84bf038..d4e37f6 100644 --- a/net/url_request/url_request_http_job.h +++ b/net/url_request/url_request_http_job.h @@ -38,6 +38,9 @@ class URLRequestHttpJob : public URLRequestJob { // Shadows URLRequestJob's version of this method so we can grab cookies. void NotifyHeadersComplete(); + // Shadows URLRequestJob's method so we can record histograms. 
+ void NotifyDone(const URLRequestStatus& status); + void DestroyTransaction(); void StartTransaction(); void AddExtraHeaders(); @@ -73,7 +76,6 @@ class URLRequestHttpJob : public URLRequestJob { virtual bool GetResponseCookies(std::vector<std::string>* cookies); virtual int GetResponseCode() const; virtual Filter* SetupFilter() const; - virtual bool IsCachedContent() const; virtual bool IsSafeRedirect(const GURL& location); virtual bool NeedsAuth(); virtual void GetAuthChallengeInfo(scoped_refptr<AuthChallengeInfo>*); @@ -161,7 +163,45 @@ class URLRequestHttpJob : public URLRequestJob { void RecordTimer(); void ResetTimer(); + virtual void UpdatePacketReadTimes(); + void RecordPacketStats(FilterContext::StatisticSelector statistic) const; + + void RecordCompressionHistograms(); + bool IsCompressibleContent() const; + base::Time request_creation_time_; + + // Data used for statistics gathering. This data is only used for histograms + // and is not required. It is only gathered if packet_timing_enabled_ == true. + // + // TODO(jar): improve the quality of the gathered info by gathering most times + // at a lower point in the network stack, assuring we have actual packet + // boundaries, rather than approximations. Also note that input byte count + // as gathered here is post-SSL, and post-cache-fetch, and does not reflect + // true packet arrival times in such cases. + + // Enable recording of packet arrival times for histogramming. + bool packet_timing_enabled_; + + // The number of bytes that have been accounted for in packets (where some of + // those packets may possibly have had their time of arrival recorded). + int64 bytes_observed_in_packets_; + + // Arrival times for some of the first few packets. + std::vector<base::Time> packet_times_; + + // The request time may not be available when we are being destroyed, so we + // snapshot it early on. + base::Time request_time_snapshot_; + + // Since we don't save all packet times in packet_times_, we save the + // last time for use in histograms. + base::Time final_packet_time_; + + // The count of the number of packets, some of which may not have been timed. + // We're ignoring overflow, as 1430 x 2^31 is a LOT of bytes. 
+ int observed_packet_count_; + HttpFilterContext filter_context_; ScopedRunnableMethodFactory<URLRequestHttpJob> method_factory_; diff --git a/net/url_request/url_request_job.cc b/net/url_request/url_request_job.cc index 66c48f4..11d33da9 100644 --- a/net/url_request/url_request_job.cc +++ b/net/url_request/url_request_job.cc @@ -6,14 +6,12 @@ #include "base/compiler_specific.h" #include "base/message_loop.h" -#include "base/metrics/histogram.h" #include "base/string_number_conversions.h" #include "base/string_util.h" #include "net/base/auth.h" #include "net/base/host_port_pair.h" #include "net/base/io_buffer.h" #include "net/base/load_states.h" -#include "net/base/mime_util.h" #include "net/base/net_errors.h" #include "net/base/network_delegate.h" #include "net/http/http_response_headers.h" @@ -21,9 +19,6 @@ #include "net/url_request/url_request_context.h" #include "net/url_request/url_request_job_tracker.h" -using base::Time; -using base::TimeTicks; - namespace net { URLRequestJob::URLRequestJob(URLRequest* request) @@ -31,18 +26,12 @@ URLRequestJob::URLRequestJob(URLRequest* request) done_(false), prefilter_bytes_read_(0), postfilter_bytes_read_(0), - is_compressible_content_(false), - is_compressed_(false), + filter_input_byte_count_(0), filter_needs_more_output_space_(false), filtered_read_buffer_len_(0), has_handled_response_(false), expected_content_size_(-1), deferred_redirect_status_code_(-1), - packet_timing_enabled_(false), - filter_input_byte_count_(0), - bytes_observed_in_packets_(0), - max_packets_timed_(0), - observed_packet_count_(0), ALLOW_THIS_IN_INITIALIZER_LIST(method_factory_(this)) { g_url_request_job_tracker.AddNewJob(this); } @@ -206,149 +195,10 @@ bool URLRequestJob::GetMimeType(std::string* mime_type) const { return false; } -bool URLRequestJob::IsCachedContent() const { - return false; -} - int URLRequestJob::GetResponseCode() const { return -1; } -void URLRequestJob::RecordPacketStats( - FilterContext::StatisticSelector statistic) const { - if (!packet_timing_enabled_ || (final_packet_time_ == base::Time())) - return; - - // Caller should verify that we're not cached content, but we can't always - // really check for it here because we may (at destruction time) call our own - // class method and get a bogus const answer of false. This DCHECK only helps - // when this method has a valid overridden definition. 
- DCHECK(!IsCachedContent()); - - base::TimeDelta duration = final_packet_time_ - request_time_snapshot_; - switch (statistic) { - case FilterContext::SDCH_DECODE: { - UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Network_Decode_Latency_F_a", duration, - base::TimeDelta::FromMilliseconds(20), - base::TimeDelta::FromMinutes(10), 100); - UMA_HISTOGRAM_COUNTS_100("Sdch3.Network_Decode_Packets_b", - static_cast<int>(observed_packet_count_)); - UMA_HISTOGRAM_CUSTOM_COUNTS("Sdch3.Network_Decode_Bytes_Processed_b", - static_cast<int>(bytes_observed_in_packets_), 500, 100000, 100); - if (packet_times_.empty()) - return; - UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Network_Decode_1st_To_Last_a", - final_packet_time_ - packet_times_[0], - base::TimeDelta::FromMilliseconds(20), - base::TimeDelta::FromMinutes(10), 100); - - DCHECK(max_packets_timed_ >= kSdchPacketHistogramCount); - DCHECK(kSdchPacketHistogramCount > 4); - if (packet_times_.size() <= 4) - return; - UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Network_Decode_1st_To_2nd_c", - packet_times_[1] - packet_times_[0], - base::TimeDelta::FromMilliseconds(1), - base::TimeDelta::FromSeconds(10), 100); - UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Network_Decode_2nd_To_3rd_c", - packet_times_[2] - packet_times_[1], - base::TimeDelta::FromMilliseconds(1), - base::TimeDelta::FromSeconds(10), 100); - UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Network_Decode_3rd_To_4th_c", - packet_times_[3] - packet_times_[2], - base::TimeDelta::FromMilliseconds(1), - base::TimeDelta::FromSeconds(10), 100); - UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Network_Decode_4th_To_5th_c", - packet_times_[4] - packet_times_[3], - base::TimeDelta::FromMilliseconds(1), - base::TimeDelta::FromSeconds(10), 100); - return; - } - case FilterContext::SDCH_PASSTHROUGH: { - // Despite advertising a dictionary, we handled non-sdch compressed - // content. 
- UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Network_Pass-through_Latency_F_a", - duration, - base::TimeDelta::FromMilliseconds(20), - base::TimeDelta::FromMinutes(10), 100); - UMA_HISTOGRAM_COUNTS_100("Sdch3.Network_Pass-through_Packets_b", - observed_packet_count_); - if (packet_times_.empty()) - return; - UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Network_Pass-through_1st_To_Last_a", - final_packet_time_ - packet_times_[0], - base::TimeDelta::FromMilliseconds(20), - base::TimeDelta::FromMinutes(10), 100); - DCHECK(max_packets_timed_ >= kSdchPacketHistogramCount); - DCHECK(kSdchPacketHistogramCount > 4); - if (packet_times_.size() <= 4) - return; - UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Network_Pass-through_1st_To_2nd_c", - packet_times_[1] - packet_times_[0], - base::TimeDelta::FromMilliseconds(1), - base::TimeDelta::FromSeconds(10), 100); - UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Network_Pass-through_2nd_To_3rd_c", - packet_times_[2] - packet_times_[1], - base::TimeDelta::FromMilliseconds(1), - base::TimeDelta::FromSeconds(10), 100); - UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Network_Pass-through_3rd_To_4th_c", - packet_times_[3] - packet_times_[2], - base::TimeDelta::FromMilliseconds(1), - base::TimeDelta::FromSeconds(10), 100); - UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Network_Pass-through_4th_To_5th_c", - packet_times_[4] - packet_times_[3], - base::TimeDelta::FromMilliseconds(1), - base::TimeDelta::FromSeconds(10), 100); - return; - } - - case FilterContext::SDCH_EXPERIMENT_DECODE: { - UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Experiment_Decode", - duration, - base::TimeDelta::FromMilliseconds(20), - base::TimeDelta::FromMinutes(10), 100); - // We already provided interpacket histograms above in the SDCH_DECODE - // case, so we don't need them here. - return; - } - case FilterContext::SDCH_EXPERIMENT_HOLDBACK: { - UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Experiment_Holdback", - duration, - base::TimeDelta::FromMilliseconds(20), - base::TimeDelta::FromMinutes(10), 100); - UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Experiment_Holdback_1st_To_Last_a", - final_packet_time_ - packet_times_[0], - base::TimeDelta::FromMilliseconds(20), - base::TimeDelta::FromMinutes(10), 100); - - DCHECK(max_packets_timed_ >= kSdchPacketHistogramCount); - DCHECK(kSdchPacketHistogramCount > 4); - if (packet_times_.size() <= 4) - return; - UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Experiment_Holdback_1st_To_2nd_c", - packet_times_[1] - packet_times_[0], - base::TimeDelta::FromMilliseconds(1), - base::TimeDelta::FromSeconds(10), 100); - UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Experiment_Holdback_2nd_To_3rd_c", - packet_times_[2] - packet_times_[1], - base::TimeDelta::FromMilliseconds(1), - base::TimeDelta::FromSeconds(10), 100); - UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Experiment_Holdback_3rd_To_4th_c", - packet_times_[3] - packet_times_[2], - base::TimeDelta::FromMilliseconds(1), - base::TimeDelta::FromSeconds(10), 100); - UMA_HISTOGRAM_CLIPPED_TIMES("Sdch3.Experiment_Holdback_4th_To_5th_c", - packet_times_[4] - packet_times_[3], - base::TimeDelta::FromMilliseconds(1), - base::TimeDelta::FromSeconds(10), 100); - return; - } - default: - NOTREACHED(); - return; - } -} - HostPortPair URLRequestJob::GetSocketAddress() const { return HostPortPair(); } @@ -369,7 +219,7 @@ void URLRequestJob::NotifyHeadersComplete() { // Initialize to the current time, and let the subclass optionally override // the time stamps if it has that information. The default request_time is // set by net::URLRequest before it calls our Start method. 
- request_->response_info_.response_time = Time::Now(); + request_->response_info_.response_time = base::Time::Now(); GetResponseInfo(&request_->response_info_); // When notifying the delegate, the delegate can release the request @@ -425,28 +275,14 @@ void URLRequestJob::NotifyHeadersComplete() { } has_handled_response_ = true; - if (request_->status().is_success()) { + if (request_->status().is_success()) filter_.reset(SetupFilter()); - // Check if this content appears to be compressible. - std::string mime_type; - if (GetMimeType(&mime_type) && - (net::IsSupportedJavascriptMimeType(mime_type.c_str()) || - net::IsSupportedNonImageMimeType(mime_type.c_str()))) { - is_compressible_content_ = true; - } - } - if (!filter_.get()) { std::string content_length; request_->GetResponseHeaderByName("content-length", &content_length); if (!content_length.empty()) base::StringToInt64(content_length, &expected_content_size_); - } else { - // Chrome today only sends "Accept-Encoding" for compression schemes. - // So, if there is a filter on the response, we know that the content - // was compressed. - is_compressed_ = true; } request_->ResponseStarted(); @@ -516,8 +352,6 @@ void URLRequestJob::NotifyDone(const URLRequestStatus &status) { return; done_ = true; - RecordCompressionHistograms(); - // Unless there was an error, we should have at least tried to handle // the response before getting here. DCHECK(has_handled_response_ || !status.is_success()); @@ -697,12 +531,6 @@ bool URLRequestJob::ReadFilteredData(int* bytes_read) { return rv; } -void URLRequestJob::EnablePacketCounting(size_t max_packets_timed) { - if (max_packets_timed_ < max_packets_timed) - max_packets_timed_ = max_packets_timed; - packet_timing_enabled_ = true; -} - const URLRequestStatus URLRequestJob::GetStatus() { if (request_) return request_->status(); @@ -783,102 +611,6 @@ bool URLRequestJob::FilterHasData() { } void URLRequestJob::UpdatePacketReadTimes() { - if (!packet_timing_enabled_) - return; - - if (filter_input_byte_count_ <= bytes_observed_in_packets_) { - DCHECK(filter_input_byte_count_ == bytes_observed_in_packets_); - return; // No new bytes have arrived. - } - - if (!bytes_observed_in_packets_) - request_time_snapshot_ = request_ ? request_->request_time() : base::Time(); - - final_packet_time_ = base::Time::Now(); - const size_t kTypicalPacketSize = 1430; - while (filter_input_byte_count_ > bytes_observed_in_packets_) { - ++observed_packet_count_; - if (max_packets_timed_ > packet_times_.size()) { - packet_times_.push_back(final_packet_time_); - DCHECK(static_cast<size_t>(observed_packet_count_) == - packet_times_.size()); - } - bytes_observed_in_packets_ += kTypicalPacketSize; - } - // Since packets may not be full, we'll remember the number of bytes we've - // accounted for in packets thus far. - bytes_observed_in_packets_ = filter_input_byte_count_; -} - -// The common type of histogram we use for all compression-tracking histograms. -#define COMPRESSION_HISTOGRAM(name, sample) \ - do { \ - UMA_HISTOGRAM_CUSTOM_COUNTS("Net.Compress." name, sample, \ - 500, 1000000, 100); \ - } while(0) - -void URLRequestJob::RecordCompressionHistograms() { - if (IsCachedContent() || // Don't record cached content - !GetStatus().is_success() || // Don't record failed content - !is_compressible_content_ || // Only record compressible content - !prefilter_bytes_read_) // Zero-byte responses aren't useful. - return; - - // Miniature requests aren't really compressible. Don't count them. 
- const int kMinSize = 16; - if (prefilter_bytes_read_ < kMinSize) - return; - - // Only record for http or https urls. - bool is_http = request_->url().SchemeIs("http"); - bool is_https = request_->url().SchemeIs("https"); - if (!is_http && !is_https) - return; - - const net::HttpResponseInfo& response = request_->response_info_; - int compressed_B = prefilter_bytes_read_; - int decompressed_B = postfilter_bytes_read_; - - // We want to record how often downloaded resources are compressed. - // But, we recognize that different protocols may have different - // properties. So, for each request, we'll put it into one of 3 - // groups: - // a) SSL resources - // Proxies cannot tamper with compression headers with SSL. - // b) Non-SSL, loaded-via-proxy resources - // In this case, we know a proxy might have interfered. - // c) Non-SSL, loaded-without-proxy resources - // In this case, we know there was no explicit proxy. However, - // it is possible that a transparent proxy was still interfering. - // - // For each group, we record the same 3 histograms. - - if (is_https) { - if (is_compressed_) { - COMPRESSION_HISTOGRAM("SSL.BytesBeforeCompression", compressed_B); - COMPRESSION_HISTOGRAM("SSL.BytesAfterCompression", decompressed_B); - } else { - COMPRESSION_HISTOGRAM("SSL.ShouldHaveBeenCompressed", decompressed_B); - } - return; - } - - if (response.was_fetched_via_proxy) { - if (is_compressed_) { - COMPRESSION_HISTOGRAM("Proxy.BytesBeforeCompression", compressed_B); - COMPRESSION_HISTOGRAM("Proxy.BytesAfterCompression", decompressed_B); - } else { - COMPRESSION_HISTOGRAM("Proxy.ShouldHaveBeenCompressed", decompressed_B); - } - return; - } - - if (is_compressed_) { - COMPRESSION_HISTOGRAM("NoProxy.BytesBeforeCompression", compressed_B); - COMPRESSION_HISTOGRAM("NoProxy.BytesAfterCompression", decompressed_B); - } else { - COMPRESSION_HISTOGRAM("NoProxy.ShouldHaveBeenCompressed", decompressed_B); - } } } // namespace net diff --git a/net/url_request/url_request_job.h b/net/url_request/url_request_job.h index 57f3184..f48a7fb 100644 --- a/net/url_request/url_request_job.h +++ b/net/url_request/url_request_job.h @@ -32,13 +32,6 @@ class X509Certificate; class URLRequestJob : public base::RefCounted<URLRequestJob> { public: - // When histogramming results related to SDCH and/or an SDCH latency test, the - // number of packets for which we need to record arrival times so as to - // calculate interpacket latencies. We currently are only looking at the - // first few packets, as we're monitoring the impact of the initial TCP - // congestion window on stalling of transmissions. - static const size_t kSdchPacketHistogramCount = 5; - explicit URLRequestJob(URLRequest* request); // Returns the request that owns this job. THIS POINTER MAY BE NULL if the @@ -239,9 +232,9 @@ class URLRequestJob : public base::RefCounted<URLRequestJob> { // the hood. bool ReadFilteredData(int *bytes_read); - // Facilitate histogramming by turning on packet counting. - // If called more than once, the largest value will be used. - void EnablePacketCounting(size_t max_packets_timed); + // Whether the response is being filtered in this job. + // Only valid after NotifyHeadersComplete() has been called. + bool HasFilter() { return filter_ != NULL; } // At or near destruction time, a derived class may request that the filters // be destroyed so that statistics can be gathered while the derived class is @@ -255,14 +248,15 @@ class URLRequestJob : public base::RefCounted<URLRequestJob> { // Set the status of the job. 
void SetStatus(const URLRequestStatus& status); - // TODO(adamk): Remove this method once it's no longer called from - // URLRequestJob. - virtual bool IsCachedContent() const; + // The number of bytes read before passing to the filter. + int prefilter_bytes_read() const { return prefilter_bytes_read_; } - // TODO(adamk): Move this method to url_request_http_job.cc by exposing - // the required stats to URLRequestJob children. - void RecordPacketStats(FilterContext::StatisticSelector statistic) const; + // The number of bytes read after passing through the filter. + int postfilter_bytes_read() const { return postfilter_bytes_read_; } + // Total number of bytes read from network (or cache) and typically handed + // to filter to process. Used to histogram compression ratios, and error + // recovery scenarios in filters. int64 filter_input_byte_count() const { return filter_input_byte_count_; } // The request that initiated this job. This value MAY BE NULL if the @@ -298,25 +292,18 @@ class URLRequestJob : public base::RefCounted<URLRequestJob> { // out. bool FilterHasData(); - // Record packet arrival times for possible use in histograms. - void UpdatePacketReadTimes(); - - void RecordCompressionHistograms(); + // Subclasses may implement this method to record packet arrival times. + // The default implementation does nothing. + virtual void UpdatePacketReadTimes(); // Indicates that the job is done producing data, either it has completed // all the data or an error has been encountered. Set exclusively by // NotifyDone so that it is kept in sync with the request. bool done_; - // The number of bytes read before passing to the filter. int prefilter_bytes_read_; - // The number of bytes read after passing through the filter. int postfilter_bytes_read_; - // True when (we believe) the content in this URLRequest was - // compressible. - bool is_compressible_content_; - // True when the content in this URLRequest was compressed. - bool is_compressed_; + int64 filter_input_byte_count_; // The data stream filter which is enabled on demand. scoped_ptr<Filter> filter_; @@ -347,48 +334,6 @@ class URLRequestJob : public base::RefCounted<URLRequestJob> { GURL deferred_redirect_url_; int deferred_redirect_status_code_; - //---------------------------------------------------------------------------- - // Data used for statistics gathering in some instances. This data is only - // used for histograms etc., and is not required. It is optionally gathered - // based on the settings of several control variables. - - // Enable recording of packet arrival times for histogramming. - bool packet_timing_enabled_; - - // TODO(jar): improve the quality of the gathered info by gathering most times - // at a lower point in the network stack, assuring we have actual packet - // boundaries, rather than approximations. Also note that input byte count - // as gathered here is post-SSL, and post-cache-fetch, and does not reflect - // true packet arrival times in such cases. - - // Total number of bytes read from network (or cache) and typically handed - // to filter to process. Used to histogram compression ratios, and error - // recovery scenarios in filters. - int64 filter_input_byte_count_; - - // The number of bytes that have been accounted for in packets (where some of - // those packets may possibly have had their time of arrival recorded). - int64 bytes_observed_in_packets_; - - // Limit on the size of the array packet_times_. This can be set to - // zero, and then no packet times will be gathered. 
- size_t max_packets_timed_; - - // Arrival times for some of the first few packets. - std::vector<base::Time> packet_times_; - - // The request time may not be available when we are being destroyed, so we - // snapshot it early on. - base::Time request_time_snapshot_; - - // Since we don't save all packet times in packet_times_, we save the - // last time for use in histograms. - base::Time final_packet_time_; - - // The count of the number of packets, some of which may not have been timed. - // We're ignoring overflow, as 1430 x 2^31 is a LOT of bytes. - int observed_packet_count_; - ScopedRunnableMethodFactory<URLRequestJob> method_factory_; DISALLOW_COPY_AND_ASSIGN(URLRequestJob); diff --git a/webkit/appcache/appcache_url_request_job.cc b/webkit/appcache/appcache_url_request_job.cc index 205458f..f04851f 100644 --- a/webkit/appcache/appcache_url_request_job.cc +++ b/webkit/appcache/appcache_url_request_job.cc @@ -256,10 +256,6 @@ int AppCacheURLRequestJob::GetResponseCode() const { return http_info()->headers->response_code(); } -bool AppCacheURLRequestJob::IsCachedContent() const { - return is_delivering_appcache_response(); -} - bool AppCacheURLRequestJob::ReadRawData(net::IOBuffer* buf, int buf_size, int *bytes_read) { DCHECK(is_delivering_appcache_response()); diff --git a/webkit/appcache/appcache_url_request_job.h b/webkit/appcache/appcache_url_request_job.h index 1ec9f95..e90f077 100644 --- a/webkit/appcache/appcache_url_request_job.h +++ b/webkit/appcache/appcache_url_request_job.h @@ -1,4 +1,4 @@ -// Copyright (c) 2009 The Chromium Authors. All rights reserved. +// Copyright (c) 2011 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. @@ -118,7 +118,6 @@ class AppCacheURLRequestJob : public net::URLRequestJob, // FilterContext methods virtual bool GetMimeType(std::string* mime_type) const; virtual int GetResponseCode() const; - virtual bool IsCachedContent() const; AppCacheStorage* storage_; bool has_been_started_; |
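
For reference, the grouping that RecordCompressionHistograms() applies (SSL vs. proxied vs. direct, compressed vs. should-have-been-compressed) can be read as a small decision function. The following is a standalone sketch under that assumption, with an invented RecordSample() standing in for the UMA histogram macros; it is not the Chromium code itself.

#include <cstdint>
#include <iostream>
#include <string>

// Hypothetical stand-in for UMA_HISTOGRAM_CUSTOM_COUNTS; just prints.
void RecordSample(const std::string& name, int64_t sample) {
  std::cout << name << " <- " << sample << "\n";
}

struct ResponseStats {
  bool is_https = false;
  bool fetched_via_proxy = false;
  bool was_filtered = false;     // a decompression filter ran on the body
  int64_t prefilter_bytes = 0;   // bytes read off the network (compressed)
  int64_t postfilter_bytes = 0;  // bytes after the filter (decompressed)
};

void RecordCompressionStats(const ResponseStats& s) {
  const int kMinSize = 16;  // miniature responses aren't worth counting
  if (s.prefilter_bytes < kMinSize)
    return;

  // Group a) SSL, b) via proxy, c) direct -- the same split as the diff.
  const std::string group = s.is_https            ? "SSL."
                            : s.fetched_via_proxy ? "Proxy."
                                                  : "NoProxy.";
  const std::string prefix = "Net.Compress." + group;

  if (s.was_filtered) {
    // Content arrived compressed: record both sides of the filter.
    RecordSample(prefix + "BytesBeforeCompression", s.prefilter_bytes);
    RecordSample(prefix + "BytesAfterCompression", s.postfilter_bytes);
  } else {
    // Compressible content that arrived uncompressed.
    RecordSample(prefix + "ShouldHaveBeenCompressed", s.postfilter_bytes);
  }
}

int main() {
  // Example: a filtered HTTPS response, 1200 bytes on the wire, 4800 decoded.
  RecordCompressionStats({/*is_https=*/true, /*fetched_via_proxy=*/false,
                          /*was_filtered=*/true, 1200, 4800});
}
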