| author | jar@chromium.org <jar@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98> | 2009-02-02 06:50:03 +0000 |
|---|---|---|
| committer | jar@chromium.org <jar@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98> | 2009-02-02 06:50:03 +0000 |
| commit | d43e0729bbe1028c64ec29387c5a214e5eaa6dc7 (patch) | |
| tree | 8c458fa18f2140604a1a241ce858a139b5ed4b7e | |
| parent | 0303f31c7e78cd5e1e54773c50b9216710630cfa (diff) | |
Add to SDCH histogramming
Define a histogram macro that is customizable and precise, for detailed
examination of performance when needed (a usage sketch follows the
commit metadata below).
Provide graceful degradation when the entire SDCH window is not
received. We now blacklist the site with an exponential back-off
(an illustrative sketch follows the diff below). This allows the user
to hit reload and get non-SDCH content.
bug=1609306
r=huanr,mbelshe
Review URL: http://codereview.chromium.org/19718
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@9035 0039d316-1c4b-4281-b951-d872f2087c98
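The core of the change is the new clipped-times macro family
(`HISTOGRAM_CLIPPED_TIMES`, `DHISTOGRAM_CLIPPED_TIMES`,
`UMA_HISTOGRAM_CLIPPED_TIMES`) defined in the diff below. As a rough usage
sketch only, not code from this patch: the function name and histogram name
are hypothetical, and the 20 ms to 10 minute range simply mirrors what the
SDCH filter passes; samples above the stated maximum are discarded rather than
lumped into an overflow bucket.

```cpp
// Hypothetical caller, assuming base/histogram.h and base/time.h as of this
// revision. Records elapsed time into 100 buckets spanning 20 ms .. 10 min.
#include "base/histogram.h"
#include "base/time.h"

void RecordDecodeLatency(const base::Time& start_time) {
  base::TimeDelta elapsed = base::Time::Now() - start_time;
  // Samples at or above the 10 minute maximum are silently dropped.
  UMA_HISTOGRAM_CLIPPED_TIMES(L"MyFeature.Decode_Latency", elapsed,
                              base::TimeDelta::FromMilliseconds(20),
                              base::TimeDelta::FromMinutes(10), 100);
}
```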
| -rw-r--r-- | base/histogram.h | 27 |
| -rw-r--r-- | net/base/sdch_filter.cc | 67 |
| -rw-r--r-- | net/base/sdch_filter.h | 7 |
| -rw-r--r-- | net/base/sdch_manager.h | 3 |
4 files changed, 74 insertions, 30 deletions
diff --git a/base/histogram.h b/base/histogram.h
index 8b4bded..9860e93 100644
--- a/base/histogram.h
+++ b/base/histogram.h
@@ -41,7 +41,7 @@
 //------------------------------------------------------------------------------
 // Provide easy general purpose histogram in a macro, just like stats counters.
-// These macros all use 50 buckets.
+// The first two macros use 50 buckets.
 
 #define HISTOGRAM_TIMES(name, sample) do { \
     static Histogram counter((name), base::TimeDelta::FromMilliseconds(1), \
@@ -54,6 +54,13 @@
     counter.Add(sample); \
   } while (0)
 
+// For folks that need real specific times, use this, but you'll only get
+// samples that are in the range (overly large samples are discarded).
+#define HISTOGRAM_CLIPPED_TIMES(name, sample, min, max, bucket_count) do { \
+    static Histogram counter((name), min, max, bucket_count); \
+    if ((sample) < (max)) counter.AddTime(sample); \
+  } while (0)
+
 //------------------------------------------------------------------------------
 // This macro set is for a histogram that can support both addition and removal
 // of samples. It should be used to render the accumulated asset allocation
@@ -85,12 +92,16 @@
 #define DHISTOGRAM_COUNTS(name, sample) HISTOGRAM_COUNTS(name, sample)
 #define DASSET_HISTOGRAM_COUNTS(name, sample) ASSET_HISTOGRAM_COUNTS(name, \
                                                                      sample)
+#define DHISTOGRAM_CLIPPED_TIMES(name, sample, min, max, bucket_count) \
+    HISTOGRAM_CLIPPED_TIMES(name, sample, min, max, bucket_count)
 
 #else  // NDEBUG
 
 #define DHISTOGRAM_TIMES(name, sample) do {} while (0)
 #define DHISTOGRAM_COUNTS(name, sample) do {} while (0)
 #define DASSET_HISTOGRAM_COUNTS(name, sample) do {} while (0)
+#define DHISTOGRAM_CLIPPED_TIMES(name, sample, min, max, bucket_count) \
+    do {} while (0)
 
 #endif  // NDEBUG
 
@@ -124,6 +135,12 @@ static const int kUmaTargetedHistogramFlag = 0x1;
     counter.AddTime(sample); \
   } while (0)
 
+#define UMA_HISTOGRAM_CLIPPED_TIMES(name, sample, min, max, bucket_count) do { \
+    static Histogram counter((name), min, max, bucket_count); \
+    counter.SetFlags(kUmaTargetedHistogramFlag); \
+    if ((sample) < (max)) counter.AddTime(sample); \
+  } while (0)
+
 #define UMA_HISTOGRAM_COUNTS(name, sample) do { \
     static Histogram counter((name), 1, 1000000, 50); \
     counter.SetFlags(kUmaTargetedHistogramFlag); \
@@ -176,7 +193,7 @@ class Histogram : public StatsRate {
   // Accessor methods.
   Count counts(size_t i) const { return counts_[i]; }
-  Count TotalCount() const ;
+  Count TotalCount() const;
   int64 sum() const { return sum_; }
   int64 square_sum() const { return square_sum_; }
 
@@ -276,7 +293,7 @@ class Histogram : public StatsRate {
   // Write a common header message describing this histogram.
   void WriteAsciiHeader(const SampleSet& snapshot,
-                        Count sample_count, std::string* output) const ;
+                        Count sample_count, std::string* output) const;
 
   // Write information about previous, current, and next buckets.
   // Information such as cumulative percentage, etc.
@@ -375,7 +392,9 @@ class LinearHistogram : public Histogram {
 // BooleanHistogram is a histogram for booleans.
 class BooleanHistogram : public LinearHistogram {
  public:
-  BooleanHistogram(const wchar_t* name) : LinearHistogram(name, 0, 2, 3) {}
+  explicit BooleanHistogram(const wchar_t* name)
+      : LinearHistogram(name, 0, 2, 3) {
+  }
 
   virtual void AddBoolean(bool value) { Add(value ? 1 : 0); }
diff --git a/net/base/sdch_filter.cc b/net/base/sdch_filter.cc
index 9f50d31..6c0343f 100644
--- a/net/base/sdch_filter.cc
+++ b/net/base/sdch_filter.cc
@@ -24,8 +24,6 @@ SdchFilter::SdchFilter()
       dest_buffer_excess_index_(0),
       source_bytes_(0),
       output_bytes_(0),
-      time_of_last_read_(),
-      size_of_last_read_(0),
       possible_pass_through_(false) {
 }
 
@@ -37,30 +35,60 @@ SdchFilter::~SdchFilter() {
   }
 
   if (vcdiff_streaming_decoder_.get()) {
-    if (!vcdiff_streaming_decoder_->FinishDecoding())
+    if (!vcdiff_streaming_decoder_->FinishDecoding()) {
       decoding_status_ = DECODING_ERROR;
+      SdchManager::SdchErrorRecovery(SdchManager::INCOMPLETE_SDCH_CONTENT);
+      // Make it possible for the user to hit reload, and get non-sdch content.
+      // Note this will "wear off" quickly enough, and is just meant to assure
+      // in some rare case that the user is not stuck.
+      SdchManager::BlacklistDomain(url());
+    }
   }
 
   if (!was_cached()
       && base::Time() != connect_time()
-      && base::Time() != time_of_last_read_) {
-    base::TimeDelta duration = time_of_last_read_ - connect_time();
+      && read_times_.size() > 0
+      && base::Time() != read_times_.back()) {
+    base::TimeDelta duration = read_times_.back() - connect_time();
     // We clip our logging at 10 minutes to prevent anamolous data from being
     // considered (per suggestion from Jake Brutlag).
-    // The relatively precise histogram only properly covers the range 1ms to 3
-    // minutes, so the additional range is just gathered to calculate means and
-    // variance as is done in other settings.
     if (10 >= duration.InMinutes()) {
-      if (DECODING_IN_PROGRESS == decoding_status_)
-        UMA_HISTOGRAM_MEDIUM_TIMES(L"Sdch.Network_Decode_Latency_M", duration);
-      if (PASS_THROUGH == decoding_status_)
-        UMA_HISTOGRAM_MEDIUM_TIMES(L"Sdch.Network_Pass-through_Latency_M",
-                                   duration);
+      if (DECODING_IN_PROGRESS == decoding_status_) {
+        UMA_HISTOGRAM_CLIPPED_TIMES(L"Sdch.Network_Decode_Latency_F", duration,
+                                    base::TimeDelta::FromMilliseconds(20),
+                                    base::TimeDelta::FromMinutes(10), 100);
+        UMA_HISTOGRAM_CLIPPED_TIMES(L"Sdch.Network_Decode_1st_To_Last",
+                                    read_times_.back() - read_times_[0],
+                                    base::TimeDelta::FromMilliseconds(20),
+                                    base::TimeDelta::FromMinutes(10), 100);
+        if (read_times_.size() > 3)
+          UMA_HISTOGRAM_CLIPPED_TIMES(L"Sdch.Network_Decode_3rd_To_4th",
+                                      read_times_[3] - read_times_[2],
+                                      base::TimeDelta::FromMilliseconds(10),
+                                      base::TimeDelta::FromSeconds(3), 100);
+        UMA_HISTOGRAM_COUNTS_100(L"Sdch.Network_Decode_Reads",
+                                 read_times_.size());
+        UMA_HISTOGRAM_COUNTS(L"Sdch.Network_Decode_Bytes_Read", output_bytes_);
+      } else if (PASS_THROUGH == decoding_status_) {
+        UMA_HISTOGRAM_CLIPPED_TIMES(L"Sdch.Network_Pass-through_Latency_F",
+                                    duration,
+                                    base::TimeDelta::FromMilliseconds(20),
+                                    base::TimeDelta::FromMinutes(10), 100);
+        UMA_HISTOGRAM_CLIPPED_TIMES(L"Sdch.Network_Pass-through_1st_To_Last",
+                                    read_times_.back() - read_times_[0],
+                                    base::TimeDelta::FromMilliseconds(20),
+                                    base::TimeDelta::FromMinutes(10), 100);
+        if (read_times_.size() > 3)
+          UMA_HISTOGRAM_CLIPPED_TIMES(L"Sdch.Network_Pass-through_3rd_To_4th",
+                                      read_times_[3] - read_times_[2],
+                                      base::TimeDelta::FromMilliseconds(10),
+                                      base::TimeDelta::FromSeconds(3), 100);
+        UMA_HISTOGRAM_COUNTS_100(L"Sdch.Network_Pass-through_Reads",
+                                 read_times_.size());
+      }
     }
   }
 
-  UMA_HISTOGRAM_COUNTS(L"Sdch.Bytes output", output_bytes_);
-
   if (dictionary_)
     dictionary_->Release();
 }
@@ -97,10 +125,8 @@ Filter::FilterStatus SdchFilter::ReadFilteredData(char* dest_buffer,
     return FILTER_ERROR;
 
   // Don't update when we're called to just flush out our internal buffers.
-  if (next_stream_data_ && stream_data_len_ > 0) {
-    time_of_last_read_ = base::Time::Now();
-    size_of_last_read_ = stream_data_len_;
-  }
+  if (next_stream_data_ && stream_data_len_ > 0)
+    read_times_.push_back(base::Time::Now());
 
   if (WAITING_FOR_DICTIONARY_SELECTION == decoding_status_) {
     FilterStatus status = InitializeDictionary();
@@ -151,7 +177,7 @@ Filter::FilterStatus SdchFilter::ReadFilteredData(char* dest_buffer,
         SdchManager::SdchErrorRecovery(
             SdchManager::META_REFRESH_CACHED_RECOVERY);
       } else {
-        // Since it wasn't in the cache, we definately need at lest some
+        // Since it wasn't in the cache, we definately need at least some
         // period of blacklisting to get the correct content.
         SdchManager::BlacklistDomain(url());
         SdchManager::SdchErrorRecovery(SdchManager::META_REFRESH_RECOVERY);
@@ -195,7 +221,6 @@ Filter::FilterStatus SdchFilter::ReadFilteredData(char* dest_buffer,
     return FILTER_ERROR;
   }
 
-
   if (!next_stream_data_ || stream_data_len_ <= 0)
     return FILTER_NEED_MORE_DATA;
diff --git a/net/base/sdch_filter.h b/net/base/sdch_filter.h
index bfc9681..d8ed354 100644
--- a/net/base/sdch_filter.h
+++ b/net/base/sdch_filter.h
@@ -99,10 +99,9 @@ class SdchFilter : public Filter {
   size_t source_bytes_;
   size_t output_bytes_;
 
-  // When was the most recent non-zero size data chunk processed?
-  base::Time time_of_last_read_;
-  // How large was the most recent non-zero size data chunk?
-  int size_of_last_read_;
+  // Record of chunk processing times for this filter. Used only for stats
+  // generations in histograms.
+  std::vector<base::Time> read_times_;
 
   // Error recovery in content type may add an sdch filter type, in which case
   // we should gracefully perform pass through if the format is incorrect, or
diff --git a/net/base/sdch_manager.h b/net/base/sdch_manager.h
index 1363d7ac..0f7cdeb 100644
--- a/net/base/sdch_manager.h
+++ b/net/base/sdch_manager.h
@@ -107,12 +107,13 @@ class SdchManager {
     // Problematic decode recovery methods.
     META_REFRESH_RECOVERY = 70,  // Dictionary not found.
-    // defunct = 71, // ALmost the same as META_REFRESH_UNSUPPORTED.
+    // defunct = 71, // Almost the same as META_REFRESH_UNSUPPORTED.
     // defunct = 72, // Almost the same as CACHED_META_REFRESH_UNSUPPORTED.
     // defunct = 73, // PASSING_THROUGH_NON_SDCH plus DISCARD_TENTATIVE_SDCH.
     META_REFRESH_UNSUPPORTED = 74,  // Unrecoverable error.
     CACHED_META_REFRESH_UNSUPPORTED = 75,  // As above, but pulled from cache.
     PASSING_THROUGH_NON_SDCH = 76,  // Non-html tagged as sdch but malformed.
+    INCOMPLETE_SDCH_CONTENT = 77,  // Last window was not completely decoded.
 
     // Common decoded recovery methods.
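For the graceful-degradation half of the change, the filter destructor above
now calls SdchManager::BlacklistDomain(url()) when the final SDCH window could
not be decoded; the exponential back-off policy mentioned in the commit
message lives inside SdchManager and is not part of this diff. The following
is only an illustrative sketch of such a per-domain back-off, with invented
class and member names, not the Chromium implementation:

```cpp
// Self-contained sketch of exponential back-off blacklisting (illustrative
// names only). Each failure doubles how long the domain stays blacklisted,
// so the penalty "wears off" quickly unless failures keep happening.
#include <map>
#include <string>

class DomainBlacklist {
 public:
  // Called when decoding fails for |domain|: block it for twice as many
  // subsequent fetches as last time (1, 2, 4, 8, ...).
  void Blacklist(const std::string& domain) {
    int& exponent = exponents_[domain];
    remaining_blocks_[domain] = 1 << exponent;
    // Cap the exponent so the back-off never becomes effectively permanent.
    if (exponent < 10)
      ++exponent;
  }

  // Called before advertising SDCH for |domain|; returns true while the
  // domain is still serving out its back-off period, consuming one count.
  bool IsBlacklisted(const std::string& domain) {
    std::map<std::string, int>::iterator it = remaining_blocks_.find(domain);
    if (it == remaining_blocks_.end() || it->second <= 0)
      return false;
    --it->second;
    return true;
  }

 private:
  std::map<std::string, int> exponents_;         // Per-domain back-off exponent.
  std::map<std::string, int> remaining_blocks_;  // Fetches left to block.
};
```

A real implementation would also need to decay or reset the exponent after a
period of successful decodes and decide on the right domain granularity; the
sketch only shows the doubling back-off the commit message describes.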
