author     jar@chromium.org <jar@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>  2009-02-02 06:50:03 +0000
committer  jar@chromium.org <jar@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>  2009-02-02 06:50:03 +0000
commit     d43e0729bbe1028c64ec29387c5a214e5eaa6dc7 (patch)
tree       8c458fa18f2140604a1a241ce858a139b5ed4b7e /net
parent     0303f31c7e78cd5e1e54773c50b9216710630cfa (diff)
Add to SDCH histogramming
Define a histogram macro that is customizable, and precise, for detailed examination of performance when needed. Provide graceful degradation when the entire SDCH window is not received. We now blacklist the site with an exponential back-off. This allows the user to hit reload and get non-SDCH content.

bug=1609306
r=huanr,mbelshe

Review URL: http://codereview.chromium.org/19718

git-svn-id: svn://svn.chromium.org/chrome/trunk/src@9035 0039d316-1c4b-4281-b951-d872f2087c98
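The customizable macro referenced above is UMA_HISTOGRAM_CLIPPED_TIMES, used throughout the sdch_filter.cc hunks below. A minimal sketch of its invocation pattern follows, with the parameter order (name, sample, minimum, maximum, bucket count) inferred from those hunks; the macro's expansion presumably lives in base/histogram.h and is not part of this diff:

    #include "base/histogram.h"
    #include "base/time.h"

    // Sketch: record one latency sample into 100 buckets spanning 20 ms to
    // 10 minutes; samples outside that range land in the end buckets.
    void RecordDecodeLatencySketch(base::TimeDelta duration) {
      UMA_HISTOGRAM_CLIPPED_TIMES(L"Sdch.Network_Decode_Latency_F", duration,
                                  base::TimeDelta::FromMilliseconds(20),
                                  base::TimeDelta::FromMinutes(10), 100);
    }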
Diffstat (limited to 'net')
-rw-r--r--  net/base/sdch_filter.cc   67
-rw-r--r--  net/base/sdch_filter.h     7
-rw-r--r--  net/base/sdch_manager.h    3
3 files changed, 51 insertions, 26 deletions
diff --git a/net/base/sdch_filter.cc b/net/base/sdch_filter.cc
index 9f50d31..6c0343f 100644
--- a/net/base/sdch_filter.cc
+++ b/net/base/sdch_filter.cc
@@ -24,8 +24,6 @@ SdchFilter::SdchFilter()
dest_buffer_excess_index_(0),
source_bytes_(0),
output_bytes_(0),
- time_of_last_read_(),
- size_of_last_read_(0),
possible_pass_through_(false) {
}
@@ -37,30 +35,60 @@ SdchFilter::~SdchFilter() {
}
if (vcdiff_streaming_decoder_.get()) {
- if (!vcdiff_streaming_decoder_->FinishDecoding())
+ if (!vcdiff_streaming_decoder_->FinishDecoding()) {
decoding_status_ = DECODING_ERROR;
+ SdchManager::SdchErrorRecovery(SdchManager::INCOMPLETE_SDCH_CONTENT);
+ // Make it possible for the user to hit reload, and get non-sdch content.
+ // Note this will "wear off" quickly enough, and is just meant to assure
+ // in some rare case that the user is not stuck.
+ SdchManager::BlacklistDomain(url());
+ }
}
if (!was_cached()
&& base::Time() != connect_time()
- && base::Time() != time_of_last_read_) {
- base::TimeDelta duration = time_of_last_read_ - connect_time();
+ && read_times_.size() > 0
+ && base::Time() != read_times_.back()) {
+ base::TimeDelta duration = read_times_.back() - connect_time();
// We clip our logging at 10 minutes to prevent anomalous data from being
// considered (per suggestion from Jake Brutlag).
- // The relatively precise histogram only properly covers the range 1ms to 3
- // minutes, so the additional range is just gathered to calculate means and
- // variance as is done in other settings.
if (10 >= duration.InMinutes()) {
- if (DECODING_IN_PROGRESS == decoding_status_)
- UMA_HISTOGRAM_MEDIUM_TIMES(L"Sdch.Network_Decode_Latency_M", duration);
- if (PASS_THROUGH == decoding_status_)
- UMA_HISTOGRAM_MEDIUM_TIMES(L"Sdch.Network_Pass-through_Latency_M",
- duration);
+ if (DECODING_IN_PROGRESS == decoding_status_) {
+ UMA_HISTOGRAM_CLIPPED_TIMES(L"Sdch.Network_Decode_Latency_F", duration,
+ base::TimeDelta::FromMilliseconds(20),
+ base::TimeDelta::FromMinutes(10), 100);
+ UMA_HISTOGRAM_CLIPPED_TIMES(L"Sdch.Network_Decode_1st_To_Last",
+ read_times_.back() - read_times_[0],
+ base::TimeDelta::FromMilliseconds(20),
+ base::TimeDelta::FromMinutes(10), 100);
+ if (read_times_.size() > 3)
+ UMA_HISTOGRAM_CLIPPED_TIMES(L"Sdch.Network_Decode_3rd_To_4th",
+ read_times_[3] - read_times_[2],
+ base::TimeDelta::FromMilliseconds(10),
+ base::TimeDelta::FromSeconds(3), 100);
+ UMA_HISTOGRAM_COUNTS_100(L"Sdch.Network_Decode_Reads",
+ read_times_.size());
+ UMA_HISTOGRAM_COUNTS(L"Sdch.Network_Decode_Bytes_Read", output_bytes_);
+ } else if (PASS_THROUGH == decoding_status_) {
+ UMA_HISTOGRAM_CLIPPED_TIMES(L"Sdch.Network_Pass-through_Latency_F",
+ duration,
+ base::TimeDelta::FromMilliseconds(20),
+ base::TimeDelta::FromMinutes(10), 100);
+ UMA_HISTOGRAM_CLIPPED_TIMES(L"Sdch.Network_Pass-through_1st_To_Last",
+ read_times_.back() - read_times_[0],
+ base::TimeDelta::FromMilliseconds(20),
+ base::TimeDelta::FromMinutes(10), 100);
+ if (read_times_.size() > 3)
+ UMA_HISTOGRAM_CLIPPED_TIMES(L"Sdch.Network_Pass-through_3rd_To_4th",
+ read_times_[3] - read_times_[2],
+ base::TimeDelta::FromMilliseconds(10),
+ base::TimeDelta::FromSeconds(3), 100);
+ UMA_HISTOGRAM_COUNTS_100(L"Sdch.Network_Pass-through_Reads",
+ read_times_.size());
+ }
}
}
- UMA_HISTOGRAM_COUNTS(L"Sdch.Bytes output", output_bytes_);
-
if (dictionary_)
dictionary_->Release();
}
@@ -97,10 +125,8 @@ Filter::FilterStatus SdchFilter::ReadFilteredData(char* dest_buffer,
return FILTER_ERROR;
// Don't update when we're called to just flush out our internal buffers.
- if (next_stream_data_ && stream_data_len_ > 0) {
- time_of_last_read_ = base::Time::Now();
- size_of_last_read_ = stream_data_len_;
- }
+ if (next_stream_data_ && stream_data_len_ > 0)
+ read_times_.push_back(base::Time::Now());
if (WAITING_FOR_DICTIONARY_SELECTION == decoding_status_) {
FilterStatus status = InitializeDictionary();
@@ -151,7 +177,7 @@ Filter::FilterStatus SdchFilter::ReadFilteredData(char* dest_buffer,
SdchManager::SdchErrorRecovery(
SdchManager::META_REFRESH_CACHED_RECOVERY);
} else {
- // Since it wasn't in the cache, we definately need at lest some
+ // Since it wasn't in the cache, we definitely need at least some
// period of blacklisting to get the correct content.
SdchManager::BlacklistDomain(url());
SdchManager::SdchErrorRecovery(SdchManager::META_REFRESH_RECOVERY);
@@ -195,7 +221,6 @@ Filter::FilterStatus SdchFilter::ReadFilteredData(char* dest_buffer,
return FILTER_ERROR;
}
-
if (!next_stream_data_ || stream_data_len_ <= 0)
return FILTER_NEED_MORE_DATA;
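The commit message notes that the blacklisting applied above uses an exponential back-off that wears off after a bounded number of loads, so the user is never permanently stuck. The SdchManager side is not part of this diff; a hypothetical, self-contained sketch of that pattern (every name below is invented for illustration):

    // Hypothetical sketch of per-domain exponential back-off blacklisting;
    // not the actual SdchManager code, which this diff does not show.
    #include <map>
    #include <string>

    class BlacklistSketch {
     public:
      // Each offense doubles the number of page loads for which SDCH is
      // disabled, so repeated failures back off exponentially.
      void BlacklistDomain(const std::string& domain) {
        int& exponent = blacklist_exponent_[domain];
        exponent = exponent ? exponent * 2 : 1;
        remaining_loads_[domain] = exponent;
      }

      // Returns true once the blacklist has worn off; each query consumes
      // one of the remaining blacklisted loads.
      bool IsInSupportedDomain(const std::string& domain) {
        std::map<std::string, int>::iterator it =
            remaining_loads_.find(domain);
        if (it == remaining_loads_.end() || it->second <= 0)
          return true;
        --it->second;
        return false;
      }

     private:
      std::map<std::string, int> blacklist_exponent_;
      std::map<std::string, int> remaining_loads_;
    };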
diff --git a/net/base/sdch_filter.h b/net/base/sdch_filter.h
index bfc9681..d8ed354 100644
--- a/net/base/sdch_filter.h
+++ b/net/base/sdch_filter.h
@@ -99,10 +99,9 @@ class SdchFilter : public Filter {
size_t source_bytes_;
size_t output_bytes_;
- // When was the most recent non-zero size data chunk processed?
- base::Time time_of_last_read_;
- // How large was the most recent non-zero size data chunk?
- int size_of_last_read_;
+ // Record of chunk processing times for this filter. Used only for stats
+ // generation in histograms.
+ std::vector<base::Time> read_times_;
// Error recovery in content type may add an sdch filter type, in which case
// we should gracefully perform pass through if the format is incorrect, or
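Replacing the single time_of_last_read_ with a vector of read times is what lets the destructor in the .cc hunks above derive several figures from one record. The two interval reductions, sketched (assuming at least four recorded reads, as the size() > 3 guard requires):

    #include <vector>
    #include "base/time.h"

    // First-to-last spans every data read but excludes connect latency.
    base::TimeDelta FirstToLast(const std::vector<base::Time>& reads) {
      return reads.back() - reads[0];
    }

    // Third-to-fourth samples a single steady-state inter-read gap, away
    // from connection setup and final-chunk effects.
    base::TimeDelta ThirdToFourth(const std::vector<base::Time>& reads) {
      return reads[3] - reads[2];
    }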
diff --git a/net/base/sdch_manager.h b/net/base/sdch_manager.h
index 1363d7ac..0f7cdeb 100644
--- a/net/base/sdch_manager.h
+++ b/net/base/sdch_manager.h
@@ -107,12 +107,13 @@ class SdchManager {
// Problematic decode recovery methods.
META_REFRESH_RECOVERY = 70, // Dictionary not found.
- // defunct = 71, // ALmost the same as META_REFRESH_UNSUPPORTED.
+ // defunct = 71, // Almost the same as META_REFRESH_UNSUPPORTED.
// defunct = 72, // Almost the same as CACHED_META_REFRESH_UNSUPPORTED.
// defunct = 73, // PASSING_THROUGH_NON_SDCH plus DISCARD_TENTATIVE_SDCH.
META_REFRESH_UNSUPPORTED = 74, // Unrecoverable error.
CACHED_META_REFRESH_UNSUPPORTED = 75, // As above, but pulled from cache.
PASSING_THROUGH_NON_SDCH = 76, // Non-html tagged as sdch but malformed.
+ INCOMPLETE_SDCH_CONTENT = 77, // Last window was not completely decoded.
// Common decoded recovery methods.
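The fixed numeric values (70, 74..77) keep histogram buckets comparable across builds, which is why defunct entries are retired by comment rather than renumbered. Each code reaches a histogram through SdchManager::SdchErrorRecovery(), called from the filter hunks above; its body is not in this diff. A hypothetical sketch, assuming a standard enumeration-histogram macro and an illustrative histogram name:

    #include "base/histogram.h"

    // Hypothetical sketch: funnel a stable problem code into an enumeration
    // histogram. The macro use and the name are assumptions, not this diff.
    static void SdchErrorRecoverySketch(int problem_code) {
      UMA_HISTOGRAM_ENUMERATION(L"Sdch.ProblemCodes", problem_code, 100);
    }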