diff options
author | jar@chromium.org <jar@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98> | 2009-05-02 01:07:10 +0000 |
---|---|---|
committer | jar@chromium.org <jar@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98> | 2009-05-02 01:07:10 +0000 |
commit | 75f6ee7fd8b7aa5222a4d3c5f5221056dc2fdf42 (patch) | |
tree | 212396387ae8d8584ed111280513d2a7750227aa /net | |
parent | 4b68e0d50c4a69b91a384b698a96f6a51ca7241f (diff) | |
download | chromium_src-75f6ee7fd8b7aa5222a4d3c5f5221056dc2fdf42.zip chromium_src-75f6ee7fd8b7aa5222a4d3c5f5221056dc2fdf42.tar.gz chromium_src-75f6ee7fd8b7aa5222a4d3c5f5221056dc2fdf42.tar.bz2 |
Change names of SDCH related histograms.
I now have accumulated too many evolutions of histograms for SDCH,
and it is getting harder to pull out the most recent set from the
lengthy list (and confusing other folks). I've created a new
prefix of "Sdch2." rather than "Sdch." for all the histogram
names.
I also include a few lint fixups on DCHECKs.
r=rafaelw
Review URL: http://codereview.chromium.org/100275
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@15129 0039d316-1c4b-4281-b951-d872f2087c98
Diffstat (limited to 'net')
-rw-r--r-- | net/base/sdch_filter.cc | 20 | ||||
-rw-r--r-- | net/base/sdch_manager.cc | 6 | ||||
-rw-r--r-- | net/url_request/url_request_job.cc | 48 |
3 files changed, 37 insertions, 37 deletions
diff --git a/net/base/sdch_filter.cc b/net/base/sdch_filter.cc index 0eb98a0..4987472 100644 --- a/net/base/sdch_filter.cc +++ b/net/base/sdch_filter.cc @@ -40,7 +40,7 @@ SdchFilter::~SdchFilter() { static int filter_use_count = 0; ++filter_use_count; if (META_REFRESH_RECOVERY == decoding_status_) { - UMA_HISTOGRAM_COUNTS("Sdch.FilterUseBeforeDisabling", filter_use_count); + UMA_HISTOGRAM_COUNTS("Sdch2.FilterUseBeforeDisabling", filter_use_count); } if (vcdiff_streaming_decoder_.get()) { @@ -51,22 +51,22 @@ SdchFilter::~SdchFilter() { // Note this will "wear off" quickly enough, and is just meant to assure // in some rare case that the user is not stuck. SdchManager::BlacklistDomain(url_); - UMA_HISTOGRAM_COUNTS("Sdch.PartialBytesIn", + UMA_HISTOGRAM_COUNTS("Sdch2.PartialBytesIn", static_cast<int>(filter_context().GetByteReadCount())); - UMA_HISTOGRAM_COUNTS("Sdch.PartialVcdiffIn", source_bytes_); - UMA_HISTOGRAM_COUNTS("Sdch.PartialVcdiffOut", output_bytes_); + UMA_HISTOGRAM_COUNTS("Sdch2.PartialVcdiffIn", source_bytes_); + UMA_HISTOGRAM_COUNTS("Sdch2.PartialVcdiffOut", output_bytes_); } } if (!dest_buffer_excess_.empty()) { // Filter chaining error, or premature teardown. 
SdchManager::SdchErrorRecovery(SdchManager::UNFLUSHED_CONTENT); - UMA_HISTOGRAM_COUNTS("Sdch.UnflushedBytesIn", + UMA_HISTOGRAM_COUNTS("Sdch2.UnflushedBytesIn", static_cast<int>(filter_context().GetByteReadCount())); - UMA_HISTOGRAM_COUNTS("Sdch.UnflushedBufferSize", + UMA_HISTOGRAM_COUNTS("Sdch2.UnflushedBufferSize", dest_buffer_excess_.size()); - UMA_HISTOGRAM_COUNTS("Sdch.UnflushedVcdiffIn", source_bytes_); - UMA_HISTOGRAM_COUNTS("Sdch.UnflushedVcdiffOut", output_bytes_); + UMA_HISTOGRAM_COUNTS("Sdch2.UnflushedVcdiffIn", source_bytes_); + UMA_HISTOGRAM_COUNTS("Sdch2.UnflushedVcdiffOut", output_bytes_); } if (was_cached_) { @@ -78,9 +78,9 @@ SdchFilter::~SdchFilter() { switch (decoding_status_) { case DECODING_IN_PROGRESS: { - UMA_HISTOGRAM_PERCENTAGE("Sdch.Network_Decode_Ratio_a", static_cast<int>( + UMA_HISTOGRAM_PERCENTAGE("Sdch2.Network_Decode_Ratio_a", static_cast<int>( (filter_context().GetByteReadCount() * 100) / output_bytes_)); - UMA_HISTOGRAM_COUNTS("Sdch.Network_Decode_Bytes_VcdiffOut_a", + UMA_HISTOGRAM_COUNTS("Sdch2.Network_Decode_Bytes_VcdiffOut_a", output_bytes_); filter_context().RecordPacketStats(FilterContext::SDCH_DECODE); diff --git a/net/base/sdch_manager.cc b/net/base/sdch_manager.cc index 1ea9e89..30995a8 100644 --- a/net/base/sdch_manager.cc +++ b/net/base/sdch_manager.cc @@ -32,7 +32,7 @@ SdchManager* SdchManager::Global() { // static void SdchManager::SdchErrorRecovery(ProblemCodes problem) { - static LinearHistogram histogram("Sdch.ProblemCodes_3", MIN_PROBLEM_CODE, + static LinearHistogram histogram("Sdch2.ProblemCodes_3", MIN_PROBLEM_CODE, MAX_PROBLEM_CODE - 1, MAX_PROBLEM_CODE); histogram.SetFlags(kUmaTargetedHistogramFlag); histogram.Add(problem); @@ -265,7 +265,7 @@ bool SdchManager::AddSdchDictionary(const std::string& dictionary_text, return false; } - UMA_HISTOGRAM_COUNTS("Sdch.Dictionary size loaded", dictionary_text.size()); + UMA_HISTOGRAM_COUNTS("Sdch2.Dictionary size loaded", dictionary_text.size()); DLOG(INFO) << "Loaded 
dictionary with client hash " << client_hash << " and server hash " << server_hash; Dictionary* dictionary = @@ -306,7 +306,7 @@ void SdchManager::GetAvailDictionaryList(const GURL& target_url, } // Watch to see if we have corrupt or numerous dictionaries. if (count > 0) - UMA_HISTOGRAM_COUNTS("Sdch.Advertisement_Count", count); + UMA_HISTOGRAM_COUNTS("Sdch2.Advertisement_Count", count); } SdchManager::Dictionary::Dictionary(const std::string& dictionary_text, diff --git a/net/url_request/url_request_job.cc b/net/url_request/url_request_job.cc index 0b005a5..6175a0d 100644 --- a/net/url_request/url_request_job.cc +++ b/net/url_request/url_request_job.cc @@ -187,8 +187,8 @@ void URLRequestJob::FilteredDataRead(int bytes_read) { bool URLRequestJob::ReadFilteredData(int *bytes_read) { DCHECK(filter_.get()); // don't add data if there is no filter DCHECK(read_buffer_ != NULL); // we need to have a buffer to fill - DCHECK(read_buffer_len_ > 0); // sanity check - DCHECK(read_buffer_len_ < 1000000); // sanity check + DCHECK_GT(read_buffer_len_, 0); // sanity check + DCHECK_LT(read_buffer_len_, 1000000); // sanity check bool rv = false; *bytes_read = 0; @@ -262,7 +262,7 @@ bool URLRequestJob::ReadFilteredData(int *bytes_read) { } case Filter::FILTER_ERROR: { filter_needs_more_output_space_ = false; - // TODO: Figure out a better error code. + // TODO(jar): Figure out a better error code. 
NotifyDone(URLRequestStatus(URLRequestStatus::FAILED, net::ERR_FAILED)); rv = false; break; @@ -587,16 +587,16 @@ void URLRequestJob::RecordPacketStats(StatisticSelector statistic) const { base::TimeDelta duration = final_packet_time_ - request_time_snapshot_; switch (statistic) { case SDCH_DECODE: { - UMA_HISTOGRAM_CLIPPED_TIMES("Sdch.Network_Decode_Latency_F_a", duration, + UMA_HISTOGRAM_CLIPPED_TIMES("Sdch2.Network_Decode_Latency_F_a", duration, base::TimeDelta::FromMilliseconds(20), base::TimeDelta::FromMinutes(10), 100); - UMA_HISTOGRAM_COUNTS_100("Sdch.Network_Decode_Packets_b", + UMA_HISTOGRAM_COUNTS_100("Sdch2.Network_Decode_Packets_b", static_cast<int>(observed_packet_count_)); - UMA_HISTOGRAM_COUNTS("Sdch.Network_Decode_Bytes_Processed_a", + UMA_HISTOGRAM_COUNTS("Sdch2.Network_Decode_Bytes_Processed_a", static_cast<int>(bytes_observed_in_packets_)); if (packet_times_.empty()) return; - UMA_HISTOGRAM_CLIPPED_TIMES("Sdch.Network_Decode_1st_To_Last_a", + UMA_HISTOGRAM_CLIPPED_TIMES("Sdch2.Network_Decode_1st_To_Last_a", final_packet_time_ - packet_times_[0], base::TimeDelta::FromMilliseconds(20), base::TimeDelta::FromMinutes(10), 100); @@ -605,19 +605,19 @@ void URLRequestJob::RecordPacketStats(StatisticSelector statistic) const { DCHECK(kSdchPacketHistogramCount > 4); if (packet_times_.size() <= 4) return; - UMA_HISTOGRAM_CLIPPED_TIMES("Sdch.Network_Decode_1st_To_2nd_c", + UMA_HISTOGRAM_CLIPPED_TIMES("Sdch2.Network_Decode_1st_To_2nd_c", packet_times_[1] - packet_times_[0], base::TimeDelta::FromMilliseconds(1), base::TimeDelta::FromSeconds(10), 100); - UMA_HISTOGRAM_CLIPPED_TIMES("Sdch.Network_Decode_2nd_To_3rd_c", + UMA_HISTOGRAM_CLIPPED_TIMES("Sdch2.Network_Decode_2nd_To_3rd_c", packet_times_[2] - packet_times_[1], base::TimeDelta::FromMilliseconds(1), base::TimeDelta::FromSeconds(10), 100); - UMA_HISTOGRAM_CLIPPED_TIMES("Sdch.Network_Decode_3rd_To_4th_c", + UMA_HISTOGRAM_CLIPPED_TIMES("Sdch2.Network_Decode_3rd_To_4th_c", packet_times_[3] - 
packet_times_[2], base::TimeDelta::FromMilliseconds(1), base::TimeDelta::FromSeconds(10), 100); - UMA_HISTOGRAM_CLIPPED_TIMES("Sdch.Network_Decode_4th_To_5th_c", + UMA_HISTOGRAM_CLIPPED_TIMES("Sdch2.Network_Decode_4th_To_5th_c", packet_times_[4] - packet_times_[3], base::TimeDelta::FromMilliseconds(1), base::TimeDelta::FromSeconds(10), 100); @@ -626,15 +626,15 @@ void URLRequestJob::RecordPacketStats(StatisticSelector statistic) const { case SDCH_PASSTHROUGH: { // Despite advertising a dictionary, we handled non-sdch compressed // content. - UMA_HISTOGRAM_CLIPPED_TIMES("Sdch.Network_Pass-through_Latency_F_a", + UMA_HISTOGRAM_CLIPPED_TIMES("Sdch2.Network_Pass-through_Latency_F_a", duration, base::TimeDelta::FromMilliseconds(20), base::TimeDelta::FromMinutes(10), 100); - UMA_HISTOGRAM_COUNTS_100("Sdch.Network_Pass-through_Packets_b", + UMA_HISTOGRAM_COUNTS_100("Sdch2.Network_Pass-through_Packets_b", observed_packet_count_); if (packet_times_.empty()) return; - UMA_HISTOGRAM_CLIPPED_TIMES("Sdch.Network_Pass-through_1st_To_Last_a", + UMA_HISTOGRAM_CLIPPED_TIMES("Sdch2.Network_Pass-through_1st_To_Last_a", final_packet_time_ - packet_times_[0], base::TimeDelta::FromMilliseconds(20), base::TimeDelta::FromMinutes(10), 100); @@ -642,19 +642,19 @@ void URLRequestJob::RecordPacketStats(StatisticSelector statistic) const { DCHECK(kSdchPacketHistogramCount > 4); if (packet_times_.size() <= 4) return; - UMA_HISTOGRAM_CLIPPED_TIMES("Sdch.Network_Pass-through_1st_To_2nd_c", + UMA_HISTOGRAM_CLIPPED_TIMES("Sdch2.Network_Pass-through_1st_To_2nd_c", packet_times_[1] - packet_times_[0], base::TimeDelta::FromMilliseconds(1), base::TimeDelta::FromSeconds(10), 100); - UMA_HISTOGRAM_CLIPPED_TIMES("Sdch.Network_Pass-through_2nd_To_3rd_c", + UMA_HISTOGRAM_CLIPPED_TIMES("Sdch2.Network_Pass-through_2nd_To_3rd_c", packet_times_[2] - packet_times_[1], base::TimeDelta::FromMilliseconds(1), base::TimeDelta::FromSeconds(10), 100); - 
UMA_HISTOGRAM_CLIPPED_TIMES("Sdch.Network_Pass-through_3rd_To_4th_c", + UMA_HISTOGRAM_CLIPPED_TIMES("Sdch2.Network_Pass-through_3rd_To_4th_c", packet_times_[3] - packet_times_[2], base::TimeDelta::FromMilliseconds(1), base::TimeDelta::FromSeconds(10), 100); - UMA_HISTOGRAM_CLIPPED_TIMES("Sdch.Network_Pass-through_4th_To_5th_c", + UMA_HISTOGRAM_CLIPPED_TIMES("Sdch2.Network_Pass-through_4th_To_5th_c", packet_times_[4] - packet_times_[3], base::TimeDelta::FromMilliseconds(1), base::TimeDelta::FromSeconds(10), 100); @@ -662,7 +662,7 @@ void URLRequestJob::RecordPacketStats(StatisticSelector statistic) const { } case SDCH_EXPERIMENT_DECODE: { - UMA_HISTOGRAM_CLIPPED_TIMES("Sdch.Experiment_Decode", + UMA_HISTOGRAM_CLIPPED_TIMES("Sdch2.Experiment_Decode", duration, base::TimeDelta::FromMilliseconds(20), base::TimeDelta::FromMinutes(10), 100); @@ -671,7 +671,7 @@ void URLRequestJob::RecordPacketStats(StatisticSelector statistic) const { return; } case SDCH_EXPERIMENT_HOLDBACK: { - UMA_HISTOGRAM_CLIPPED_TIMES("Sdch.Experiment_Holdback", + UMA_HISTOGRAM_CLIPPED_TIMES("Sdch2.Experiment_Holdback", duration, base::TimeDelta::FromMilliseconds(20), base::TimeDelta::FromMinutes(10), 100); @@ -679,19 +679,19 @@ void URLRequestJob::RecordPacketStats(StatisticSelector statistic) const { DCHECK(kSdchPacketHistogramCount > 4); if (packet_times_.size() <= 4) return; - UMA_HISTOGRAM_CLIPPED_TIMES("Sdch.Experiment_Holdback_1st_To_2nd_c", + UMA_HISTOGRAM_CLIPPED_TIMES("Sdch2.Experiment_Holdback_1st_To_2nd_c", packet_times_[1] - packet_times_[0], base::TimeDelta::FromMilliseconds(1), base::TimeDelta::FromSeconds(10), 100); - UMA_HISTOGRAM_CLIPPED_TIMES("Sdch.Experiment_Holdback_2nd_To_3rd_c", + UMA_HISTOGRAM_CLIPPED_TIMES("Sdch2.Experiment_Holdback_2nd_To_3rd_c", packet_times_[2] - packet_times_[1], base::TimeDelta::FromMilliseconds(1), base::TimeDelta::FromSeconds(10), 100); - UMA_HISTOGRAM_CLIPPED_TIMES("Sdch.Experiment_Holdback_3rd_To_4th_c", + 
UMA_HISTOGRAM_CLIPPED_TIMES("Sdch2.Experiment_Holdback_3rd_To_4th_c", packet_times_[3] - packet_times_[2], base::TimeDelta::FromMilliseconds(1), base::TimeDelta::FromSeconds(10), 100); - UMA_HISTOGRAM_CLIPPED_TIMES("Sdch.Experiment_Holdback_4th_To_5th_c", + UMA_HISTOGRAM_CLIPPED_TIMES("Sdch2.Experiment_Holdback_4th_To_5th_c", packet_times_[4] - packet_times_[3], base::TimeDelta::FromMilliseconds(1), base::TimeDelta::FromSeconds(10), 100); |