path: root/net
author     jar@chromium.org <jar@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>  2009-04-17 22:29:34 +0000
committer  jar@chromium.org <jar@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>  2009-04-17 22:29:34 +0000
commit     b8b7cfb7f79974550fa76894fa939890604970a4 (patch)
tree       4001bdee7e28eb01995ee5771c70cc4a4cbeabc0 /net
parent     9d9bbee2f0cf6e02188cef6344f99f758f6fac27 (diff)
Do a better job of counting SDCH packets
Some servers are splitting packets in SDCH responses, and we need to be more careful about gathering counts with underfilled packets.

r=huanr,openvcdiff
Review URL: http://codereview.chromium.org/67254

git-svn-id: svn://svn.chromium.org/chrome/trunk/src@13971 0039d316-1c4b-4281-b951-d872f2087c98
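For readers skimming the patch below, here is a minimal sketch of the counting approach the change adopts; the class and method names (PacketCounter, OnBytesRead) are illustrative and not part of the patch. Rather than deriving a packet number by dividing the total byte count by a typical packet size, each read that delivers new bytes bumps the packet counter, and the byte tally is then snapped to the bytes actually observed, so underfilled packets from servers that split SDCH responses are still counted.

// Minimal, self-contained sketch of the per-read packet accounting; names are
// illustrative rather than taken from the patch.
#include <cstddef>

class PacketCounter {
 public:
  // |total_bytes_read| is the cumulative byte count reported by the stream
  // (the analogue of FilterContext::GetByteReadCount() in the patch).
  void OnBytesRead(size_t total_bytes_read) {
    if (total_bytes_read <= bytes_observed_in_packets_)
      return;  // No new bytes since the previous call.
    // Count one packet per typical-sized chunk of newly observed bytes; a
    // read that delivered any fresh data counts as at least one packet.
    while (total_bytes_read > bytes_observed_in_packets_) {
      ++observed_packet_count_;
      bytes_observed_in_packets_ += kTypicalPacketSize;
    }
    // Packets may be underfilled, so remember the bytes actually seen rather
    // than the rounded-up multiple of kTypicalPacketSize.
    bytes_observed_in_packets_ = total_bytes_read;
  }

  size_t observed_packet_count() const { return observed_packet_count_; }

 private:
  static const size_t kTypicalPacketSize = 1430;  // Common TCP payload size.
  size_t observed_packet_count_ = 0;
  size_t bytes_observed_in_packets_ = 0;
};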
Diffstat (limited to 'net')
-rw-r--r--  net/base/sdch_filter.cc  56
-rw-r--r--  net/base/sdch_filter.h   16
2 files changed, 48 insertions, 24 deletions
diff --git a/net/base/sdch_filter.cc b/net/base/sdch_filter.cc
index a778f59..6265ad7 100644
--- a/net/base/sdch_filter.cc
+++ b/net/base/sdch_filter.cc
@@ -9,7 +9,6 @@
#include "base/file_util.h"
#include "base/histogram.h"
#include "base/logging.h"
-#include "base/time.h"
#include "net/base/sdch_filter.h"
#include "net/base/sdch_manager.h"
@@ -26,6 +25,9 @@ SdchFilter::SdchFilter(const FilterContext& filter_context)
dest_buffer_excess_index_(0),
source_bytes_(0),
output_bytes_(0),
+ observed_packet_count_(0),
+ bytes_observed_in_packets_(0),
+ final_packet_time_(),
possible_pass_through_(false),
connect_time_(filter_context.GetRequestTime()),
was_cached_(filter_context.IsCachedContent()) {
@@ -88,7 +90,7 @@ SdchFilter::~SdchFilter() {
return;
}
- base::TimeDelta duration = read_times_.back() - connect_time_;
+ base::TimeDelta duration = final_packet_time_ - connect_time_;
// We clip our logging at 10 minutes to prevent anomalous data from being
// considered (per suggestion from Jake Brutlag).
if (10 < duration.InMinutes()) {
@@ -104,21 +106,21 @@ SdchFilter::~SdchFilter() {
base::TimeDelta::FromMilliseconds(20),
base::TimeDelta::FromMinutes(10), 100);
UMA_HISTOGRAM_CLIPPED_TIMES("Sdch.Network_Decode_1st_To_Last_a",
- read_times_.back() - read_times_[0],
+ final_packet_time_ - read_times_[0],
base::TimeDelta::FromMilliseconds(20),
base::TimeDelta::FromMinutes(10), 100);
if (read_times_.size() > 4) {
- UMA_HISTOGRAM_CLIPPED_TIMES("Sdch.Network_Decode_3rd_To_4th_a",
+ UMA_HISTOGRAM_CLIPPED_TIMES("Sdch.Network_Decode_3rd_To_4th_b",
read_times_[3] - read_times_[2],
base::TimeDelta::FromMilliseconds(10),
base::TimeDelta::FromSeconds(3), 100);
- UMA_HISTOGRAM_CLIPPED_TIMES("Sdch.Network_Decode_4th_To_5th_a",
+ UMA_HISTOGRAM_CLIPPED_TIMES("Sdch.Network_Decode_4th_To_5th_b",
read_times_[4] - read_times_[3],
base::TimeDelta::FromMilliseconds(10),
base::TimeDelta::FromSeconds(3), 100);
}
- UMA_HISTOGRAM_COUNTS_100("Sdch.Network_Decode_Packets_a",
- read_times_.size());
+ UMA_HISTOGRAM_COUNTS_100("Sdch.Network_Decode_Packets_b",
+ observed_packet_count_);
UMA_HISTOGRAM_COUNTS("Sdch.Network_Decode_Bytes_Processed_a",
static_cast<int>(filter_context().GetByteReadCount()));
UMA_HISTOGRAM_COUNTS("Sdch.Network_Decode_Bytes_VcdiffOut_a",
@@ -131,21 +133,21 @@ SdchFilter::~SdchFilter() {
base::TimeDelta::FromMilliseconds(20),
base::TimeDelta::FromMinutes(10), 100);
UMA_HISTOGRAM_CLIPPED_TIMES("Sdch.Network_Pass-through_1st_To_Last_a",
- read_times_.back() - read_times_[0],
+ final_packet_time_ - read_times_[0],
base::TimeDelta::FromMilliseconds(20),
base::TimeDelta::FromMinutes(10), 100);
if (read_times_.size() > 4) {
- UMA_HISTOGRAM_CLIPPED_TIMES("Sdch.Network_Pass-through_3rd_To_4th_a",
+ UMA_HISTOGRAM_CLIPPED_TIMES("Sdch.Network_Pass-through_3rd_To_4th_b",
read_times_[3] - read_times_[2],
base::TimeDelta::FromMilliseconds(10),
base::TimeDelta::FromSeconds(3), 100);
- UMA_HISTOGRAM_CLIPPED_TIMES("Sdch.Network_Pass-through_4th_To_5th_a",
+ UMA_HISTOGRAM_CLIPPED_TIMES("Sdch.Network_Pass-through_4th_To_5th_b",
read_times_[4] - read_times_[3],
base::TimeDelta::FromMilliseconds(10),
base::TimeDelta::FromSeconds(3), 100);
}
- UMA_HISTOGRAM_COUNTS_100("Sdch.Network_Pass-through_Packets_a",
- read_times_.size());
+ UMA_HISTOGRAM_COUNTS_100("Sdch.Network_Pass-through_Packets_b",
+ observed_packet_count_);
return;
}
case DECODING_UNINITIALIZED: {
@@ -172,18 +174,26 @@ void SdchFilter::UpdateReadTimes() {
// Don't update when we're called to just flush out our internal buffers.
return;
}
- const int64 bytes_read_so_far = filter_context().GetByteReadCount();
- if (bytes_read_so_far <= 0)
- return;
+ const size_t bytes_read_so_far =
+ static_cast<size_t>(filter_context().GetByteReadCount());
+ if (bytes_read_so_far <= bytes_observed_in_packets_) {
+ DCHECK(bytes_read_so_far == bytes_observed_in_packets_);
+ return; // No new bytes have arrived.
+ }
+
+ // We only save distinct times for the first 5 packets.
+ const size_t kMaxTimesInArray = 5;
const size_t kTypicalPacketSize = 1430;
- // For ByteReadCount up to 1430, we have 1 packet. Up to 2860, 2 packets etc.
- if (bytes_read_so_far > 100 * kTypicalPacketSize)
- return; // Let's not stress the array size.
- const size_t bytes = static_cast<size_t>(bytes_read_so_far);
- const size_t probable_packet_number = 1 + (bytes - 1) / kTypicalPacketSize;
- base::Time now = base::Time::Now();
- while (probable_packet_number > read_times_.size())
- read_times_.push_back(now);
+ final_packet_time_ = base::Time::Now();
+ while (bytes_read_so_far > bytes_observed_in_packets_) {
+ if (++observed_packet_count_ <= kMaxTimesInArray) {
+ read_times_.push_back(final_packet_time_);
+ }
+ bytes_observed_in_packets_ += kTypicalPacketSize;
+ }
+ // Since packets may not be full, we'll remember the number of bytes we've
+ // accounted for in packets thus far.
+ bytes_observed_in_packets_ = bytes_read_so_far;
}
bool SdchFilter::InitDecoding(Filter::FilterType filter_type) {
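As an illustration of the new accounting (the byte counts here are hypothetical): if GetByteReadCount() reports 1000, then 2000, then 2100 bytes across three reads, the old code mapped those totals to probable packet numbers 1, 2, and 2, so the underfilled third read added no entry to read_times_; the new loop counts one packet per read that delivered fresh data (three in total) and then records 2100 in bytes_observed_in_packets_, so a later read that still reports 2100 bytes changes nothing.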
diff --git a/net/base/sdch_filter.h b/net/base/sdch_filter.h
index 133dd6b..85dfef6 100644
--- a/net/base/sdch_filter.h
+++ b/net/base/sdch_filter.h
@@ -18,6 +18,7 @@
#include <vector>
#include "base/scoped_ptr.h"
+#include "base/time.h"
#include "net/base/filter.h"
#include "net/base/sdch_manager.h"
@@ -105,10 +106,23 @@ class SdchFilter : public Filter {
size_t source_bytes_;
size_t output_bytes_;
+ // The number of packets we've observed over the net.
+ size_t observed_packet_count_;
+
+ // We can't really see when a packet arrives, but we can record how much data
+ // was accounted for in previously noted packets. We use this count to (later)
+ // identify new packets.
+ size_t bytes_observed_in_packets_;
+
+ // Since we don't save all packet times in read_times_, we save the last time
+ // for use in histograms.
+ base::Time final_packet_time_;
+
// Record of packet processing times for this filter. Used only for stats
// generation in histograms. There is one time entry each time the byte
// count received exceeds the next multiple of 1430 bytes (a common
- // per-TCP/IP-packet payload size).
+ // per-TCP/IP-packet payload size). It is currently only valid for the first
+ // 5 packets.
std::vector<base::Time> read_times_;
// Error recovery in content type may add an sdch filter type, in which case