author     jar@chromium.org <jar@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>  2009-03-19 00:59:01 +0000
committer  jar@chromium.org <jar@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>  2009-03-19 00:59:01 +0000
commit     eee51b3c5f60f19f2394dba82403c010f7f252f6 (patch)
tree       da0751fbf009cdbd4c1849d1a1793dfade5437ee /net
parent     0b9a1cc9c1ed9892d451ca3e3bb7795c72aedd03 (diff)
Try to get inter-packet times more precisely
I had tried to rely on the call to ReadData() being made once per TCP/IP packet, but histograms from the field show that this does not work well :-(. This version instead watches for the cumulative byte count to cross multiples of 1430 bytes, and uses those crossings as a better approximation of when a packet is received. For test data with only a few packets, this is excellent. Some vendors, such as Google, tend to use at most 1430 bytes per packet, so even if we are off a bit, it will be close (for small packet counts).

Also, add a histogram for total transmitted data (compressed via sdch and gzip).

r=huanr,ajenjo

Review URL: http://codereview.chromium.org/48077

git-svn-id: svn://svn.chromium.org/chrome/trunk/src@12069 0039d316-1c4b-4281-b951-d872f2087c98
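The heuristic described above amounts to a ceiling division of the cumulative byte count by the 1430-byte boundary. A minimal standalone sketch of that mapping (EstimatePacketCount is an illustrative name, not part of the patch; the real logic lives in SdchFilter::UpdateReadTimes in the diff below):

#include <cstddef>
#include <cstdint>

// Map a cumulative byte count to an estimated packet count:
// 1..1430 bytes -> 1 packet, 1431..2860 -> 2 packets, and so on.
size_t EstimatePacketCount(int64_t bytes_read_so_far) {
  const size_t kTypicalPacketSize = 1430;  // common per-packet payload size
  if (bytes_read_so_far <= 0)
    return 0;
  const size_t bytes = static_cast<size_t>(bytes_read_so_far);
  return 1 + (bytes - 1) / kTypicalPacketSize;  // ceiling division
}

For example, EstimatePacketCount(1430) returns 1, while EstimatePacketCount(1431) returns 2.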
Diffstat (limited to 'net')
-rw-r--r--  net/base/sdch_filter.cc  59
-rw-r--r--  net/base/sdch_filter.h   10
2 files changed, 48 insertions, 21 deletions
diff --git a/net/base/sdch_filter.cc b/net/base/sdch_filter.cc
index dadc610..80d6991 100644
--- a/net/base/sdch_filter.cc
+++ b/net/base/sdch_filter.cc
@@ -9,6 +9,7 @@
#include "base/file_util.h"
#include "base/histogram.h"
#include "base/logging.h"
+#include "base/time.h"
#include "net/base/sdch_filter.h"
#include "net/base/sdch_manager.h"
@@ -97,50 +98,53 @@ SdchFilter::~SdchFilter() {
switch (decoding_status_) {
case DECODING_IN_PROGRESS: {
- UMA_HISTOGRAM_PERCENTAGE("Sdch.Network_Decode_Ratio", static_cast<int>(
+ UMA_HISTOGRAM_PERCENTAGE("Sdch.Network_Decode_Ratio_a", static_cast<int>(
(filter_context().GetByteReadCount() * 100) / output_bytes_));
- UMA_HISTOGRAM_CLIPPED_TIMES("Sdch.Network_Decode_Latency_F", duration,
+ UMA_HISTOGRAM_CLIPPED_TIMES("Sdch.Network_Decode_Latency_F_a", duration,
base::TimeDelta::FromMilliseconds(20),
base::TimeDelta::FromMinutes(10), 100);
- UMA_HISTOGRAM_CLIPPED_TIMES("Sdch.Network_Decode_1st_To_Last",
+ UMA_HISTOGRAM_CLIPPED_TIMES("Sdch.Network_Decode_1st_To_Last_a",
read_times_.back() - read_times_[0],
base::TimeDelta::FromMilliseconds(20),
base::TimeDelta::FromMinutes(10), 100);
- if (read_times_.size() > 3) {
- UMA_HISTOGRAM_CLIPPED_TIMES("Sdch.Network_Decode_3rd_To_4th",
+ if (read_times_.size() > 4) {
+ UMA_HISTOGRAM_CLIPPED_TIMES("Sdch.Network_Decode_3rd_To_4th_a",
read_times_[3] - read_times_[2],
base::TimeDelta::FromMilliseconds(10),
base::TimeDelta::FromSeconds(3), 100);
- UMA_HISTOGRAM_CLIPPED_TIMES("Sdch.Network_Decode_2nd_To_3rd",
- read_times_[2] - read_times_[1],
+ UMA_HISTOGRAM_CLIPPED_TIMES("Sdch.Network_Decode_4th_To_5th_a",
+ read_times_[4] - read_times_[3],
base::TimeDelta::FromMilliseconds(10),
base::TimeDelta::FromSeconds(3), 100);
}
- UMA_HISTOGRAM_COUNTS_100("Sdch.Network_Decode_Reads",
+ UMA_HISTOGRAM_COUNTS_100("Sdch.Network_Decode_Packets_a",
read_times_.size());
- UMA_HISTOGRAM_COUNTS("Sdch.Network_Decode_Bytes_Read", output_bytes_);
+ UMA_HISTOGRAM_COUNTS("Sdch.Network_Decode_Bytes_Processed_a",
+ static_cast<int>(filter_context().GetByteReadCount()));
+ UMA_HISTOGRAM_COUNTS("Sdch.Network_Decode_Bytes_VcdiffOut_a",
+ output_bytes_);
return;
}
case PASS_THROUGH: {
- UMA_HISTOGRAM_CLIPPED_TIMES("Sdch.Network_Pass-through_Latency_F",
+ UMA_HISTOGRAM_CLIPPED_TIMES("Sdch.Network_Pass-through_Latency_F_a",
duration,
base::TimeDelta::FromMilliseconds(20),
base::TimeDelta::FromMinutes(10), 100);
- UMA_HISTOGRAM_CLIPPED_TIMES("Sdch.Network_Pass-through_1st_To_Last",
+ UMA_HISTOGRAM_CLIPPED_TIMES("Sdch.Network_Pass-through_1st_To_Last_a",
read_times_.back() - read_times_[0],
base::TimeDelta::FromMilliseconds(20),
base::TimeDelta::FromMinutes(10), 100);
- if (read_times_.size() > 3) {
- UMA_HISTOGRAM_CLIPPED_TIMES("Sdch.Network_Pass-through_3rd_To_4th",
+ if (read_times_.size() > 4) {
+ UMA_HISTOGRAM_CLIPPED_TIMES("Sdch.Network_Pass-through_3rd_To_4th_a",
read_times_[3] - read_times_[2],
base::TimeDelta::FromMilliseconds(10),
base::TimeDelta::FromSeconds(3), 100);
- UMA_HISTOGRAM_CLIPPED_TIMES("Sdch.Network_Pass-through_2nd_To_3rd",
- read_times_[2] - read_times_[1],
+ UMA_HISTOGRAM_CLIPPED_TIMES("Sdch.Network_Pass-through_4th_To_5th_a",
+ read_times_[4] - read_times_[3],
base::TimeDelta::FromMilliseconds(10),
base::TimeDelta::FromSeconds(3), 100);
}
- UMA_HISTOGRAM_COUNTS_100("Sdch.Network_Pass-through_Reads",
+ UMA_HISTOGRAM_COUNTS_100("Sdch.Network_Pass-through_Packets_a",
read_times_.size());
return;
}
@@ -163,6 +167,25 @@ SdchFilter::~SdchFilter() {
} // end of switch.
}
+void SdchFilter::UpdateReadTimes() {
+ if (!next_stream_data_ || (stream_data_len_ == 0)) {
+ // Don't update when we're called to just flush out our internal buffers.
+ return;
+ }
+ const int64 bytes_read_so_far = filter_context().GetByteReadCount();
+ if (bytes_read_so_far <= 0)
+ return;
+ const size_t kTypicalPacketSize = 1430;
+ // For ByteReadCount up to 1430, we have 1 packet. Up to 2860, 2 packets etc.
+ if (bytes_read_so_far > 100 * kTypicalPacketSize)
+ return; // Let's not stress the array size.
+ const size_t bytes = static_cast<size_t>(bytes_read_so_far);
+ const size_t probable_packet_number = 1 + (bytes - 1) / kTypicalPacketSize;
+ base::Time now = base::Time::Now();
+ while (probable_packet_number > read_times_.size())
+ read_times_.push_back(now);
+}
+
bool SdchFilter::InitDecoding(Filter::FilterType filter_type) {
if (decoding_status_ != DECODING_UNINITIALIZED)
return false;
@@ -194,9 +217,7 @@ Filter::FilterStatus SdchFilter::ReadFilteredData(char* dest_buffer,
if (!dest_buffer || available_space <= 0)
return FILTER_ERROR;
- // Don't update when we're called to just flush out our internal buffers.
- if (next_stream_data_ && stream_data_len_ > 0)
- read_times_.push_back(base::Time::Now());
+ UpdateReadTimes();
if (WAITING_FOR_DICTIONARY_SELECTION == decoding_status_) {
FilterStatus status = InitializeDictionary();
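Before the header changes below, here is a sketch of how the UpdateReadTimes() added above behaves across calls. This is illustrative code, not part of the patch: base::Time is replaced by a plain integer timestamp, the 100-packet cap is omitted, and the byte counts in the trace are hypothetical. Note that backfilled entries share the timestamp of the call that first inferred them, so early inter-packet gaps can legitimately be zero.

#include <cstddef>
#include <cstdint>
#include <vector>

// Simplified mirror of SdchFilter::UpdateReadTimes(): infer how many
// packets the cumulative byte count implies, then backfill one
// timestamp per newly inferred packet.
void UpdateReadTimesSketch(int64_t bytes_read_so_far, int now,
                           std::vector<int>* read_times) {
  const size_t kTypicalPacketSize = 1430;
  if (bytes_read_so_far <= 0)
    return;
  const size_t bytes = static_cast<size_t>(bytes_read_so_far);
  const size_t probable_packets = 1 + (bytes - 1) / kTypicalPacketSize;
  while (probable_packets > read_times->size())
    read_times->push_back(now);
}

// Trace (hypothetical cumulative byte counts):
//   UpdateReadTimesSketch(700,  t1, &v);  // v = {t1}          1 packet
//   UpdateReadTimesSketch(3000, t2, &v);  // v = {t1, t2, t2}  3 packets
//   UpdateReadTimesSketch(3100, t3, &v);  // v unchanged       still 3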
diff --git a/net/base/sdch_filter.h b/net/base/sdch_filter.h
index cbb3491..133dd6b 100644
--- a/net/base/sdch_filter.h
+++ b/net/base/sdch_filter.h
@@ -55,6 +55,10 @@ class SdchFilter : public Filter {
PASS_THROUGH, // Non-sdch content being passed without alteration.
};
+ // Update the read_times_ array with time estimates for the number of packets
+ // read so far.
+ void UpdateReadTimes();
+
// Identify the suggested dictionary, and initialize underlying decompressor.
Filter::FilterStatus InitializeDictionary();
@@ -101,8 +105,10 @@ class SdchFilter : public Filter {
size_t source_bytes_;
size_t output_bytes_;
- // Record of chunk processing times for this filter. Used only for stats
- // generations in histograms.
+ // Record of packet processing times for this filter. Used only for stats
+ // generation in histograms. There is one time entry each time the byte
+ // count received exceeds the next multiple of 1430 bytes (a common
+ // per-TCP/IP-packet payload size).
std::vector<base::Time> read_times_;
// Error recovery in content type may add an sdch filter type, in which case