Diffstat (limited to 'net/url_request/url_request_job.cc')
-rw-r--r--  net/url_request/url_request_job.cc  169
1 file changed, 167 insertions(+), 2 deletions(-)
diff --git a/net/url_request/url_request_job.cc b/net/url_request/url_request_job.cc
index 544913d..0b005a5 100644
--- a/net/url_request/url_request_job.cc
+++ b/net/url_request/url_request_job.cc
@@ -30,7 +30,11 @@ URLRequestJob::URLRequestJob(URLRequest* request)
read_buffer_len_(0),
has_handled_response_(false),
expected_content_size_(-1),
- filter_input_byte_count_(0) {
+ packet_timing_enabled_(false),
+ filter_input_byte_count_(0),
+ bytes_observed_in_packets_(0),
+ max_packets_timed_(0),
+ observed_packet_count_(0) {
load_flags_ = request_->load_flags();
is_profiling_ = request->enable_profiling();
if (is_profiling()) {
@@ -41,6 +45,10 @@ URLRequestJob::URLRequestJob(URLRequest* request)
}
URLRequestJob::~URLRequestJob() {
+  // Destroy the filter chain now, so that any histogram requests can be
+  // made before this job is torn down.
+ filter_.reset(NULL);
+
g_url_request_job_tracker.RemoveJob(this);
}
@@ -94,7 +102,7 @@ void URLRequestJob::ContinueDespiteLastError() {
}
int64 URLRequestJob::GetByteReadCount() const {
- return filter_input_byte_count_ ;
+ return filter_input_byte_count_;
}
bool URLRequestJob::GetURL(GURL* gurl) const {
@@ -522,6 +530,7 @@ void URLRequestJob::RecordBytesRead(int bytes_read) {
metrics_->total_bytes_read_ += bytes_read;
}
filter_input_byte_count_ += bytes_read;
+ UpdatePacketReadTimes(); // Facilitate stats recording if it is active.
g_url_request_job_tracker.OnBytesRead(this, bytes_read);
}
@@ -537,3 +546,159 @@ void URLRequestJob::SetStatus(const URLRequestStatus &status) {
if (request_)
request_->set_status(status);
}
+
+void URLRequestJob::UpdatePacketReadTimes() {
+ if (!packet_timing_enabled_)
+ return;
+
+ if (filter_input_byte_count_ <= bytes_observed_in_packets_) {
+    DCHECK_EQ(filter_input_byte_count_, bytes_observed_in_packets_);
+ return; // No new bytes have arrived.
+ }
+
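+  // On the first bytes observed, snapshot the request start time so the
+  // latency histograms can measure from the beginning of the request.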
+ if (!bytes_observed_in_packets_)
+ request_time_snapshot_ = GetRequestTime();
+
+ final_packet_time_ = base::Time::Now();
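+  // 1430 is a rough stand-in for a full TCP payload on a typical
+  // 1500-byte-MTU link after header overhead; it is a heuristic, not a
+  // measured per-connection MSS.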
+ const size_t kTypicalPacketSize = 1430;
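+  // Attribute the newly arrived bytes to typical-sized packets, recording a
+  // timestamp for each packet until max_packets_timed_ have been timed.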
+ while (filter_input_byte_count_ > bytes_observed_in_packets_) {
+ ++observed_packet_count_;
+ if (max_packets_timed_ > packet_times_.size()) {
+ packet_times_.push_back(final_packet_time_);
+      DCHECK_EQ(static_cast<size_t>(observed_packet_count_),
+                packet_times_.size());
+ }
+ bytes_observed_in_packets_ += kTypicalPacketSize;
+ }
+ // Since packets may not be full, we'll remember the number of bytes we've
+ // accounted for in packets thus far.
+ bytes_observed_in_packets_ = filter_input_byte_count_;
+}
+
+void URLRequestJob::EnablePacketCounting(size_t max_packets_timed) {
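+  // Only ever raise the limit, so the deepest request from any caller wins.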
+ if (max_packets_timed_ < max_packets_timed)
+ max_packets_timed_ = max_packets_timed;
+ packet_timing_enabled_ = true;
+}
+
+void URLRequestJob::RecordPacketStats(StatisticSelector statistic) const {
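+  // A null (never-set) final_packet_time_ means no packet data was ever
+  // observed, so there is nothing to record.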
+  if (!packet_timing_enabled_ || final_packet_time_.is_null())
+ return;
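+  // Elapsed time from the start of the request to the last observed packet.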
+ base::TimeDelta duration = final_packet_time_ - request_time_snapshot_;
+ switch (statistic) {
+ case SDCH_DECODE: {
+ UMA_HISTOGRAM_CLIPPED_TIMES("Sdch.Network_Decode_Latency_F_a", duration,
+ base::TimeDelta::FromMilliseconds(20),
+ base::TimeDelta::FromMinutes(10), 100);
+ UMA_HISTOGRAM_COUNTS_100("Sdch.Network_Decode_Packets_b",
+ static_cast<int>(observed_packet_count_));
+ UMA_HISTOGRAM_COUNTS("Sdch.Network_Decode_Bytes_Processed_a",
+ static_cast<int>(bytes_observed_in_packets_));
+ if (packet_times_.empty())
+ return;
+ UMA_HISTOGRAM_CLIPPED_TIMES("Sdch.Network_Decode_1st_To_Last_a",
+ final_packet_time_ - packet_times_[0],
+ base::TimeDelta::FromMilliseconds(20),
+ base::TimeDelta::FromMinutes(10), 100);
+
+ DCHECK(max_packets_timed_ >= kSdchPacketHistogramCount);
+ DCHECK(kSdchPacketHistogramCount > 4);
+ if (packet_times_.size() <= 4)
+ return;
+ UMA_HISTOGRAM_CLIPPED_TIMES("Sdch.Network_Decode_1st_To_2nd_c",
+ packet_times_[1] - packet_times_[0],
+ base::TimeDelta::FromMilliseconds(1),
+ base::TimeDelta::FromSeconds(10), 100);
+ UMA_HISTOGRAM_CLIPPED_TIMES("Sdch.Network_Decode_2nd_To_3rd_c",
+ packet_times_[2] - packet_times_[1],
+ base::TimeDelta::FromMilliseconds(1),
+ base::TimeDelta::FromSeconds(10), 100);
+ UMA_HISTOGRAM_CLIPPED_TIMES("Sdch.Network_Decode_3rd_To_4th_c",
+ packet_times_[3] - packet_times_[2],
+ base::TimeDelta::FromMilliseconds(1),
+ base::TimeDelta::FromSeconds(10), 100);
+ UMA_HISTOGRAM_CLIPPED_TIMES("Sdch.Network_Decode_4th_To_5th_c",
+ packet_times_[4] - packet_times_[3],
+ base::TimeDelta::FromMilliseconds(1),
+ base::TimeDelta::FromSeconds(10), 100);
+ return;
+ }
+ case SDCH_PASSTHROUGH: {
+      // Despite advertising an SDCH dictionary, we handled non-SDCH
+      // compressed content.
+ UMA_HISTOGRAM_CLIPPED_TIMES("Sdch.Network_Pass-through_Latency_F_a",
+ duration,
+ base::TimeDelta::FromMilliseconds(20),
+ base::TimeDelta::FromMinutes(10), 100);
+      UMA_HISTOGRAM_COUNTS_100("Sdch.Network_Pass-through_Packets_b",
+                               static_cast<int>(observed_packet_count_));
+ if (packet_times_.empty())
+ return;
+ UMA_HISTOGRAM_CLIPPED_TIMES("Sdch.Network_Pass-through_1st_To_Last_a",
+ final_packet_time_ - packet_times_[0],
+ base::TimeDelta::FromMilliseconds(20),
+ base::TimeDelta::FromMinutes(10), 100);
+ DCHECK(max_packets_timed_ >= kSdchPacketHistogramCount);
+ DCHECK(kSdchPacketHistogramCount > 4);
+ if (packet_times_.size() <= 4)
+ return;
+ UMA_HISTOGRAM_CLIPPED_TIMES("Sdch.Network_Pass-through_1st_To_2nd_c",
+ packet_times_[1] - packet_times_[0],
+ base::TimeDelta::FromMilliseconds(1),
+ base::TimeDelta::FromSeconds(10), 100);
+ UMA_HISTOGRAM_CLIPPED_TIMES("Sdch.Network_Pass-through_2nd_To_3rd_c",
+ packet_times_[2] - packet_times_[1],
+ base::TimeDelta::FromMilliseconds(1),
+ base::TimeDelta::FromSeconds(10), 100);
+ UMA_HISTOGRAM_CLIPPED_TIMES("Sdch.Network_Pass-through_3rd_To_4th_c",
+ packet_times_[3] - packet_times_[2],
+ base::TimeDelta::FromMilliseconds(1),
+ base::TimeDelta::FromSeconds(10), 100);
+ UMA_HISTOGRAM_CLIPPED_TIMES("Sdch.Network_Pass-through_4th_To_5th_c",
+ packet_times_[4] - packet_times_[3],
+ base::TimeDelta::FromMilliseconds(1),
+ base::TimeDelta::FromSeconds(10), 100);
+ return;
+ }
+
+ case SDCH_EXPERIMENT_DECODE: {
+ UMA_HISTOGRAM_CLIPPED_TIMES("Sdch.Experiment_Decode",
+ duration,
+ base::TimeDelta::FromMilliseconds(20),
+ base::TimeDelta::FromMinutes(10), 100);
+ // We already provided interpacket histograms above in the SDCH_DECODE
+ // case, so we don't need them here.
+ return;
+ }
+ case SDCH_EXPERIMENT_HOLDBACK: {
+ UMA_HISTOGRAM_CLIPPED_TIMES("Sdch.Experiment_Holdback",
+ duration,
+ base::TimeDelta::FromMilliseconds(20),
+ base::TimeDelta::FromMinutes(10), 100);
+ DCHECK(max_packets_timed_ >= kSdchPacketHistogramCount);
+ DCHECK(kSdchPacketHistogramCount > 4);
+ if (packet_times_.size() <= 4)
+ return;
+ UMA_HISTOGRAM_CLIPPED_TIMES("Sdch.Experiment_Holdback_1st_To_2nd_c",
+ packet_times_[1] - packet_times_[0],
+ base::TimeDelta::FromMilliseconds(1),
+ base::TimeDelta::FromSeconds(10), 100);
+ UMA_HISTOGRAM_CLIPPED_TIMES("Sdch.Experiment_Holdback_2nd_To_3rd_c",
+ packet_times_[2] - packet_times_[1],
+ base::TimeDelta::FromMilliseconds(1),
+ base::TimeDelta::FromSeconds(10), 100);
+ UMA_HISTOGRAM_CLIPPED_TIMES("Sdch.Experiment_Holdback_3rd_To_4th_c",
+ packet_times_[3] - packet_times_[2],
+ base::TimeDelta::FromMilliseconds(1),
+ base::TimeDelta::FromSeconds(10), 100);
+ UMA_HISTOGRAM_CLIPPED_TIMES("Sdch.Experiment_Holdback_4th_To_5th_c",
+ packet_times_[4] - packet_times_[3],
+ base::TimeDelta::FromMilliseconds(1),
+ base::TimeDelta::FromSeconds(10), 100);
+ return;
+ }
+ default:
+ NOTREACHED();
+ return;
+ }
+}
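
A minimal usage sketch (not part of this change): assuming the SDCH filter code is the intended caller, the new hooks would be driven roughly as below. EnableSdchPacketTiming() and RecordSdchOutcome() are hypothetical names invented for illustration; only EnablePacketCounting(), RecordPacketStats(), the StatisticSelector values, and kSdchPacketHistogramCount come from the patch itself.

#include "net/url_request/url_request_job.h"

// Hypothetical call site: ask the job to time at least the first
// kSdchPacketHistogramCount packets (the DCHECKs in RecordPacketStats()
// assume at least that many can be timed).
void EnableSdchPacketTiming(URLRequestJob* job) {
  job->EnablePacketCounting(kSdchPacketHistogramCount);
}

// Hypothetical call site: emit either the decode or the pass-through
// histogram family. Per the destructor comment above, this would run while
// the filter chain is being destroyed, before the job itself goes away.
void RecordSdchOutcome(const URLRequestJob* job, bool used_sdch_decoding) {
  job->RecordPacketStats(used_sdch_decoding
                             ? URLRequestJob::SDCH_DECODE
                             : URLRequestJob::SDCH_PASSTHROUGH);
}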