author     rvargas@google.com <rvargas@google.com@0039d316-1c4b-4281-b951-d872f2087c98>   2011-06-24 00:11:01 +0000
committer  rvargas@google.com <rvargas@google.com@0039d316-1c4b-4281-b951-d872f2087c98>   2011-06-24 00:11:01 +0000
commit     bbaea8fe7a6d050578bf9f78a2e730e22dd56c0e (patch)
tree       6b47681f639b0109bc86004aa0b89191738f9d45 /net/url_request
parent     3f600a5e5889626dfd6893f01b68c50efbe15783 (diff)
net: Add some histograms to URLRequestHttpJob and make sure that
we know when we are finished with the job
BUG=79186
TEST=none
Review URL: http://codereview.chromium.org/6995120
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@90302 0039d316-1c4b-4281-b951-d872f2087c98
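
The patch routes every way a job can end (NotifyDone, DestroyTransaction, the destructor) through a single DoneWithRequest(CompletionCause), guarded by a done_ flag so the timing and compression histograms fire exactly once. Below is a minimal standalone sketch of that pattern; the Job class, std::chrono clock, and console output are stand-ins for Chromium's URLRequestHttpJob, base::TimeTicks, and UMA macros, and only the control flow mirrors the patch.

#include <chrono>
#include <iostream>

enum CompletionCause { ABORTED, FINISHED };

class Job {
 public:
  void Start() { start_time_ = Clock::now(); }

  // Every exit path (success, cancel, destruction) funnels through here;
  // the done_ flag guarantees the histograms are recorded exactly once.
  void DoneWithRequest(CompletionCause reason) {
    if (done_)
      return;
    done_ = true;
    RecordPerfHistograms(reason);
  }

 private:
  using Clock = std::chrono::steady_clock;

  void RecordPerfHistograms(CompletionCause reason) {
    if (start_time_ == Clock::time_point())
      return;  // Start() never ran, so there is nothing to measure.
    auto total = std::chrono::duration_cast<std::chrono::milliseconds>(
        Clock::now() - start_time_);
    std::cout << (reason == FINISHED ? "TotalTimeSuccess: "
                                     : "TotalTimeCancel: ")
              << total.count() << " ms\n";
    start_time_ = Clock::time_point();  // Reset, as the patch nulls start_time_.
  }

  bool done_ = false;
  Clock::time_point start_time_;
};

int main() {
  Job job;
  job.Start();
  job.DoneWithRequest(FINISHED);  // Records once.
  job.DoneWithRequest(ABORTED);   // No-op: already done.
  return 0;
}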
Diffstat (limited to 'net/url_request')
-rw-r--r--  net/url_request/url_request_http_job.cc | 54
-rw-r--r--  net/url_request/url_request_http_job.h  | 12
2 files changed, 65 insertions(+), 1 deletion(-)
diff --git a/net/url_request/url_request_http_job.cc b/net/url_request/url_request_http_job.cc
index 17edff7..702c0bf 100644
--- a/net/url_request/url_request_http_job.cc
+++ b/net/url_request/url_request_http_job.cc
@@ -258,6 +258,7 @@ URLRequestHttpJob::URLRequestHttpJob(URLRequest* request)
       is_cached_content_(false),
       request_creation_time_(),
       packet_timing_enabled_(false),
+      done_(false),
       bytes_observed_in_packets_(0),
       packet_times_(),
       request_time_snapshot_(),
@@ -341,13 +342,14 @@ void URLRequestHttpJob::NotifyDone(const URLRequestStatus& original_status) {
     }
   }
 
-  RecordCompressionHistograms();
+  DoneWithRequest(FINISHED);
   URLRequestJob::NotifyDone(status);
 }
 
 void URLRequestHttpJob::DestroyTransaction() {
   DCHECK(transaction_.get());
 
+  DoneWithRequest(ABORTED);
   transaction_.reset();
   response_info_ = NULL;
   context_ = NULL;
@@ -376,6 +378,7 @@ void URLRequestHttpJob::StartTransaction() {
       !throttling_entry_->IsDuringExponentialBackoff()) {
     rv = transaction_->Start(
         &request_info_, &start_callback_, request_->net_log());
+    start_time_ = base::TimeTicks::Now();
   } else {
     // Special error code for the exponential back-off module.
     rv = ERR_TEMPORARILY_THROTTLED;
@@ -1087,6 +1090,8 @@ bool URLRequestHttpJob::ReadRawData(IOBuffer* buf, int buf_size,
   int rv = transaction_->Read(buf, buf_size, &read_callback_);
   if (rv >= 0) {
     *bytes_read = rv;
+    if (!rv)
+      DoneWithRequest(FINISHED);
     return true;
   }
 
@@ -1135,6 +1140,7 @@ URLRequestHttpJob::~URLRequestHttpJob() {
     if (manager)  // Defensive programming.
       manager->FetchDictionary(request_info_.url, sdch_dictionary_url_);
   }
+  DoneWithRequest(ABORTED);
 }
 
 void URLRequestHttpJob::RecordTimer() {
@@ -1425,4 +1431,50 @@ bool URLRequestHttpJob::IsCompressibleContent() const {
           IsSupportedNonImageMimeType(mime_type.c_str()));
 }
 
+void URLRequestHttpJob::RecordPerfHistograms(CompletionCause reason) {
+  if (start_time_.is_null())
+    return;
+
+  base::TimeDelta total_time = base::TimeTicks::Now() - start_time_;
+  UMA_HISTOGRAM_TIMES("Net.HttpJob.TotalTime", total_time);
+
+  if (reason == FINISHED) {
+    UMA_HISTOGRAM_TIMES("Net.HttpJob.TotalTimeSuccess", total_time);
+  } else {
+    UMA_HISTOGRAM_TIMES("Net.HttpJob.TotalTimeCancel", total_time);
+  }
+
+  static bool cache_experiment = false;
+  if (!cache_experiment)
+    cache_experiment = base::FieldTrialList::TrialExists("CacheListSize");
+  if (cache_experiment) {
+    UMA_HISTOGRAM_TIMES(
+        base::FieldTrial::MakeName("Net.HttpJob.TotalTime", "CacheListSize"),
+        total_time);
+    if (reason == FINISHED) {
+      UMA_HISTOGRAM_TIMES(
+          base::FieldTrial::MakeName("Net.HttpJob.TotalTimeSuccess",
+                                     "CacheListSize"),
+          total_time);
+    } else {
+      UMA_HISTOGRAM_TIMES(
+          base::FieldTrial::MakeName("Net.HttpJob.TotalTimeCancel",
+                                     "CacheListSize"),
+          total_time);
+    }
+  }
+
+  start_time_ = base::TimeTicks();
+}
+
+void URLRequestHttpJob::DoneWithRequest(CompletionCause reason) {
+  if (done_)
+    return;
+  done_ = true;
+
+  RecordPerfHistograms(reason);
+  if (reason == FINISHED)
+    RecordCompressionHistograms();
+}
+
 }  // namespace net
diff --git a/net/url_request/url_request_http_job.h b/net/url_request/url_request_http_job.h
index 9620457..a6ccafd 100644
--- a/net/url_request/url_request_http_job.h
+++ b/net/url_request/url_request_http_job.h
@@ -132,6 +132,11 @@ class URLRequestHttpJob : public URLRequestJob {
   bool is_cached_content_;
 
  private:
+  enum CompletionCause {
+    ABORTED,
+    FINISHED
+  };
+
   class HttpFilterContext;
 
   virtual ~URLRequestHttpJob();
@@ -145,6 +150,9 @@ class URLRequestHttpJob : public URLRequestJob {
   void RecordCompressionHistograms();
   bool IsCompressibleContent() const;
 
+  void RecordPerfHistograms(CompletionCause reason);
+  void DoneWithRequest(CompletionCause reason);
+
   base::Time request_creation_time_;
 
   // Data used for statistics gathering. This data is only used for histograms
@@ -158,6 +166,7 @@ class URLRequestHttpJob : public URLRequestJob {
 
   // Enable recording of packet arrival times for histogramming.
   bool packet_timing_enabled_;
+  bool done_;  // True when we are done doing work.
 
   // The number of bytes that have been accounted for in packets (where some of
   // those packets may possibly have had their time of arrival recorded).
@@ -174,6 +183,9 @@ class URLRequestHttpJob : public URLRequestJob {
   // last time for use in histograms.
   base::Time final_packet_time_;
 
+  // The start time for the job, ignoring re-starts.
+  base::TimeTicks start_time_;
+
   // The count of the number of packets, some of which may not have been timed.
   // We're ignoring overflow, as 1430 x 2^31 is a LOT of bytes.
   int observed_packet_count_;
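
A note on the CacheListSize branch above: base::FieldTrial::MakeName presumably derives a per-experiment histogram name from the prefix plus the group the client was randomized into, so each experiment arm gets its own Net.HttpJob.TotalTime* series. A hypothetical illustration of that naming scheme follows; MakeTrialHistogramName and the "128MB" group name are invented for this sketch, not Chromium API.

#include <iostream>
#include <string>

// Invented helper mirroring the assumed behavior of base::FieldTrial::MakeName:
// histogram prefix + "_" + the active field-trial group name.
std::string MakeTrialHistogramName(const std::string& prefix,
                                   const std::string& group) {
  return prefix + "_" + group;
}

int main() {
  // "128MB" is a made-up group name for illustration.
  std::cout << MakeTrialHistogramName("Net.HttpJob.TotalTime", "128MB")
            << "\n";  // Prints: Net.HttpJob.TotalTime_128MB
  return 0;
}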