summaryrefslogtreecommitdiffstats
path: root/net
diff options
context:
space:
mode:
authorjar@chromium.org <jar@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>2009-01-13 18:06:03 +0000
committerjar@chromium.org <jar@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>2009-01-13 18:06:03 +0000
commit87a1a958cd1a99a57bb28d242e244411a5f39cfa (patch)
tree04e97186d459fd2430adc8a7b3cd1e7cf5120f66 /net
parent5cbb127e6b67a646b98a460faf89740f29b91403 (diff)
downloadchromium_src-87a1a958cd1a99a57bb28d242e244411a5f39cfa.zip
chromium_src-87a1a958cd1a99a57bb28d242e244411a5f39cfa.tar.gz
chromium_src-87a1a958cd1a99a57bb28d242e244411a5f39cfa.tar.bz2
Correct latency histograms for SDCH encoding
Add a boolean to indicate if the request_time_ was set via a call to Now(), vs unpickling from the cache, so that cached results can be excluded from latency measurements. bug=1561947 r=darin,rvargas Review URL: http://codereview.chromium.org/17371 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@7942 0039d316-1c4b-4281-b951-d872f2087c98
Diffstat (limited to 'net')
-rw-r--r--net/base/sdch_filter.cc22
-rw-r--r--net/http/http_cache.cc1
-rw-r--r--net/http/http_network_transaction.cc4
-rw-r--r--net/http/http_response_info.h9
-rw-r--r--net/http/http_transaction_unittest.h2
-rw-r--r--net/http/http_transaction_winhttp.cc3
-rw-r--r--net/url_request/url_request.cc5
-rw-r--r--net/url_request/url_request.h11
-rw-r--r--net/url_request/url_request_job.cc3
9 files changed, 37 insertions, 23 deletions
diff --git a/net/base/sdch_filter.cc b/net/base/sdch_filter.cc
index de15774..2113ebd 100644
--- a/net/base/sdch_filter.cc
+++ b/net/base/sdch_filter.cc
@@ -41,24 +41,18 @@ SdchFilter::~SdchFilter() {
if (base::Time() != connect_time() && base::Time() != time_of_last_read_) {
base::TimeDelta duration = time_of_last_read_ - connect_time();
- // Note: connect_time may be somewhat incorrect if this is cached data, as
- // it will reflect the time the connect was done for the original read :-(.
- // To avoid any chances of overflow, and since SDCH is meant to primarilly
- // handle short downloads, we'll restrict what results we log to effectively
- // discard bogus large numbers. Note that IF the number is large enough, it
- // would DCHECK in histogram as the square of the value is summed. The
- // relatively precise histogram only properly covers the range 1ms to 10
+ // Note: connect_time is *only* set if this was NOT cached data, so the
+ // latency duration should only apply to the network read of the content.
+ // We clip our logging at 10 minutes to prevent anomalous data from being
+ // considered (per suggestion from Jake Brutlag).
+ // The relatively precise histogram only properly covers the range 1ms to 10
// seconds, so the discarded data would not be that readable anyway.
- if (180 >= duration.InSeconds()) {
+ if (10 >= duration.InMinutes()) {
if (DECODING_IN_PROGRESS == decoding_status_)
- UMA_HISTOGRAM_MEDIUM_TIMES(L"Sdch.Transit_Latency_M", duration);
+ UMA_HISTOGRAM_MEDIUM_TIMES(L"Sdch.Network_Decode_Latency_M", duration);
if (PASS_THROUGH == decoding_status_)
- UMA_HISTOGRAM_MEDIUM_TIMES(L"Sdch.Transit_Pass-through_Latency_M",
+ UMA_HISTOGRAM_MEDIUM_TIMES(L"Sdch.Network_Pass-through_Latency_M",
duration);
- // Look at sizes of the 20% of blocks that arrive with large latency.
- if (15 < duration.InSeconds())
- UMA_HISTOGRAM_COUNTS(L"Sdch.Transit_Belated_Final_Block_Size",
- size_of_last_read_);
}
}
diff --git a/net/http/http_cache.cc b/net/http/http_cache.cc
index 2ac62ef..7edc0a8 100644
--- a/net/http/http_cache.cc
+++ b/net/http/http_cache.cc
@@ -1022,6 +1022,7 @@ bool HttpCache::ReadResponseInfo(disk_cache::Entry* disk_entry,
if (!pickle.ReadInt64(&iter, &time_val))
return false;
response_info->request_time = Time::FromInternalValue(time_val);
+ response_info->was_cached = true; // Set status to show cache resurrection.
// read response-time
if (!pickle.ReadInt64(&iter, &time_val))
diff --git a/net/http/http_network_transaction.cc b/net/http/http_network_transaction.cc
index 45a9a5e..9d6ed1b 100644
--- a/net/http/http_network_transaction.cc
+++ b/net/http/http_network_transaction.cc
@@ -560,8 +560,10 @@ int HttpNetworkTransaction::DoWriteHeaders() {
// Record our best estimate of the 'request time' as the time when we send
// out the first bytes of the request headers.
- if (request_headers_bytes_sent_ == 0)
+ if (request_headers_bytes_sent_ == 0) {
response_.request_time = Time::Now();
+ response_.was_cached = false;
+ }
const char* buf = request_headers_.data() + request_headers_bytes_sent_;
int buf_len = static_cast<int>(request_headers_.size() -
diff --git a/net/http/http_response_info.h b/net/http/http_response_info.h
index 80defef..5563ada 100644
--- a/net/http/http_response_info.h
+++ b/net/http/http_response_info.h
@@ -15,6 +15,15 @@ namespace net {
class HttpResponseInfo {
public:
+ // The following is only defined if the request_time member is set.
+ // If this response was resurrected from cache, then this bool is set, and
+ // request_time may correspond to a time "far" in the past. Note that
+ // stale content (perhaps un-cacheable) may be fetched from cache subject to
+ // the load flags specified on the request info. For example, this is done
+ // when a user presses the back button to re-render pages, or at startup, when
+ // reloading previously visited pages (without going over the network).
+ bool was_cached;
+
// The time at which the request was made that resulted in this response.
// For cached responses, this time could be "far" in the past.
base::Time request_time;
diff --git a/net/http/http_transaction_unittest.h b/net/http/http_transaction_unittest.h
index eb2d7bb..ccdb954 100644
--- a/net/http/http_transaction_unittest.h
+++ b/net/http/http_transaction_unittest.h
@@ -7,6 +7,7 @@
#include "net/http/http_transaction.h"
+#include <algorithm>
#include <string>
#include "base/compiler_specific.h"
@@ -212,6 +213,7 @@ class MockNetworkTransaction : public net::HttpTransaction {
std::replace(header_data.begin(), header_data.end(), '\n', '\0');
response_.request_time = base::Time::Now();
+ response_.was_cached = false;
response_.response_time = base::Time::Now();
response_.headers = new net::HttpResponseHeaders(header_data);
response_.ssl_info.cert_status = t->cert_status;
diff --git a/net/http/http_transaction_winhttp.cc b/net/http/http_transaction_winhttp.cc
index b1ecf2a..06cd391 100644
--- a/net/http/http_transaction_winhttp.cc
+++ b/net/http/http_transaction_winhttp.cc
@@ -172,7 +172,7 @@ class HttpTransactionWinHttp::Session
WINHTTP_FLAG_SECURE_PROTOCOL_TLS1
};
- Session(ProxyService* proxy_service);
+ explicit Session(ProxyService* proxy_service);
// Opens the primary WinHttp session handle.
bool Init(const std::string& user_agent);
@@ -1232,6 +1232,7 @@ int HttpTransactionWinHttp::SendRequest() {
}
response_.request_time = Time::Now();
+ response_.was_cached = false;
DWORD total_size = 0;
if (request_->upload_data) {
diff --git a/net/url_request/url_request.cc b/net/url_request/url_request.cc
index 79ea708..87facba 100644
--- a/net/url_request/url_request.cc
+++ b/net/url_request/url_request.cc
@@ -28,7 +28,7 @@ using std::string;
using std::wstring;
// Max number of http redirects to follow. Same number as gecko.
-const static int kMaxRedirects = 20;
+static const int kMaxRedirects = 20;
static URLRequestJobManager* GetJobManager() {
return Singleton<URLRequestJobManager>::get();
@@ -218,6 +218,7 @@ void URLRequest::Start() {
is_pending_ = true;
response_info_.request_time = Time::Now();
+ response_info_.was_cached = false;
// Don't allow errors to be sent from within Start().
// TODO(brettw) this may cause NotifyDone to be sent synchronously,
@@ -307,7 +308,7 @@ std::string URLRequest::StripPostSpecificHeaders(const std::string& headers) {
std::string stripped_headers;
net::HttpUtil::HeadersIterator it(headers.begin(), headers.end(), "\r\n");
-
+
while (it.GetNext()) {
bool is_post_specific = false;
for (size_t i = 0; i < ARRAYSIZE_UNSAFE(kPostHeaders); ++i) {
diff --git a/net/url_request/url_request.h b/net/url_request/url_request.h
index b01aa9c..5dfc711 100644
--- a/net/url_request/url_request.h
+++ b/net/url_request/url_request.h
@@ -45,8 +45,8 @@ class URLRequest {
// information with a URLRequest. Use user_data() and set_user_data()
class UserData {
public:
- UserData() {};
- virtual ~UserData() {};
+ UserData() {}
+ virtual ~UserData() {}
};
// Callback function implemented by protocol handlers to create new jobs.
@@ -90,7 +90,7 @@ class URLRequest {
class Delegate {
public:
virtual ~Delegate() {}
-
+
// Called upon a server-initiated redirect. The delegate may call the
// request's Cancel method to prevent the redirect from being followed.
// Since there may be multiple chained redirects, there may also be more
@@ -296,6 +296,9 @@ class URLRequest {
return response_info_.response_time;
}
+ // Indicate if this response was fetched from disk cache.
+ bool was_cached() const { return response_info_.was_cached; }
+
// Get all response headers, as a HttpResponseHeaders object. See comments
// in HttpResponseHeaders class as to the format of the data.
net::HttpResponseHeaders* response_headers() const {
@@ -446,7 +449,7 @@ class URLRequest {
GURL url_;
GURL original_url_;
GURL policy_url_;
- std::string method_; // "GET", "POST", etc. Should be all uppercase.
+ std::string method_; // "GET", "POST", etc. Should be all uppercase.
std::string referrer_;
std::string extra_request_headers_;
int load_flags_; // Flags indicating the request type for the load;
diff --git a/net/url_request/url_request_job.cc b/net/url_request/url_request_job.cc
index 98712a5..c30388f 100644
--- a/net/url_request/url_request_job.cc
+++ b/net/url_request/url_request_job.cc
@@ -61,7 +61,8 @@ void URLRequestJob::SetupFilter() {
// Approximate connect time with request_time. If it is not cached, then
// this is a good approximation for when the first bytes went on the
// wire.
- filter_->SetConnectTime(request_->response_info_.request_time);
+ if (!request_->response_info_.was_cached)
+ filter_->SetConnectTime(request_->response_info_.request_time);
}
}
}