-rw-r--r--  net/base/filter.h                         14
-rw-r--r--  net/base/filter_unittest.h                 1
-rw-r--r--  net/base/load_flags.h                      8
-rw-r--r--  net/base/sdch_filter.cc                  125
-rw-r--r--  net/base/sdch_filter.h                    31
-rw-r--r--  net/base/sdch_filter_unittest.cc          29
-rw-r--r--  net/base/sdch_manager.cc                  26
-rw-r--r--  net/base/sdch_manager.h                   32
-rw-r--r--  net/url_request/url_request_http_job.cc   62
-rw-r--r--  net/url_request/url_request_http_job.h    12
-rw-r--r--  net/url_request/url_request_job.cc       169
-rw-r--r--  net/url_request/url_request_job.h         54
12 files changed, 387 insertions(+), 176 deletions(-)
diff --git a/net/base/filter.h b/net/base/filter.h
index 852faba..295d80f 100644
--- a/net/base/filter.h
+++ b/net/base/filter.h
@@ -48,6 +48,15 @@
// or later filters in a chain.
class FilterContext {
public:
+ // Enum to control what histograms are emitted near end-of-life of this
+ // instance.
+ enum StatisticSelector {
+ SDCH_DECODE,
+ SDCH_PASSTHROUGH,
+ SDCH_EXPERIMENT_DECODE,
+ SDCH_EXPERIMENT_HOLDBACK,
+ };
+
virtual ~FilterContext() {}
// What mime type was specified in the header for this data?
@@ -85,6 +94,10 @@ class FilterContext {
// don't change the input buffer sizes for a linked chain of filters, and the
// buffer size for input to all filters in a chain is this one constant).
virtual int GetInputStreamBufferSize() const = 0;
+
+ // The following method forces the context to emit a specific set of
+ // statistics as selected by the argument.
+ virtual void RecordPacketStats(StatisticSelector statistic) const = 0;
};
//------------------------------------------------------------------------------
@@ -179,6 +192,7 @@ class Filter {
// advertised in the GET), as well as the mime type of the content.
static void FixupEncodingTypes(const FilterContext& filter_context,
std::vector<FilterType>* encoding_types);
+
protected:
explicit Filter(const FilterContext& filter_context);
diff --git a/net/base/filter_unittest.h b/net/base/filter_unittest.h
index a15ae59..9de5bb5 100644
--- a/net/base/filter_unittest.h
+++ b/net/base/filter_unittest.h
@@ -67,6 +67,7 @@ class MockFilterContext : public FilterContext {
// What is the desirable input buffer size for these filters?
virtual int GetInputStreamBufferSize() const { return buffer_size_; }
+ virtual void RecordPacketStats(StatisticSelector statistic) const {}
private:
int buffer_size_;
diff --git a/net/base/load_flags.h b/net/base/load_flags.h
index c2c0d20..09851ec 100644
--- a/net/base/load_flags.h
+++ b/net/base/load_flags.h
@@ -66,16 +66,12 @@ enum {
// cookies or updating existing ones.
LOAD_DO_NOT_SAVE_COOKIES = 1 << 13,
- // An SDCH dictionary was advertised, and an SDCH encoded response is
- // possible.
- LOAD_SDCH_DICTIONARY_ADVERTISED = 1 << 14,
-
// Do not resolve proxies. This override is used when downloading PAC files
// to avoid having a circular dependency.
- LOAD_BYPASS_PROXY = 1 << 15,
+ LOAD_BYPASS_PROXY = 1 << 14,
// Indicate this request is for a download, as opposed to viewing.
- LOAD_IS_DOWNLOAD = 1 << 16,
+ LOAD_IS_DOWNLOAD = 1 << 15,
};
} // namespace net
diff --git a/net/base/sdch_filter.cc b/net/base/sdch_filter.cc
index 3ff78b5..0eb98a0 100644
--- a/net/base/sdch_filter.cc
+++ b/net/base/sdch_filter.cc
@@ -25,11 +25,7 @@ SdchFilter::SdchFilter(const FilterContext& filter_context)
dest_buffer_excess_index_(0),
source_bytes_(0),
output_bytes_(0),
- observed_packet_count_(0),
- bytes_observed_in_packets_(0),
- final_packet_time_(),
possible_pass_through_(false),
- connect_time_(filter_context.GetRequestTime()),
was_cached_(filter_context.IsCachedContent()) {
bool success = filter_context.GetMimeType(&mime_type_);
DCHECK(success);
@@ -80,92 +76,20 @@ SdchFilter::~SdchFilter() {
return; // We don't need timing stats, and we already got ratios.
}
- if (base::Time() == connect_time_ || read_times_.empty()) {
- // Not a real error, but it is useful to have this tally.
- // TODO(jar): Remove this stat after SDCH stability is validated.
- SdchManager::SdchErrorRecovery(SdchManager::MISSING_TIME_STATS);
- UMA_HISTOGRAM_COUNTS("Sdch.MissingTimeBytesIn",
- static_cast<int>(filter_context().GetByteReadCount()));
- UMA_HISTOGRAM_COUNTS("Sdch.MissingTimeVcdiffIn", source_bytes_);
- return;
- }
- // Since read_times_ is not empty, we can assert we have some packets times.
- DCHECK(final_packet_time_ != base::Time());
-
- base::TimeDelta duration = final_packet_time_ - connect_time_;
- // We clip our logging at 10 minutes to prevent anamolous data from being
- // considered (per suggestion from Jake Brutlag).
- if (10 < duration.InMinutes()) {
- SdchManager::SdchErrorRecovery(SdchManager::OVER_10_MINUTES);
- return;
- }
-
switch (decoding_status_) {
case DECODING_IN_PROGRESS: {
UMA_HISTOGRAM_PERCENTAGE("Sdch.Network_Decode_Ratio_a", static_cast<int>(
(filter_context().GetByteReadCount() * 100) / output_bytes_));
- UMA_HISTOGRAM_CLIPPED_TIMES("Sdch.Network_Decode_Latency_F_a", duration,
- base::TimeDelta::FromMilliseconds(20),
- base::TimeDelta::FromMinutes(10), 100);
- UMA_HISTOGRAM_CLIPPED_TIMES("Sdch.Network_Decode_1st_To_Last_a",
- final_packet_time_ - read_times_[0],
- base::TimeDelta::FromMilliseconds(20),
- base::TimeDelta::FromMinutes(10), 100);
- if (read_times_.size() > 4) {
- UMA_HISTOGRAM_CLIPPED_TIMES("Sdch.Network_Decode_1st_To_2nd_b",
- read_times_[1] - read_times_[0],
- base::TimeDelta::FromMilliseconds(10),
- base::TimeDelta::FromSeconds(3), 100);
- UMA_HISTOGRAM_CLIPPED_TIMES("Sdch.Network_Decode_2nd_To_3rd_b",
- read_times_[2] - read_times_[1],
- base::TimeDelta::FromMilliseconds(10),
- base::TimeDelta::FromSeconds(3), 100);
- UMA_HISTOGRAM_CLIPPED_TIMES("Sdch.Network_Decode_3rd_To_4th_b",
- read_times_[3] - read_times_[2],
- base::TimeDelta::FromMilliseconds(10),
- base::TimeDelta::FromSeconds(3), 100);
- UMA_HISTOGRAM_CLIPPED_TIMES("Sdch.Network_Decode_4th_To_5th_b",
- read_times_[4] - read_times_[3],
- base::TimeDelta::FromMilliseconds(10),
- base::TimeDelta::FromSeconds(3), 100);
- }
- UMA_HISTOGRAM_COUNTS_100("Sdch.Network_Decode_Packets_b",
- observed_packet_count_);
- UMA_HISTOGRAM_COUNTS("Sdch.Network_Decode_Bytes_Processed_a",
- static_cast<int>(filter_context().GetByteReadCount()));
UMA_HISTOGRAM_COUNTS("Sdch.Network_Decode_Bytes_VcdiffOut_a",
output_bytes_);
+ filter_context().RecordPacketStats(FilterContext::SDCH_DECODE);
+
+ // Allow latency experiments to proceed.
+ SdchManager::Global()->SetAllowLatencyExperiment(url_, true);
return;
}
case PASS_THROUGH: {
- UMA_HISTOGRAM_CLIPPED_TIMES("Sdch.Network_Pass-through_Latency_F_a",
- duration,
- base::TimeDelta::FromMilliseconds(20),
- base::TimeDelta::FromMinutes(10), 100);
- UMA_HISTOGRAM_CLIPPED_TIMES("Sdch.Network_Pass-through_1st_To_Last_a",
- final_packet_time_ - read_times_[0],
- base::TimeDelta::FromMilliseconds(20),
- base::TimeDelta::FromMinutes(10), 100);
- if (read_times_.size() > 4) {
- UMA_HISTOGRAM_CLIPPED_TIMES("Sdch.Network_Pass-through_1st_To_2nd_b",
- read_times_[1] - read_times_[0],
- base::TimeDelta::FromMilliseconds(10),
- base::TimeDelta::FromSeconds(3), 100);
- UMA_HISTOGRAM_CLIPPED_TIMES("Sdch.Network_Pass-through_2nd_To_3rd_b",
- read_times_[2] - read_times_[1],
- base::TimeDelta::FromMilliseconds(10),
- base::TimeDelta::FromSeconds(3), 100);
- UMA_HISTOGRAM_CLIPPED_TIMES("Sdch.Network_Pass-through_3rd_To_4th_b",
- read_times_[3] - read_times_[2],
- base::TimeDelta::FromMilliseconds(10),
- base::TimeDelta::FromSeconds(3), 100);
- UMA_HISTOGRAM_CLIPPED_TIMES("Sdch.Network_Pass-through_4th_To_5th_b",
- read_times_[4] - read_times_[3],
- base::TimeDelta::FromMilliseconds(10),
- base::TimeDelta::FromSeconds(3), 100);
- }
- UMA_HISTOGRAM_COUNTS_100("Sdch.Network_Pass-through_Packets_b",
- observed_packet_count_);
+ filter_context().RecordPacketStats(FilterContext::SDCH_PASSTHROUGH);
return;
}
case DECODING_UNINITIALIZED: {
@@ -187,33 +111,6 @@ SdchFilter::~SdchFilter() {
} // end of switch.
}
-void SdchFilter::UpdateReadTimes() {
- if (!next_stream_data_ || (stream_data_len_ == 0)) {
- // Don't update when we're called to just flush out our internal buffers.
- return;
- }
- const size_t bytes_read_so_far =
- static_cast<size_t>(filter_context().GetByteReadCount());
- if (bytes_read_so_far <= bytes_observed_in_packets_) {
- DCHECK(bytes_read_so_far == bytes_observed_in_packets_);
- return; // No new bytes have arrived.
- }
-
- // We only save distinct times for the first 5 packets.
- const size_t kMaxTimesInArray = 5;
- const size_t kTypicalPacketSize = 1430;
- final_packet_time_ = base::Time::Now();
- while (bytes_read_so_far > bytes_observed_in_packets_) {
- if (++observed_packet_count_ <= kMaxTimesInArray) {
- read_times_.push_back(final_packet_time_);
- }
- bytes_observed_in_packets_ += kTypicalPacketSize;
- }
- // Since packets may not be full, we'll remember the number of bytes we've
- // accounted for in packets thus far.
- bytes_observed_in_packets_ = bytes_read_so_far;
-}
-
bool SdchFilter::InitDecoding(Filter::FilterType filter_type) {
if (decoding_status_ != DECODING_UNINITIALIZED)
return false;
@@ -245,15 +142,13 @@ Filter::FilterStatus SdchFilter::ReadFilteredData(char* dest_buffer,
if (!dest_buffer || available_space <= 0)
return FILTER_ERROR;
- UpdateReadTimes();
-
if (WAITING_FOR_DICTIONARY_SELECTION == decoding_status_) {
FilterStatus status = InitializeDictionary();
if (FILTER_NEED_MORE_DATA == status)
return FILTER_NEED_MORE_DATA;
if (FILTER_ERROR == status) {
DCHECK(DECODING_ERROR == decoding_status_);
- DCHECK(0 == dest_buffer_excess_index_);
+ DCHECK_EQ(0u, dest_buffer_excess_index_);
DCHECK(dest_buffer_excess_.empty());
// This is where we try very hard to do error recovery, and make this
// protocol robust in the face of proxies that do many different things.
@@ -333,12 +228,12 @@ Filter::FilterStatus SdchFilter::ReadFilteredData(char* dest_buffer,
*dest_len += amount;
dest_buffer += amount;
available_space -= amount;
- DCHECK(available_space >= 0);
+ DCHECK_GE(available_space, 0);
if (available_space <= 0)
return FILTER_OK;
DCHECK(dest_buffer_excess_.empty());
- DCHECK(0 == dest_buffer_excess_index_);
+ DCHECK_EQ(0u, dest_buffer_excess_index_);
if (decoding_status_ != DECODING_IN_PROGRESS) {
if (META_REFRESH_RECOVERY == decoding_status_) {
@@ -388,7 +283,7 @@ Filter::FilterStatus SdchFilter::ReadFilteredData(char* dest_buffer,
Filter::FilterStatus SdchFilter::InitializeDictionary() {
const size_t kServerIdLength = 9; // Dictionary hash plus null from server.
size_t bytes_needed = kServerIdLength - dictionary_hash_.size();
- DCHECK(bytes_needed > 0);
+ DCHECK_GT(bytes_needed, 0u);
if (!next_stream_data_)
return FILTER_NEED_MORE_DATA;
if (static_cast<size_t>(stream_data_len_) < bytes_needed) {
@@ -400,7 +295,7 @@ Filter::FilterStatus SdchFilter::InitializeDictionary() {
dictionary_hash_.append(next_stream_data_, bytes_needed);
DCHECK(kServerIdLength == dictionary_hash_.size());
stream_data_len_ -= bytes_needed;
- DCHECK(0 <= stream_data_len_);
+ DCHECK_LE(0, stream_data_len_);
if (stream_data_len_ > 0)
next_stream_data_ += bytes_needed;
else
diff --git a/net/base/sdch_filter.h b/net/base/sdch_filter.h
index 85dfef6..7255839 100644
--- a/net/base/sdch_filter.h
+++ b/net/base/sdch_filter.h
@@ -56,10 +56,6 @@ class SdchFilter : public Filter {
PASS_THROUGH, // Non-sdch content being passed without alteration.
};
- // Update the read_times_ array with time estimates for the number of packets
- // read so far.
- void UpdateReadTimes();
-
// Identify the suggested dictionary, and initialize underlying decompressor.
Filter::FilterStatus InitializeDictionary();
@@ -106,25 +102,6 @@ class SdchFilter : public Filter {
size_t source_bytes_;
size_t output_bytes_;
- // The number of packets we've observed over the net.
- size_t observed_packet_count_;
-
- // We can't really see when a packet arrives, but we can record how much data
- // was accounted for in previously noted packets. We use this count to (later)
- // identify new packets .
- size_t bytes_observed_in_packets_;
-
- // Since we don't save all packet times in read_times_, we save the last time
- // for use in histograms.
- base::Time final_packet_time_;
-
- // Record of packet processing times for this filter. Used only for stats
- // generations in histograms. There is one time entry each time the byte
- // count receieved exceeds the next multiple of 1430 bytes (a common
- // per-TCP/IP-packet payload size). It is currently only valid for the first
- // 5 packets.
- std::vector<base::Time> read_times_;
-
// Error recovery in content type may add an sdch filter type, in which case
// we should gracefully perform pass through if the format is incorrect, or
// an applicable dictionary can't be found.
@@ -134,11 +111,9 @@ class SdchFilter : public Filter {
// This is used to restrict use of a dictionary to a specific URL or path.
GURL url_;
- // To facilitate histogramming by individual filters, we store the connect
- // time for the corresponding HTTP transaction, as well as whether this time
- // was recalled from a cached entry. The time is approximate, and may be
- // bogus if the data was gotten from cache (i.e., it may be LOOOONG ago).
- const base::Time connect_time_;
+ // To facilitate error recovery, we store whether this content came from a
+ // cache, as we then probably don't have the requisite dictionary, and will
+ // need to induce a meta-refresh.
const bool was_cached_;
// To facilitate error recovery, allow filter to know if content is text/html
diff --git a/net/base/sdch_filter_unittest.cc b/net/base/sdch_filter_unittest.cc
index ca6b266..532bba6 100644
--- a/net/base/sdch_filter_unittest.cc
+++ b/net/base/sdch_filter_unittest.cc
@@ -1184,3 +1184,32 @@ TEST_F(SdchFilterTest, PathMatch) {
EXPECT_FALSE(PathMatch("/ABC", "/abc"));
EXPECT_FALSE(PathMatch("/abc", "/ABC"));
}
+
+// The following are only applicable while we have a latency test in the code,
+// and can be removed when that functionality is stripped.
+TEST_F(SdchFilterTest, LatencyTestControls) {
+ GURL url("http://www.google.com");
+ GURL url2("http://www.google2.com");
+
+ // First make sure we default to false.
+ EXPECT_FALSE(sdch_manager_->AllowLatencyExperiment(url));
+ EXPECT_FALSE(sdch_manager_->AllowLatencyExperiment(url2));
+
+ // Check that we can set each to true.
+ sdch_manager_->SetAllowLatencyExperiment(url, true);
+ EXPECT_TRUE(sdch_manager_->AllowLatencyExperiment(url));
+ EXPECT_FALSE(sdch_manager_->AllowLatencyExperiment(url2));
+
+ sdch_manager_->SetAllowLatencyExperiment(url2, true);
+ EXPECT_TRUE(sdch_manager_->AllowLatencyExperiment(url));
+ EXPECT_TRUE(sdch_manager_->AllowLatencyExperiment(url2));
+
+ // And can reset them to false.
+ sdch_manager_->SetAllowLatencyExperiment(url, false);
+ EXPECT_FALSE(sdch_manager_->AllowLatencyExperiment(url));
+ EXPECT_TRUE(sdch_manager_->AllowLatencyExperiment(url2));
+
+ sdch_manager_->SetAllowLatencyExperiment(url2, false);
+ EXPECT_FALSE(sdch_manager_->AllowLatencyExperiment(url));
+ EXPECT_FALSE(sdch_manager_->AllowLatencyExperiment(url2));
+}
diff --git a/net/base/sdch_manager.cc b/net/base/sdch_manager.cc
index c6bf950..1ea9e89 100644
--- a/net/base/sdch_manager.cc
+++ b/net/base/sdch_manager.cc
@@ -85,6 +85,7 @@ SdchManager::~SdchManager() {
void SdchManager::BlacklistDomain(const GURL& url) {
if (!global_ )
return;
+ global_->SetAllowLatencyExperiment(url, false);
std::string domain(StringToLowerASCII(url.host()));
int count = global_->blacklisted_domains_[domain];
@@ -104,6 +105,7 @@ void SdchManager::BlacklistDomain(const GURL& url) {
void SdchManager::BlacklistDomainForever(const GURL& url) {
if (!global_ )
return;
+ global_->SetAllowLatencyExperiment(url, false);
std::string domain(StringToLowerASCII(url.host()));
global_->exponential_blacklist_count[domain] = INT_MAX;
@@ -331,8 +333,8 @@ void SdchManager::GenerateHash(const std::string& dictionary_text,
UrlSafeBase64Encode(first_48_bits, client_hash);
UrlSafeBase64Encode(second_48_bits, server_hash);
- DCHECK(server_hash->length() == 8);
- DCHECK(client_hash->length() == 8);
+ DCHECK_EQ(server_hash->length(), 8u);
+ DCHECK_EQ(client_hash->length(), 8u);
}
// static
@@ -507,3 +509,23 @@ bool SdchManager::Dictionary::DomainMatch(const GURL& gurl,
// TODO(jar): This is not precisely a domain match definition.
return gurl.DomainIs(restriction.data(), restriction.size());
}
+
+//------------------------------------------------------------------------------
+// Methods for supporting latency experiments.
+
+bool SdchManager::AllowLatencyExperiment(const GURL& url) const {
+ return allow_latency_experiment_.end() !=
+ allow_latency_experiment_.find(url.host());
+}
+
+void SdchManager::SetAllowLatencyExperiment(const GURL& url, bool enable) {
+ if (enable) {
+ allow_latency_experiment_.insert(url.host());
+ return;
+ }
+ ExperimentSet::iterator it = allow_latency_experiment_.find(url.host());
+ if (allow_latency_experiment_.end() == it)
+ return; // It was already erased, or never allowed.
+ SdchErrorRecovery(LATENCY_TEST_DISALLOWED);
+ allow_latency_experiment_.erase(it);
+}
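
The intended lifecycle of this experiment set: a host becomes eligible only after a complete SDCH decode, and any SDCH problem that blacklists the domain also clears that eligibility. A minimal sketch of the two call sites (the free-function names and include paths below are illustrative, not part of this patch; only the SdchManager calls come from it):

    #include "googleurl/src/gurl.h"
    #include "net/base/sdch_manager.h"

    // Hypothetical call sites showing how the latency-experiment gate toggles.
    void OnSdchDecodeSucceeded(const GURL& url) {
      // A full, successful decode re-arms the experiment for this host
      // (the real call is made from ~SdchFilter in this patch).
      SdchManager::Global()->SetAllowLatencyExperiment(url, true);
    }

    void OnSdchProblem(const GURL& url) {
      // BlacklistDomain() now also calls SetAllowLatencyExperiment(url, false),
      // recording LATENCY_TEST_DISALLOWED if the host was previously allowed.
      SdchManager::BlacklistDomain(url);
    }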
diff --git a/net/base/sdch_manager.h b/net/base/sdch_manager.h
index 6524e7a..e8329e1 100644
--- a/net/base/sdch_manager.h
+++ b/net/base/sdch_manager.h
@@ -130,13 +130,16 @@ class SdchManager {
// (i.e., be able to be sure all dictionary advertisements are accounted
// for).
- UNFLUSHED_CONTENT = 90, // Possible error in filter chaining.
- MISSING_TIME_STATS = 91, // Should never happen.
- CACHE_DECODED = 92, // No timing stats recorded.
- OVER_10_MINUTES = 93, // No timing stats will be recorded.
- UNINITIALIZED = 94, // Filter never even got initialized.
- PRIOR_TO_DICTIONARY = 95, // We hadn't even parsed a dictionary selector.
- DECODE_ERROR = 96, // Something went wrong during decode.
+ UNFLUSHED_CONTENT = 90, // Possible error in filter chaining.
+ // defunct = 91, // MISSING_TIME_STATS (Should never happen.)
+ CACHE_DECODED = 92, // No timing stats recorded.
+ // defunct = 93, // OVER_10_MINUTES (No timing stats recorded.)
+ UNINITIALIZED = 94, // Filter never even got initialized.
+ PRIOR_TO_DICTIONARY = 95, // We hadn't even parsed a dictionary selector.
+ DECODE_ERROR = 96, // Something went wrong during decode.
+
+ // Problem during the latency test.
+ LATENCY_TEST_DISALLOWED = 100, // SDCH now failing, but it worked before!
MAX_PROBLEM_CODE // Used to bound histogram.
};
@@ -302,8 +305,17 @@ class SdchManager {
static void GenerateHash(const std::string& dictionary_text,
std::string* client_hash, std::string* server_hash);
+ // For latency testing only, we need to know if we've succeeded in doing a
+ // round trip before starting our comparative tests. If ever we encounter
+ // problems with SDCH, we opt out of the test unless/until we perform a
+ // complete SDCH decoding.
+ bool AllowLatencyExperiment(const GURL& url) const;
+
+ void SetAllowLatencyExperiment(const GURL& url, bool enable);
+
private:
- typedef std::map<const std::string, int> DomainCounter;
+ typedef std::map<std::string, int> DomainCounter;
+ typedef std::set<std::string> ExperimentSet;
// A map of dictionaries info indexed by the hash that the server provides.
typedef std::map<std::string, Dictionary*> DictionaryMap;
@@ -334,6 +346,10 @@ class SdchManager {
// blacklisting expires.
DomainCounter exponential_blacklist_count;
+ // List of hostnames for which a latency experiment is allowed (because a
+ // round trip test has recently passed).
+ ExperimentSet allow_latency_experiment_;
+
DISALLOW_COPY_AND_ASSIGN(SdchManager);
};
diff --git a/net/url_request/url_request_http_job.cc b/net/url_request/url_request_http_job.cc
index c0a2080..9c81e00 100644
--- a/net/url_request/url_request_http_job.cc
+++ b/net/url_request/url_request_http_job.cc
@@ -10,6 +10,7 @@
#include "base/file_util.h"
#include "base/file_version_info.h"
#include "base/message_loop.h"
+#include "base/rand_util.h"
#include "base/string_util.h"
#include "net/base/cookie_monster.h"
#include "net/base/filter.h"
@@ -63,10 +64,19 @@ URLRequestHttpJob::URLRequestHttpJob(URLRequest* request)
ALLOW_THIS_IN_INITIALIZER_LIST(
read_callback_(this, &URLRequestHttpJob::OnReadCompleted)),
read_in_progress_(false),
- context_(request->context()) {
+ context_(request->context()),
+ sdch_dictionary_advertised_(false),
+ sdch_test_activated_(false),
+ sdch_test_control_(false) {
}
URLRequestHttpJob::~URLRequestHttpJob() {
+ DCHECK(!sdch_test_control_ || !sdch_test_activated_);
+ if (sdch_test_control_)
+ RecordPacketStats(SDCH_EXPERIMENT_HOLDBACK);
+ if (sdch_test_activated_)
+ RecordPacketStats(SDCH_EXPERIMENT_DECODE);
+
if (sdch_dictionary_url_.is_valid()) {
// Prior to reaching the destructor, request_ has been set to a NULL
// pointer, so request_->url() is no longer valid in the destructor, and we
@@ -215,8 +225,7 @@ bool URLRequestHttpJob::GetContentEncodings(
}
bool URLRequestHttpJob::IsSdchResponse() const {
- return response_info_ &&
- (request_info_.load_flags & net::LOAD_SDCH_DICTIONARY_ADVERTISED);
+ return sdch_dictionary_advertised_;
}
bool URLRequestHttpJob::IsRedirectResponse(GURL* location,
@@ -546,29 +555,54 @@ void URLRequestHttpJob::StartTransaction() {
}
void URLRequestHttpJob::AddExtraHeaders() {
+ // TODO(jar): Consider optimizing away SDCH advertising bytes when the URL is
+ // probably an img or such (and SDCH encoding is not likely).
+ bool advertise_sdch = SdchManager::Global() &&
+ SdchManager::Global()->IsInSupportedDomain(request_->url());
+ std::string avail_dictionaries;
+ if (advertise_sdch) {
+ SdchManager::Global()->GetAvailDictionaryList(request_->url(),
+ &avail_dictionaries);
+
+ // AllowLatencyExperiment() is only true if we've successfully done a
+ // full SDCH compression recently in this browser session for this host.
+ // Note that for this path, there might be no applicable dictionaries, and
+ // hence we can't participate in the experiment.
+ if (!avail_dictionaries.empty() &&
+ SdchManager::Global()->AllowLatencyExperiment(request_->url())) {
+ // We are participating in the test (or control), and hence we'll
+ // eventually record statistics via either SDCH_EXPERIMENT_DECODE or
+ // SDCH_EXPERIMENT_HOLDBACK, and we'll need some packet timing data.
+ EnablePacketCounting(kSdchPacketHistogramCount);
+ if (base::RandDouble() < .5) {
+ sdch_test_control_ = true;
+ advertise_sdch = false;
+ } else {
+ sdch_test_activated_ = true;
+ }
+ }
+ }
+
// Supply Accept-Encoding headers first so that it is more likely that they
// will be in the first transmitted packet. This can sometimes make it easier
// to filter and analyze the streams to assure that a proxy has not damaged
// these headers. Some proxies deliberately corrupt Accept-Encoding headers.
- if (!SdchManager::Global() ||
- !SdchManager::Global()->IsInSupportedDomain(request_->url())) {
+ if (!advertise_sdch) {
// Tell the server what compression formats we support (other than SDCH).
request_info_.extra_headers += "Accept-Encoding: gzip,deflate,bzip2\r\n";
} else {
- // Supply SDCH related headers, as well as accepting that encoding.
- // Tell the server what compression formats we support.
+ // Include SDCH in acceptable list.
request_info_.extra_headers += "Accept-Encoding: "
"gzip,deflate,bzip2,sdch\r\n";
-
- // TODO(jar): See if it is worth optimizing away these bytes when the URL is
- // probably an img or such. (and SDCH encoding is not likely).
- std::string avail_dictionaries;
- SdchManager::Global()->GetAvailDictionaryList(request_->url(),
- &avail_dictionaries);
if (!avail_dictionaries.empty()) {
request_info_.extra_headers += "Avail-Dictionary: "
+ avail_dictionaries + "\r\n";
- request_info_.load_flags |= net::LOAD_SDCH_DICTIONARY_ADVERTISED;
+ sdch_dictionary_advertised_ = true;
+ // Since we're tagging this transaction as advertising a dictionary, we'll
+ // definitely employ an SDCH filter (or tentative sdch filter) when we get
+ // a response. When done, we'll record histograms via SDCH_DECODE or
+ // SDCH_PASSTHROUGH. Hence we need to record packet arrival times.
+ EnablePacketCounting(kSdchPacketHistogramCount);
}
}
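
The arm assignment above reduces to: a request is eligible only when a dictionary is available and the host recently completed a full SDCH round trip, and eligible requests are split roughly 50/50, with the control (holdback) arm suppressing the SDCH advertisement entirely so a non-SDCH response can be timed for comparison. A condensed sketch of that decision (the helper name is hypothetical; base::RandDouble() and AllowLatencyExperiment() are the calls used in this patch):

    #include "base/rand_util.h"
    #include "net/base/sdch_manager.h"

    // Hypothetical helper mirroring the arm assignment in AddExtraHeaders().
    // Returns true if SDCH should still be advertised for this request.
    bool AssignSdchExperimentArm(const GURL& url, bool* test_activated,
                                 bool* test_control) {
      *test_activated = *test_control = false;
      if (!SdchManager::Global()->AllowLatencyExperiment(url))
        return true;  // Not eligible: advertise normally, record no experiment.
      if (base::RandDouble() < .5) {
        *test_control = true;   // Holdback arm: suppress SDCH advertisement.
        return false;
      }
      *test_activated = true;   // Active arm: advertise SDCH and tag the job.
      return true;
    }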
diff --git a/net/url_request/url_request_http_job.h b/net/url_request/url_request_http_job.h
index 19ecdf86..57764ef 100644
--- a/net/url_request/url_request_http_job.h
+++ b/net/url_request/url_request_http_job.h
@@ -93,6 +93,18 @@ class URLRequestHttpJob : public URLRequestJob {
// before us.
scoped_refptr<URLRequestContext> context_;
+ // Indicates whether an SDCH dictionary was advertised, and hence an SDCH
+ // compressed response is expected. We use this to help detect (accidental?)
+ // proxy corruption of a response, which sometimes marks SDCH content as
+ // having no content encoding <oops>.
+ bool sdch_dictionary_advertised_;
+
+ // For SDCH latency experiments, when we are able to do SDCH, we may enable
+ // either an SDCH latency test xor a pass-through test. The following bools
+ // indicate what we decided on for this instance.
+ bool sdch_test_activated_; // Advertising a dictionary for sdch.
+ bool sdch_test_control_; // Holdback: not even accepting sdch-encoded content.
+
DISALLOW_COPY_AND_ASSIGN(URLRequestHttpJob);
};
diff --git a/net/url_request/url_request_job.cc b/net/url_request/url_request_job.cc
index 544913d..0b005a5 100644
--- a/net/url_request/url_request_job.cc
+++ b/net/url_request/url_request_job.cc
@@ -30,7 +30,11 @@ URLRequestJob::URLRequestJob(URLRequest* request)
read_buffer_len_(0),
has_handled_response_(false),
expected_content_size_(-1),
- filter_input_byte_count_(0) {
+ packet_timing_enabled_(false),
+ filter_input_byte_count_(0),
+ bytes_observed_in_packets_(0),
+ max_packets_timed_(0),
+ observed_packet_count_(0) {
load_flags_ = request_->load_flags();
is_profiling_ = request->enable_profiling();
if (is_profiling()) {
@@ -41,6 +45,10 @@ URLRequestJob::URLRequestJob(URLRequest* request)
}
URLRequestJob::~URLRequestJob() {
+ // Cause filter chain to be destroyed now, so that any histogram requests can
+ // be made before we are torn down.
+ filter_.reset(NULL);
+
g_url_request_job_tracker.RemoveJob(this);
}
@@ -94,7 +102,7 @@ void URLRequestJob::ContinueDespiteLastError() {
}
int64 URLRequestJob::GetByteReadCount() const {
- return filter_input_byte_count_ ;
+ return filter_input_byte_count_;
}
bool URLRequestJob::GetURL(GURL* gurl) const {
@@ -522,6 +530,7 @@ void URLRequestJob::RecordBytesRead(int bytes_read) {
metrics_->total_bytes_read_ += bytes_read;
}
filter_input_byte_count_ += bytes_read;
+ UpdatePacketReadTimes(); // Facilitate stats recording if it is active.
g_url_request_job_tracker.OnBytesRead(this, bytes_read);
}
@@ -537,3 +546,159 @@ void URLRequestJob::SetStatus(const URLRequestStatus &status) {
if (request_)
request_->set_status(status);
}
+
+void URLRequestJob::UpdatePacketReadTimes() {
+ if (!packet_timing_enabled_)
+ return;
+
+ if (filter_input_byte_count_ <= bytes_observed_in_packets_) {
+ DCHECK(filter_input_byte_count_ == bytes_observed_in_packets_);
+ return; // No new bytes have arrived.
+ }
+
+ if (!bytes_observed_in_packets_)
+ request_time_snapshot_ = GetRequestTime();
+
+ final_packet_time_ = base::Time::Now();
+ const size_t kTypicalPacketSize = 1430;
+ while (filter_input_byte_count_ > bytes_observed_in_packets_) {
+ ++observed_packet_count_;
+ if (max_packets_timed_ > packet_times_.size()) {
+ packet_times_.push_back(final_packet_time_);
+ DCHECK(static_cast<size_t>(observed_packet_count_) ==
+ packet_times_.size());
+ }
+ bytes_observed_in_packets_ += kTypicalPacketSize;
+ }
+ // Since packets may not be full, we'll remember the number of bytes we've
+ // accounted for in packets thus far.
+ bytes_observed_in_packets_ = filter_input_byte_count_;
+}
+
+void URLRequestJob::EnablePacketCounting(size_t max_packets_timed) {
+ if (max_packets_timed_ < max_packets_timed)
+ max_packets_timed_ = max_packets_timed;
+ packet_timing_enabled_ = true;
+}
+
+void URLRequestJob::RecordPacketStats(StatisticSelector statistic) const {
+ if (!packet_timing_enabled_ || (final_packet_time_ == base::Time()))
+ return;
+ base::TimeDelta duration = final_packet_time_ - request_time_snapshot_;
+ switch (statistic) {
+ case SDCH_DECODE: {
+ UMA_HISTOGRAM_CLIPPED_TIMES("Sdch.Network_Decode_Latency_F_a", duration,
+ base::TimeDelta::FromMilliseconds(20),
+ base::TimeDelta::FromMinutes(10), 100);
+ UMA_HISTOGRAM_COUNTS_100("Sdch.Network_Decode_Packets_b",
+ static_cast<int>(observed_packet_count_));
+ UMA_HISTOGRAM_COUNTS("Sdch.Network_Decode_Bytes_Processed_a",
+ static_cast<int>(bytes_observed_in_packets_));
+ if (packet_times_.empty())
+ return;
+ UMA_HISTOGRAM_CLIPPED_TIMES("Sdch.Network_Decode_1st_To_Last_a",
+ final_packet_time_ - packet_times_[0],
+ base::TimeDelta::FromMilliseconds(20),
+ base::TimeDelta::FromMinutes(10), 100);
+
+ DCHECK(max_packets_timed_ >= kSdchPacketHistogramCount);
+ DCHECK(kSdchPacketHistogramCount > 4);
+ if (packet_times_.size() <= 4)
+ return;
+ UMA_HISTOGRAM_CLIPPED_TIMES("Sdch.Network_Decode_1st_To_2nd_c",
+ packet_times_[1] - packet_times_[0],
+ base::TimeDelta::FromMilliseconds(1),
+ base::TimeDelta::FromSeconds(10), 100);
+ UMA_HISTOGRAM_CLIPPED_TIMES("Sdch.Network_Decode_2nd_To_3rd_c",
+ packet_times_[2] - packet_times_[1],
+ base::TimeDelta::FromMilliseconds(1),
+ base::TimeDelta::FromSeconds(10), 100);
+ UMA_HISTOGRAM_CLIPPED_TIMES("Sdch.Network_Decode_3rd_To_4th_c",
+ packet_times_[3] - packet_times_[2],
+ base::TimeDelta::FromMilliseconds(1),
+ base::TimeDelta::FromSeconds(10), 100);
+ UMA_HISTOGRAM_CLIPPED_TIMES("Sdch.Network_Decode_4th_To_5th_c",
+ packet_times_[4] - packet_times_[3],
+ base::TimeDelta::FromMilliseconds(1),
+ base::TimeDelta::FromSeconds(10), 100);
+ return;
+ }
+ case SDCH_PASSTHROUGH: {
+ // Despite advertising a dictionary, we handled non-sdch compressed
+ // content.
+ UMA_HISTOGRAM_CLIPPED_TIMES("Sdch.Network_Pass-through_Latency_F_a",
+ duration,
+ base::TimeDelta::FromMilliseconds(20),
+ base::TimeDelta::FromMinutes(10), 100);
+ UMA_HISTOGRAM_COUNTS_100("Sdch.Network_Pass-through_Packets_b",
+ observed_packet_count_);
+ if (packet_times_.empty())
+ return;
+ UMA_HISTOGRAM_CLIPPED_TIMES("Sdch.Network_Pass-through_1st_To_Last_a",
+ final_packet_time_ - packet_times_[0],
+ base::TimeDelta::FromMilliseconds(20),
+ base::TimeDelta::FromMinutes(10), 100);
+ DCHECK(max_packets_timed_ >= kSdchPacketHistogramCount);
+ DCHECK(kSdchPacketHistogramCount > 4);
+ if (packet_times_.size() <= 4)
+ return;
+ UMA_HISTOGRAM_CLIPPED_TIMES("Sdch.Network_Pass-through_1st_To_2nd_c",
+ packet_times_[1] - packet_times_[0],
+ base::TimeDelta::FromMilliseconds(1),
+ base::TimeDelta::FromSeconds(10), 100);
+ UMA_HISTOGRAM_CLIPPED_TIMES("Sdch.Network_Pass-through_2nd_To_3rd_c",
+ packet_times_[2] - packet_times_[1],
+ base::TimeDelta::FromMilliseconds(1),
+ base::TimeDelta::FromSeconds(10), 100);
+ UMA_HISTOGRAM_CLIPPED_TIMES("Sdch.Network_Pass-through_3rd_To_4th_c",
+ packet_times_[3] - packet_times_[2],
+ base::TimeDelta::FromMilliseconds(1),
+ base::TimeDelta::FromSeconds(10), 100);
+ UMA_HISTOGRAM_CLIPPED_TIMES("Sdch.Network_Pass-through_4th_To_5th_c",
+ packet_times_[4] - packet_times_[3],
+ base::TimeDelta::FromMilliseconds(1),
+ base::TimeDelta::FromSeconds(10), 100);
+ return;
+ }
+
+ case SDCH_EXPERIMENT_DECODE: {
+ UMA_HISTOGRAM_CLIPPED_TIMES("Sdch.Experiment_Decode",
+ duration,
+ base::TimeDelta::FromMilliseconds(20),
+ base::TimeDelta::FromMinutes(10), 100);
+ // We already provided interpacket histograms above in the SDCH_DECODE
+ // case, so we don't need them here.
+ return;
+ }
+ case SDCH_EXPERIMENT_HOLDBACK: {
+ UMA_HISTOGRAM_CLIPPED_TIMES("Sdch.Experiment_Holdback",
+ duration,
+ base::TimeDelta::FromMilliseconds(20),
+ base::TimeDelta::FromMinutes(10), 100);
+ DCHECK(max_packets_timed_ >= kSdchPacketHistogramCount);
+ DCHECK(kSdchPacketHistogramCount > 4);
+ if (packet_times_.size() <= 4)
+ return;
+ UMA_HISTOGRAM_CLIPPED_TIMES("Sdch.Experiment_Holdback_1st_To_2nd_c",
+ packet_times_[1] - packet_times_[0],
+ base::TimeDelta::FromMilliseconds(1),
+ base::TimeDelta::FromSeconds(10), 100);
+ UMA_HISTOGRAM_CLIPPED_TIMES("Sdch.Experiment_Holdback_2nd_To_3rd_c",
+ packet_times_[2] - packet_times_[1],
+ base::TimeDelta::FromMilliseconds(1),
+ base::TimeDelta::FromSeconds(10), 100);
+ UMA_HISTOGRAM_CLIPPED_TIMES("Sdch.Experiment_Holdback_3rd_To_4th_c",
+ packet_times_[3] - packet_times_[2],
+ base::TimeDelta::FromMilliseconds(1),
+ base::TimeDelta::FromSeconds(10), 100);
+ UMA_HISTOGRAM_CLIPPED_TIMES("Sdch.Experiment_Holdback_4th_To_5th_c",
+ packet_times_[4] - packet_times_[3],
+ base::TimeDelta::FromMilliseconds(1),
+ base::TimeDelta::FromSeconds(10), 100);
+ return;
+ }
+ default:
+ NOTREACHED();
+ return;
+ }
+}
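
UpdatePacketReadTimes() never sees real packet boundaries; it approximates them by crediting one "packet" per 1430 bytes of newly observed input, so packets discovered in a single pass all share one timestamp, and the byte counter is then resynchronized because packets may be short. A simplified, standalone sketch of that estimate (the free function and its parameter names are illustrative only; the constant and logic mirror the patch):

    #include <vector>
    #include "base/basictypes.h"
    #include "base/time.h"

    static const size_t kTypicalPacketSize = 1430;  // Assumed per-packet payload.

    // Simplified model of UpdatePacketReadTimes(): estimate how many packets the
    // newly read bytes represent, timing only the first max_packets_timed of them.
    void AccountForNewBytes(int64 total_bytes_read,
                            int64* bytes_observed_in_packets,
                            int* observed_packet_count,
                            size_t max_packets_timed,
                            std::vector<base::Time>* packet_times) {
      base::Time now = base::Time::Now();
      while (total_bytes_read > *bytes_observed_in_packets) {
        ++*observed_packet_count;
        if (packet_times->size() < max_packets_timed)
          packet_times->push_back(now);  // Packets seen in one read share a time.
        *bytes_observed_in_packets += kTypicalPacketSize;
      }
      // Packets may not be full; resync to the exact byte count.
      *bytes_observed_in_packets = total_bytes_read;
    }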
diff --git a/net/url_request/url_request_job.h b/net/url_request/url_request_job.h
index 133af7e..5ecb6d0 100644
--- a/net/url_request/url_request_job.h
+++ b/net/url_request/url_request_job.h
@@ -10,6 +10,7 @@
#include "base/ref_counted.h"
#include "base/scoped_ptr.h"
+#include "base/time.h"
#include "net/base/filter.h"
#include "net/base/load_states.h"
@@ -31,6 +32,13 @@ class URLRequestJobMetrics;
class URLRequestJob : public base::RefCountedThreadSafe<URLRequestJob>,
public FilterContext {
public:
+ // When histogramming results related to SDCH and/or an SDCH latency test, the
+ // number of packets for which we need to record arrival times so as to
+ // calculate interpacket latencies. We currently are only looking at the
+ // first few packets, as we're monitoring the impact of the initial TCP
+ // congestion window on stalling of transmissions.
+ static const size_t kSdchPacketHistogramCount = 5;
+
explicit URLRequestJob(URLRequest* request);
virtual ~URLRequestJob();
@@ -201,6 +209,7 @@ class URLRequestJob : public base::RefCountedThreadSafe<URLRequestJob>,
virtual int64 GetByteReadCount() const;
virtual int GetResponseCode() const { return -1; }
virtual int GetInputStreamBufferSize() const { return kFilterBufSize; }
+ virtual void RecordPacketStats(StatisticSelector statistic) const;
protected:
// Notifies the job that headers have been received.
@@ -272,6 +281,10 @@ class URLRequestJob : public base::RefCountedThreadSafe<URLRequestJob>,
// Contains IO performance measurement when profiling is enabled.
scoped_ptr<URLRequestJobMetrics> metrics_;
+ // Facilitate histogramming by turning on packet counting.
+ // If called more than once, the largest value will be used.
+ void EnablePacketCounting(size_t max_packets_timed);
+
private:
// Size of filter input buffers used by this class.
static const int kFilterBufSize;
@@ -290,11 +303,13 @@ class URLRequestJob : public base::RefCountedThreadSafe<URLRequestJob>,
// have been read.
void RecordBytesRead(int bytes_read);
- private:
// Called to query whether there is data available in the filter to be read
// out.
bool FilterHasData();
+ // Record packet arrival times for possible use in histograms.
+ void UpdatePacketReadTimes();
+
// Indicates that the job is done producing data, either it has completed
// all the data or an error has been encountered. Set exclusively by
// NotifyDone so that it is kept in sync with the request.
@@ -324,11 +339,48 @@ class URLRequestJob : public base::RefCountedThreadSafe<URLRequestJob>,
// Expected content size
int64 expected_content_size_;
+ //----------------------------------------------------------------------------
+ // Data used for statistics gathering in some instances. This data is only
+ // used for histograms etc., and is not required. It is optionally gathered
+ // based on the settings of several control variables.
+
+ // Enable recording of packet arrival times for histogramming.
+ bool packet_timing_enabled_;
+
+ // TODO(jar): improve the quality of the gathered info by gathering most times
+ // at a lower point in the network stack, assuring we have actual packet
+ // boundaries, rather than approximations. Also note that input byte count
+ // as gathered here is post-SSL, and post-cache-fetch, and does not reflect
+ // true packet arrival times in such cases.
+
// Total number of bytes read from network (or cache) and typically handed
// to filter to process. Used to histogram compression ratios, and error
// recovery scenarios in filters.
int64 filter_input_byte_count_;
+ // The number of bytes that have been accounted for in packets (where some of
+ // those packets may possibly have had their time of arrival recorded).
+ int64 bytes_observed_in_packets_;
+
+ // Limit on the size of the array packet_times_. This can be set to
+ // zero, and then no packet times will be gathered.
+ size_t max_packets_timed_;
+
+ // Arrival times for some of the first few packets.
+ std::vector<base::Time> packet_times_;
+
+ // The request time may not be available when we are being destroyed, so we
+ // snapshot it early on.
+ base::Time request_time_snapshot_;
+
+ // Since we don't save all packet times in packet_times_, we save the
+ // last time for use in histograms.
+ base::Time final_packet_time_;
+
+ // The count of the number of packets, some of which may not have been timed.
+ // We're ignoring overflow, as 1430 x 2^31 is a LOT of bytes.
+ int observed_packet_count_;
+
DISALLOW_COPY_AND_ASSIGN(URLRequestJob);
};