summaryrefslogtreecommitdiffstats
path: root/net/base
diff options
context:
space:
mode:
authorjar@google.com <jar@google.com@0039d316-1c4b-4281-b951-d872f2087c98>2008-11-15 02:39:45 +0000
committerjar@google.com <jar@google.com@0039d316-1c4b-4281-b951-d872f2087c98>2008-11-15 02:39:45 +0000
commit4ff391fcc5b88061912f73ec570f389100cc5e2a (patch)
tree91d3eb1a894a71b64ff3769beb058fc274ce44fb /net/base
parentdf4ac237348f1aebfe7ca345e2bc6375fd51a1dc (diff)
downloadchromium_src-4ff391fcc5b88061912f73ec570f389100cc5e2a.zip
chromium_src-4ff391fcc5b88061912f73ec570f389100cc5e2a.tar.gz
chromium_src-4ff391fcc5b88061912f73ec570f389100cc5e2a.tar.bz2
Open up SDCH for all sites, in preparation for latency tests
Since the stability test is going well (so far) on ".google.com," this change will open up support for SDCH compression to all sites. This will allow for more international testing as well. I tightened down the restrictions on who can set up a dictionary for a given domain. I'm pretty sure it is at least as restrictive as the current SDCH spec. I also supplied a default expiration time for using an SDCH dictionary at 30 days (as per SDCH spec). To be safer with the latency histograms, I also tightened the period of time we measure, on the off chance that the page somehow asks for more bytes after everything has been read/rendered. r=openvcdiff,hunar Review URL: http://codereview.chromium.org/11009 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@5529 0039d316-1c4b-4281-b951-d872f2087c98
Diffstat (limited to 'net/base')
-rw-r--r--net/base/sdch_filter.cc10
-rw-r--r--net/base/sdch_filter_unittest.cc63
-rw-r--r--net/base/sdch_manager.cc15
3 files changed, 81 insertions, 7 deletions
diff --git a/net/base/sdch_filter.cc b/net/base/sdch_filter.cc
index 9678ae4..46e6af8 100644
--- a/net/base/sdch_filter.cc
+++ b/net/base/sdch_filter.cc
@@ -42,7 +42,7 @@ SdchFilter::~SdchFilter() {
base::TimeDelta duration = time_of_last_read_ - connect_time();
// Note: connect_time may be somewhat incorrect if this is cached data, as
// it will reflect the time the connect was done for the original read :-(.
- // To avoid any chances of overflow (and since SDCH is meant to primarilly
+ // To avoid any chances of overflow, and since SDCH is meant to primarilly
// handle short downloads, we'll restrict what results we log to effectively
// discard bogus large numbers. Note that IF the number is large enough, it
// would DCHECK in histogram as the square of the value is summed. The
@@ -50,9 +50,9 @@ SdchFilter::~SdchFilter() {
// seconds, so the discarded data would not be that readable anyway.
if (30 >= duration.InSeconds()) {
if (DECODING_IN_PROGRESS == decoding_status_)
- UMA_HISTOGRAM_TIMES(L"Sdch.Transit_Latency", duration);
+ UMA_HISTOGRAM_TIMES(L"Sdch.Transit_Latency_2", duration);
if (PASS_THROUGH == decoding_status_)
- UMA_HISTOGRAM_TIMES(L"Sdch.Transit_Pass-through_Latency", duration);
+ UMA_HISTOGRAM_TIMES(L"Sdch.Transit_Pass-through_Latency_2", duration);
}
}
@@ -90,7 +90,9 @@ Filter::FilterStatus SdchFilter::ReadFilteredData(char* dest_buffer,
if (!dest_buffer || available_space <= 0)
return FILTER_ERROR;
- time_of_last_read_ = base::Time::Now();
+ // Don't update when we're called to just flush out our internal buffers.
+ if (next_stream_data_ && stream_data_len_ > 0)
+ time_of_last_read_ = base::Time::Now();
if (WAITING_FOR_DICTIONARY_SELECTION == decoding_status_) {
FilterStatus status = InitializeDictionary();
diff --git a/net/base/sdch_filter_unittest.cc b/net/base/sdch_filter_unittest.cc
index 58a32e8..81fc3e9 100644
--- a/net/base/sdch_filter_unittest.cc
+++ b/net/base/sdch_filter_unittest.cc
@@ -49,7 +49,8 @@ class SdchFilterTest : public testing::Test {
scoped_ptr<SdchManager> sdch_manager_; // A singleton database.
};
-std::string SdchFilterTest::NewSdchCompressedData(const std::string dictionary) {
+std::string SdchFilterTest::NewSdchCompressedData(
+ const std::string dictionary) {
std::string client_hash;
std::string server_hash;
SdchManager::GenerateHash(dictionary, &client_hash, &server_hash);
@@ -581,3 +582,63 @@ TEST_F(SdchFilterTest, DomainBlacklisting) {
EXPECT_FALSE(SdchManager::Global()->IsInSupportedDomain(test_url));
EXPECT_FALSE(SdchManager::Global()->IsInSupportedDomain(google_url));
}
+
+TEST_F(SdchFilterTest, CanSetExactMatchDictionary) {
+ std::string dictionary_domain("x.y.z.google.com");
+ std::string dictionary_text(NewSdchDictionary(dictionary_domain));
+
+ // Perfect match should work.
+ EXPECT_TRUE(sdch_manager_->AddSdchDictionary(dictionary_text,
+ GURL("http://" + dictionary_domain)));
+}
+
+TEST_F(SdchFilterTest, FailToSetDomainMismatchDictionary) {
+ std::string dictionary_domain("x.y.z.google.com");
+ std::string dictionary_text(NewSdchDictionary(dictionary_domain));
+
+ // Fail the "domain match" requirement.
+ EXPECT_FALSE(sdch_manager_->AddSdchDictionary(dictionary_text,
+ GURL("http://y.z.google.com")));
+}
+
+TEST_F(SdchFilterTest, FailToSetDotHostPrefixDomainDictionary) {
+ std::string dictionary_domain("x.y.z.google.com");
+ std::string dictionary_text(NewSdchDictionary(dictionary_domain));
+
+ // Fail the HD with D being the domain and H having a dot requirement.
+ EXPECT_FALSE(sdch_manager_->AddSdchDictionary(dictionary_text,
+ GURL("http://w.x.y.z.google.com")));
+}
+
+TEST_F(SdchFilterTest, FailToSetRepeatPrefixWithDotDictionary) {
+ // Make sure that a prefix that matches the domain postfix won't confuse
+ // the validation checks.
+ std::string dictionary_domain("www.google.com");
+ std::string dictionary_text(NewSdchDictionary(dictionary_domain));
+
+ // Fail the HD with D being the domain and H having a dot requirement.
+ EXPECT_FALSE(sdch_manager_->AddSdchDictionary(dictionary_text,
+ GURL("http://www.google.com.www.google.com")));
+}
+
+TEST_F(SdchFilterTest, CanSetLeadingDotDomainDictionary) {
+ // Make sure that a prefix that matches the domain postfix won't confuse
+ // the validation checks.
+ std::string dictionary_domain(".google.com");
+ std::string dictionary_text(NewSdchDictionary(dictionary_domain));
+
+ // Fail the HD with D being the domain and H having a dot requirement.
+ EXPECT_TRUE(sdch_manager_->AddSdchDictionary(dictionary_text,
+ GURL("http://www.google.com")));
+}
+
+// Make sure the order of the tests is not helping us or confusing things.
+// See test CanSetExactMatchDictionary above for first try.
+TEST_F(SdchFilterTest, CanStillSetExactMatchDictionary) {
+ std::string dictionary_domain("x.y.z.google.com");
+ std::string dictionary_text(NewSdchDictionary(dictionary_domain));
+
+ // Perfect match should *STILL* work.
+ EXPECT_TRUE(sdch_manager_->AddSdchDictionary(dictionary_text,
+ GURL("http://" + dictionary_domain)));
+}
diff --git a/net/base/sdch_manager.cc b/net/base/sdch_manager.cc
index 08c9d1c2..005d300 100644
--- a/net/base/sdch_manager.cc
+++ b/net/base/sdch_manager.cc
@@ -122,7 +122,7 @@ bool SdchManager::AddSdchDictionary(const std::string& dictionary_text,
std::string domain, path;
std::set<int> ports;
- Time expiration;
+ Time expiration(Time::Now() + TimeDelta::FromDays(30));
size_t header_end = dictionary_text.find("\n\n");
if (std::string::npos == header_end) {
@@ -300,7 +300,16 @@ bool SdchManager::Dictionary::CanSet(const std::string& domain,
return false;
}
- // TODO(jar): Enforce item 4 above.
+ std::string referrer_url_host = dictionary_url.host();
+ size_t postfix_domain_index = referrer_url_host.rfind(domain);
+ // See if it is indeed a postfix, or just an internal string.
+ if (referrer_url_host.size() == postfix_domain_index + domain.size()) {
+ // It is a postfix... so check to see if there's a dot in the prefix.
+ size_t end_of_host_index = referrer_url_host.find_first_of('.');
+ if (referrer_url_host.npos != end_of_host_index &&
+ end_of_host_index < postfix_domain_index)
+ return false;
+ }
if (!ports.empty()
&& 0 == ports.count(dictionary_url.EffectiveIntPort())) {
@@ -365,6 +374,8 @@ bool SdchManager::Dictionary::CanAdvertise(const GURL& target_url) {
return false;
if (target_url.SchemeIsSecure())
return false;
+ if (Time::Now() > expiration_)
+ return false;
return true;
}