author     jar@chromium.org <jar@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>   2010-03-20 05:41:42 +0000
committer  jar@chromium.org <jar@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>   2010-03-20 05:41:42 +0000
commit     f9f4841b14a9f309ce5ee613f0d4de6afad88767 (patch)
tree       390a79375cb846d9c0aba2b708532df92464ec26 /chrome/browser
parent     5fbd065bd1025374e52d3c939fa7f668b79c153a (diff)
Two experiments: limit DNS prefetch concurrency; split the first TCP packet
Some firewalls apparently try to preclude a "SYN flood to host" by
limiting the number of SYNs (used to open a TCP/IP socket) that are
outstanding without a corresponding SYN-ACK. Presumably this is to
prevent a user from participating in a SYN-flood attack (which
traditionally sends many SYN packets with forged return addresses,
so that no responses ever arrive). Apparently this firewall
technology has in some cases been extended to cover UDP sessions
that have received no response, which may include DNS resolutions.
Since the prefetcher currently resolves as many as 8 names
simultaneously, that is remarkably close to the reported threshold
of 10 unanswered connections. This experiment limits concurrent
resolutions to 2, 4, or 6, so that we can see whether a lower limit
helps users.
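
For intuition, here is a minimal sketch of the kind of concurrency
cap being tested. This is hypothetical code, not the actual
prefetcher (which lives under chrome/browser/net/); ResolverQueue and
StartResolve() are invented for illustration.

    // Illustrative sketch only: cap in-flight resolutions so a stateful
    // firewall never sees more than max_concurrent unanswered DNS queries
    // from us at once.
    #include <cstddef>
    #include <queue>
    #include <string>

    class ResolverQueue {
     public:
      explicit ResolverQueue(size_t max_concurrent)
          : max_concurrent_(max_concurrent), in_flight_(0) {}

      void Enqueue(const std::string& hostname) {
        pending_.push(hostname);
        MaybeStartMore();
      }

      // Invoked when a resolution completes (answered or timed out).
      void OnResolved() {
        --in_flight_;
        MaybeStartMore();
      }

     private:
      void StartResolve(const std::string& hostname) {
        (void)hostname;  // The real UDP query would be issued here.
      }

      void MaybeStartMore() {
        while (in_flight_ < max_concurrent_ && !pending_.empty()) {
          StartResolve(pending_.front());
          pending_.pop();
          ++in_flight_;
        }
      }

      const size_t max_concurrent_;  // 2, 4, or 6 in the experiment arms.
      size_t in_flight_;
      std::queue<std::string> pending_;
    };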
In TCP, the RTO remains (under Windows) at a full 3 seconds until
after the first ACK is received. As a result, if the first data
packet sent (after the SYN) is lost, TCP won't resend it until 3
seconds have passed without an ACK. As a test, we split the first
packet into two parts (the second part containing only one byte).
This is done as an A/B test, and we'll see whether it yields a
measurable improvement in page-load-time latency.
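
As a sketch of the idea only (hypothetical POSIX-socket code; the
real change is inside the network stack, and SendFirstPacketSplit is
an invented name):

    // Send the first application write as two segments, the second carrying
    // a single byte.  Nagle's algorithm must be disabled (TCP_NODELAY) or
    // the kernel may coalesce the two writes back into one packet.
    #include <sys/types.h>
    #include <sys/socket.h>
    #include <cstddef>

    // fd is a connected TCP socket.  Returns bytes written, or -1 on error.
    ssize_t SendFirstPacketSplit(int fd, const char* data, size_t len) {
      if (len < 2)
        return send(fd, data, len, 0);
      ssize_t first = send(fd, data, len - 1, 0);         // all but last byte
      if (first < 0)
        return first;
      ssize_t second = send(fd, data + (len - 1), 1, 0);  // final byte alone
      return second < 0 ? second : first + second;
    }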
Finally, to get better page-load stats, I adjusted the PLT
histograms so that we record a "final" time for abandoned pages when
they are closed (even if they never finished rendering, etc.). This
should give a much fairer PLT comparison across all network latency
experiments.
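
A sketch of that bookkeeping; the class and method names here are
invented for illustration and are not the actual histogram code:

    // When a tab closes before the load completes, record the elapsed time
    // as that page's final PLT sample rather than dropping it.
    #include "base/time.h"  // base::TimeTicks / base::TimeDelta (2010 path)

    class PageLoadTracker {
     public:
      void OnLoadFinished(base::TimeTicks now) { finish_time_ = now; }

      void OnTabClosed(base::TimeTicks now) {
        if (finish_time_.is_null())            // Abandoned before finishing:
          RecordFinalTime(now - start_time_);  // close time is final sample.
      }

     private:
      void RecordFinalTime(base::TimeDelta elapsed) {
        (void)elapsed;  // The PLT histogram sample would be emitted here.
      }

      base::TimeTicks start_time_;   // set when the load began
      base::TimeTicks finish_time_;  // null until the page finishes
    };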
BUG=3041
BUG=12754
r=mbelshe,darin
Review URL: http://codereview.chromium.org/1088002
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@42181 0039d316-1c4b-4281-b951-d872f2087c98
Diffstat (limited to 'chrome/browser')
-rw-r--r--  chrome/browser/browser_main.cc    16
-rw-r--r--  chrome/browser/net/dns_global.cc  32
2 files changed, 44 insertions(+), 4 deletions(-)
diff --git a/chrome/browser/browser_main.cc b/chrome/browser/browser_main.cc
index 0085c91..262397a 100644
--- a/chrome/browser/browser_main.cc
+++ b/chrome/browser/browser_main.cc
@@ -417,9 +417,23 @@ int BrowserMain(const MainFunctionParams& parameters) {
   SystemMonitor system_monitor;
   HighResolutionTimerManager hi_res_timer_manager;
 
-  // Initialize statistical testing infrastructure.
+  // Initialize statistical testing infrastructure for entire browser.
   FieldTrialList field_trial;
 
+  // Set up a field trial to see if splitting the first transmitted packet helps
+  // with latency.
+  {
+    FieldTrial::Probability kDivisor = 100;
+    FieldTrial* trial = new FieldTrial("PacketSplit", kDivisor);
+    // For each option (i.e., non-default), we have a fixed probability.
+    FieldTrial::Probability kProbabilityPerGroup = 10;  // 10% probability.
+    int split = trial->AppendGroup("_first_packet_split", kProbabilityPerGroup);
+    DCHECK_EQ(split, 0);
+    int intact = trial->AppendGroup("_first_packet_intact",
+                                    FieldTrial::kAllRemainingProbability);
+    DCHECK_EQ(intact, 1);
+  }
+
   std::wstring app_name = chrome::kBrowserAppName;
   std::string thread_name_string = WideToASCII(app_name + L"_BrowserMain");
 
diff --git a/chrome/browser/net/dns_global.cc b/chrome/browser/net/dns_global.cc
index a61b85d..f7fe990 100644
--- a/chrome/browser/net/dns_global.cc
+++ b/chrome/browser/net/dns_global.cc
@@ -543,13 +543,21 @@ DnsGlobalInit::DnsGlobalInit(PrefService* user_prefs,
   // latency of page loads.
   FieldTrial::Probability kDivisor = 100;
   // For each option (i.e., non-default), we have a fixed probability.
-  FieldTrial::Probability kProbabilityPerGroup = 3;  // 3% probability.
+  FieldTrial::Probability kProbabilityPerGroup = 5;  // 5% probability.
 
   trial_ = new FieldTrial("DnsImpact", kDivisor);
 
   // First option is to disable prefetching completely.
   int disabled_prefetch = trial_->AppendGroup("_disabled_prefetch",
                                               kProbabilityPerGroup);
+
+
+  // We're running two experiments at the same time.  The first set of trials
+  // modulates the delay-time until we declare a congestion event (and purge
+  // our queue).  The second modulates the number of concurrent resolutions
+  // we do at any time.  Users are in exactly one trial (or the default) during
+  // any one run, and hence only one experiment at a time.
+  // Experiment 1:
   // Set congestion detection at 250, 500, or 750ms, rather than the 1 second
   // default.
   int max_250ms_prefetch = trial_->AppendGroup("_max_250ms_queue_prefetch",
@@ -561,6 +569,16 @@ DnsGlobalInit::DnsGlobalInit(PrefService* user_prefs,
   // Set congestion detection at 2 seconds instead of the 1 second default.
   int max_2s_prefetch = trial_->AppendGroup("_max_2s_queue_prefetch",
                                             kProbabilityPerGroup);
+  // Experiment 2:
+  // Set max simultaneous resolutions to 2, 4, or 6, and scale the congestion
+  // limit proportionally (so we don't impact average probability of asserting
+  // congestion very much).
+  int max_2_concurrent_prefetch = trial_->AppendGroup(
+      "_max_2_concurrent_prefetch", kProbabilityPerGroup);
+  int max_4_concurrent_prefetch = trial_->AppendGroup(
+      "_max_4_concurrent_prefetch", kProbabilityPerGroup);
+  int max_6_concurrent_prefetch = trial_->AppendGroup(
+      "_max_6_concurrent_prefetch", kProbabilityPerGroup);
 
   trial_->AppendGroup("_default_enabled_prefetch",
                       FieldTrial::kAllRemainingProbability);
@@ -571,9 +589,7 @@ DnsGlobalInit::DnsGlobalInit(PrefService* user_prefs,
 
   if (trial_->group() != disabled_prefetch) {
     // Initialize the DNS prefetch system.
-
     size_t max_concurrent = kMaxPrefetchConcurrentLookups;
-
     int max_queueing_delay_ms = kMaxPrefetchQueueingDelayMs;
 
     if (trial_->group() == max_250ms_prefetch)
@@ -584,6 +600,16 @@ DnsGlobalInit::DnsGlobalInit(PrefService* user_prefs,
       max_queueing_delay_ms = 750;
     else if (trial_->group() == max_2s_prefetch)
       max_queueing_delay_ms = 2000;
+    if (trial_->group() == max_2_concurrent_prefetch)
+      max_concurrent = 2;
+    else if (trial_->group() == max_4_concurrent_prefetch)
+      max_concurrent = 4;
+    else if (trial_->group() == max_6_concurrent_prefetch)
+      max_concurrent = 6;
+    // Scale acceptable delay so we don't cause congestion limits to fire as
+    // we modulate max_concurrent (*if* we are modulating it at all).
+    max_queueing_delay_ms = (kMaxPrefetchQueueingDelayMs *
+                             kMaxPrefetchConcurrentLookups) / max_concurrent;
 
     TimeDelta max_queueing_delay(
         TimeDelta::FromMilliseconds(max_queueing_delay_ms));
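
A worked example of the queueing-delay scaling in the last hunk,
assuming the defaults the commit message cites (1-second congestion
delay, 8 concurrent lookups); the actual constant values in
dns_global.cc may differ:

    #include <cstdio>
    #include <initializer_list>

    int main() {
      // Assumed defaults, per the commit message (not verified constants).
      const int kMaxPrefetchQueueingDelayMs = 1000;
      const int kMaxPrefetchConcurrentLookups = 8;
      for (int max_concurrent : {2, 4, 6, 8}) {
        int delay_ms = (kMaxPrefetchQueueingDelayMs *
                        kMaxPrefetchConcurrentLookups) / max_concurrent;
        // Prints: 2 -> 4000 ms, 4 -> 2000 ms, 6 -> 1333 ms, 8 -> 1000 ms.
        std::printf("max_concurrent=%d -> max_queueing_delay=%d ms\n",
                    max_concurrent, delay_ms);
      }
      return 0;
    }

With fewer simultaneous lookups, each queued name waits longer before
starting, so the tolerated delay is raised in proportion to keep the
congestion-detection trigger roughly as likely as in the default arm.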