author     jar@chromium.org <jar@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>   2010-10-29 06:33:59 +0000
committer  jar@chromium.org <jar@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>   2010-10-29 06:33:59 +0000
commit     755a93358a4216c62c1055d1c33c45df8e1b7857 (patch)
tree       a452f5d467b5fbc157cb4b213073063b142d9147 /chrome/browser
parent     dff8c1108d0b414ea73ae0e0d0d904e09856cdd6 (diff)
Restrict total parallel DNS resolutions
A/B tests suggest that large numbers of parallel resolutions may cause the DNS failure rate (i.e., the ratio of "host not found" results to "found" results) to rise. To stay safely below that threshold, this change lowers the maximum number of parallel resolutions to 8 and restricts speculative resolutions to 3. We are also running A/B tests that will look at the impact of modulating either of these values.

BUG=3041
r=eroman

Review URL: http://codereview.chromium.org/4111004

git-svn-id: svn://svn.chromium.org/chrome/trunk/src@64369 0039d316-1c4b-4281-b951-d872f2087c98
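For reference, the limits described above can be restated in a few lines. This is an illustrative sketch, not code from the patch: the constant names are hypothetical stand-ins for the resolver-wide ceiling (8) and the speculative cap (3), and the static_assert simply makes explicit that speculation must leave headroom for navigational lookups.

#include <cstddef>

// Hypothetical restatement of the limits in this change (not patch code).
constexpr size_t kTotalParallelResolves = 8;        // resolver-wide ceiling
constexpr size_t kSpeculativeParallelResolves = 3;  // speculative share of it

// Speculative work must never be able to consume the whole resolver,
// or navigational resolutions could be starved behind prefetch traffic.
static_assert(kSpeculativeParallelResolves < kTotalParallelResolves,
              "speculation must leave room for navigational lookups");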
Diffstat (limited to 'chrome/browser')
-rw-r--r--  chrome/browser/io_thread.cc                15
-rw-r--r--  chrome/browser/io_thread.h                  7
-rw-r--r--  chrome/browser/net/predictor.h              5
-rw-r--r--  chrome/browser/net/predictor_api.cc        78
-rw-r--r--  chrome/browser/net/predictor_api.h          9
-rw-r--r--  chrome/browser/net/predictor_unittest.cc   19
6 files changed, 84 insertions, 49 deletions
diff --git a/chrome/browser/io_thread.cc b/chrome/browser/io_thread.cc
index f1cb24b..3c7fdc6 100644
--- a/chrome/browser/io_thread.cc
+++ b/chrome/browser/io_thread.cc
@@ -68,7 +68,9 @@ net::HostResolver* CreateGlobalHostResolver(net::NetLog* net_log) {
// List options with different counts.
// Firefox limits total to 8 in parallel, and default is currently 50.
int parallel_6 = trial->AppendGroup("parallel_6", kProbabilityPerGroup);
+ int parallel_7 = trial->AppendGroup("parallel_7", kProbabilityPerGroup);
int parallel_8 = trial->AppendGroup("parallel_8", kProbabilityPerGroup);
+ int parallel_9 = trial->AppendGroup("parallel_9", kProbabilityPerGroup);
int parallel_10 = trial->AppendGroup("parallel_10", kProbabilityPerGroup);
int parallel_14 = trial->AppendGroup("parallel_14", kProbabilityPerGroup);
int parallel_20 = trial->AppendGroup("parallel_20", kProbabilityPerGroup);
@@ -78,8 +80,12 @@ net::HostResolver* CreateGlobalHostResolver(net::NetLog* net_log) {
if (trial->group() == parallel_6)
parallelism = 6;
+ else if (trial->group() == parallel_7)
+ parallelism = 7;
else if (trial->group() == parallel_8)
parallelism = 8;
+ else if (trial->group() == parallel_9)
+ parallelism = 9;
else if (trial->group() == parallel_10)
parallelism = 10;
else if (trial->group() == parallel_14)
@@ -221,7 +227,7 @@ IOThread::Globals* IOThread::globals() {
void IOThread::InitNetworkPredictor(
bool prefetching_enabled,
base::TimeDelta max_dns_queue_delay,
- size_t max_concurrent,
+ size_t max_speculative_parallel_resolves,
const chrome_common_net::UrlList& startup_urls,
ListValue* referral_list,
bool preconnect_enabled) {
@@ -231,7 +237,8 @@ void IOThread::InitNetworkPredictor(
NewRunnableMethod(
this,
&IOThread::InitNetworkPredictorOnIOThread,
- prefetching_enabled, max_dns_queue_delay, max_concurrent,
+ prefetching_enabled, max_dns_queue_delay,
+ max_speculative_parallel_resolves,
startup_urls, referral_list, preconnect_enabled));
}
@@ -391,7 +398,7 @@ net::HttpAuthHandlerFactory* IOThread::CreateDefaultAuthHandlerFactory(
void IOThread::InitNetworkPredictorOnIOThread(
bool prefetching_enabled,
base::TimeDelta max_dns_queue_delay,
- size_t max_concurrent,
+ size_t max_speculative_parallel_resolves,
const chrome_common_net::UrlList& startup_urls,
ListValue* referral_list,
bool preconnect_enabled) {
@@ -403,7 +410,7 @@ void IOThread::InitNetworkPredictorOnIOThread(
predictor_ = new chrome_browser_net::Predictor(
globals_->host_resolver.get(),
max_dns_queue_delay,
- max_concurrent,
+ max_speculative_parallel_resolves,
preconnect_enabled);
predictor_->AddRef();
diff --git a/chrome/browser/io_thread.h b/chrome/browser/io_thread.h
index 2baec87..09f4df4 100644
--- a/chrome/browser/io_thread.h
+++ b/chrome/browser/io_thread.h
@@ -61,7 +61,7 @@ class IOThread : public BrowserProcessSubThread {
// It will post a task to the IO thread to perform the actual initialization.
void InitNetworkPredictor(bool prefetching_enabled,
base::TimeDelta max_dns_queue_delay,
- size_t max_concurrent,
+ size_t max_speculative_parallel_resolves,
const chrome_common_net::UrlList& startup_urls,
ListValue* referral_list,
bool preconnect_enabled);
@@ -92,9 +92,8 @@ class IOThread : public BrowserProcessSubThread {
void InitNetworkPredictorOnIOThread(
bool prefetching_enabled,
base::TimeDelta max_dns_queue_delay,
- size_t max_concurrent,
- const chrome_common_net::UrlList& startup_urls,
-
+ size_t max_speculative_parallel_resolves,
+ const chrome_common_net::UrlList& startup_urls,
ListValue* referral_list,
bool preconnect_enabled);
diff --git a/chrome/browser/net/predictor.h b/chrome/browser/net/predictor.h
index e6363ad..f735cb4 100644
--- a/chrome/browser/net/predictor.h
+++ b/chrome/browser/net/predictor.h
@@ -263,7 +263,10 @@ class Predictor : public base::RefCountedThreadSafe<Predictor> {
// When true, we don't make new lookup requests.
bool shutdown_;
- // The number of concurrent lookups currently allowed.
+ // The number of concurrent speculative lookups currently allowed to be sent
+ // to the resolver. Any additional lookups will be queued to avoid exceeding
+ // this value. The queue is a priority queue that will accelerate
+ // sub-resource speculation, and retard resolutions suggested by page scans.
const size_t max_concurrent_dns_lookups_;
// The maximum queueing delay that is acceptable before we enter congestion
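The comment added above states the policy in words: a fixed cap on in-flight speculative lookups, with overflow held in a priority queue that favors sub-resource speculation over names suggested by page scans. Below is a hypothetical, simplified sketch of that behavior (assumed names and types; not the Predictor's actual implementation):

#include <cstddef>
#include <queue>
#include <string>
#include <utility>

// Hypothetical sketch of the queueing policy described in the comment above.
enum class Motivation { kSubresource, kPageScan };

struct PendingLookup {
  std::string hostname;
  Motivation motivation;
  // std::priority_queue pops the "largest" element first, so order entries
  // such that sub-resource speculation outranks page-scan suggestions.
  bool operator<(const PendingLookup& other) const {
    return motivation > other.motivation;
  }
};

class SpeculativeResolveQueue {
 public:
  explicit SpeculativeResolveQueue(size_t max_concurrent)
      : max_concurrent_(max_concurrent) {}

  void Add(PendingLookup lookup) {
    if (in_flight_ < max_concurrent_) {
      ++in_flight_;
      StartResolve(lookup.hostname);
    } else {
      queue_.push(std::move(lookup));  // defer rather than exceed the cap
    }
  }

  void OnResolveComplete() {
    --in_flight_;
    if (!queue_.empty()) {
      PendingLookup next = queue_.top();
      queue_.pop();
      ++in_flight_;
      StartResolve(next.hostname);
    }
  }

 private:
  void StartResolve(const std::string& /*hostname*/) {
    // A real implementation would hand the name to the shared host resolver.
  }

  const size_t max_concurrent_;
  size_t in_flight_ = 0;
  std::priority_queue<PendingLookup> queue_;
};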
diff --git a/chrome/browser/net/predictor_api.cc b/chrome/browser/net/predictor_api.cc
index 9149095..a0d885b 100644
--- a/chrome/browser/net/predictor_api.cc
+++ b/chrome/browser/net/predictor_api.cc
@@ -41,11 +41,35 @@ static void DnsPrefetchMotivatedList(const UrlList& urls,
static UrlList GetPredictedUrlListAtStartup(PrefService* user_prefs,
PrefService* local_state);
+// Given that the underlying Chromium resolver defaults to a total maximum of
+// 8 parallel resolutions, we will avoid any chance of starving navigational
+// resolutions by limiting the number of parallel speculative resolutions.
+// TODO(jar): Move this limitation into the resolver.
// static
-const size_t PredictorInit::kMaxPrefetchConcurrentLookups = 8;
-
+const size_t PredictorInit::kMaxSpeculativeParallelResolves = 3;
+
+// To control our congestion avoidance system, which discards a queue when
+// resolutions are "taking too long," we need an expected resolution time.
+// Common average is in the range of 300-500ms.
+static const int kExpectedResolutionTimeMs = 500;
+
+// To control the congestion avoidance system, we need an estimate of how many
+// speculative requests may arrive at once. Since we currently only keep 8
+// subresource names for each frame, we'll use that as our basis. Note that
+// when scanning search results lists, we might actually get 10 at a time, and
+// wikipedia can often supply (during a page scan) upwards of 50. In those odd
+// cases, we may discard some of the later speculative requests mistakenly
+// assuming that the resolutions took too long.
+static const int kTypicalSpeculativeGroupSize = 8;
+
+// The next constant specifies an amount of queueing delay that is "too large,"
+// and indicative of problems with resolutions (perhaps due to an overloaded
+// router, or such). When we exceed this delay, congestion avoidance will kick
+// in and all speculations in the queue will be discarded.
// static
-const int PredictorInit::kMaxPrefetchQueueingDelayMs = 500;
+const int PredictorInit::kMaxSpeculativeResolveQueueDelayMs =
+ (kExpectedResolutionTimeMs * kTypicalSpeculativeGroupSize) /
+ kMaxSpeculativeParallelResolves;
// A version number for prefs that are saved. This should be incremented when
// we change the format so that we discard old data.
@@ -351,8 +375,10 @@ void PredictorGetHtmlInfo(std::string* output) {
//------------------------------------------------------------------------------
static void InitNetworkPredictor(TimeDelta max_dns_queue_delay,
- size_t max_concurrent, PrefService* user_prefs, PrefService* local_state,
- bool preconnect_enabled) {
+ size_t max_parallel_resolves,
+ PrefService* user_prefs,
+ PrefService* local_state,
+ bool preconnect_enabled) {
DCHECK(BrowserThread::CurrentlyOn(BrowserThread::UI));
bool prefetching_enabled =
@@ -367,7 +393,7 @@ static void InitNetworkPredictor(TimeDelta max_dns_queue_delay,
local_state->GetMutableList(prefs::kDnsHostReferralList)->DeepCopy());
g_browser_process->io_thread()->InitNetworkPredictor(
- prefetching_enabled, max_dns_queue_delay, max_concurrent, urls,
+ prefetching_enabled, max_dns_queue_delay, max_parallel_resolves, urls,
referral_list, preconnect_enabled);
}
@@ -554,33 +580,35 @@ PredictorInit::PredictorInit(PrefService* user_prefs,
if (trial_->group() != disabled_prefetch) {
// Initialize the DNS prefetch system.
- size_t max_concurrent = kMaxPrefetchConcurrentLookups;
- int max_queueing_delay_ms = kMaxPrefetchQueueingDelayMs;
-
- if (trial_->group() == max_250ms_prefetch)
- max_queueing_delay_ms = 250;
- else if (trial_->group() == max_500ms_prefetch)
- max_queueing_delay_ms = 500;
- else if (trial_->group() == max_750ms_prefetch)
- max_queueing_delay_ms = 750;
- else if (trial_->group() == max_2s_prefetch)
- max_queueing_delay_ms = 2000;
+ size_t max_parallel_resolves = kMaxSpeculativeParallelResolves;
+ int max_queueing_delay_ms = kMaxSpeculativeResolveQueueDelayMs;
+
if (trial_->group() == max_2_concurrent_prefetch)
- max_concurrent = 2;
+ max_parallel_resolves = 2;
else if (trial_->group() == max_4_concurrent_prefetch)
- max_concurrent = 4;
+ max_parallel_resolves = 4;
else if (trial_->group() == max_6_concurrent_prefetch)
- max_concurrent = 6;
- // Scale acceptable delay so we don't cause congestion limits to fire as
- // we modulate max_concurrent (*if* we are modulating it at all).
- max_queueing_delay_ms = (kMaxPrefetchQueueingDelayMs *
- kMaxPrefetchConcurrentLookups) / max_concurrent;
+ max_parallel_resolves = 6;
+
+ if (trial_->group() == max_250ms_prefetch) {
+ max_queueing_delay_ms =
+ (250 * kTypicalSpeculativeGroupSize) / max_parallel_resolves;
+ } else if (trial_->group() == max_500ms_prefetch) {
+ max_queueing_delay_ms =
+ (500 * kTypicalSpeculativeGroupSize) / max_parallel_resolves;
+ } else if (trial_->group() == max_750ms_prefetch) {
+ max_queueing_delay_ms =
+ (750 * kTypicalSpeculativeGroupSize) / max_parallel_resolves;
+ } else if (trial_->group() == max_2s_prefetch) {
+ max_queueing_delay_ms =
+ (2000 * kTypicalSpeculativeGroupSize) / max_parallel_resolves;
+ }
TimeDelta max_queueing_delay(
TimeDelta::FromMilliseconds(max_queueing_delay_ms));
DCHECK(!g_predictor);
- InitNetworkPredictor(max_queueing_delay, max_concurrent, user_prefs,
+ InitNetworkPredictor(max_queueing_delay, max_parallel_resolves, user_prefs,
local_state, preconnect_enabled);
}
}
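Worked through with the numbers above: the default threshold becomes kExpectedResolutionTimeMs * kTypicalSpeculativeGroupSize / kMaxSpeculativeParallelResolves = 500 * 8 / 3 ≈ 1333 ms, while a trial group pinned at 250 ms running 4 parallel resolves gets 250 * 8 / 4 = 500 ms. A small standalone check of that scaling rule (not part of the patch; the helper name is made up):

#include <cstdio>

// Mirrors the scaling rule used above: the acceptable queueing delay grows
// with expected per-lookup time and burst size, and shrinks with parallelism.
int MaxQueueDelayMs(int expected_resolution_ms, int group_size, int parallel_resolves) {
  return (expected_resolution_ms * group_size) / parallel_resolves;
}

int main() {
  std::printf("default (500 ms, 3 parallel): %d ms\n", MaxQueueDelayMs(500, 8, 3));   // 1333
  std::printf("250 ms trial, 4 parallel:     %d ms\n", MaxQueueDelayMs(250, 8, 4));   // 500
  std::printf("2000 ms trial, 2 parallel:    %d ms\n", MaxQueueDelayMs(2000, 8, 2));  // 8000
  return 0;
}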
diff --git a/chrome/browser/net/predictor_api.h b/chrome/browser/net/predictor_api.h
index 3e04e11..a03f7c9 100644
--- a/chrome/browser/net/predictor_api.h
+++ b/chrome/browser/net/predictor_api.h
@@ -81,15 +81,14 @@ void SavePredictorStateForNextStartupAndTrim(PrefService* prefs);
// Helper class to handle global init and shutdown.
class PredictorInit {
public:
- // Too many concurrent lookups negate benefits of prefetching by trashing
- // the OS cache before all resource loading is complete.
- // This is the default.
- static const size_t kMaxPrefetchConcurrentLookups;
+ // Too many concurrent lookups performed in parallel may overload a resolver,
+ // or may cause problems for a local router. The following limits that count.
+ static const size_t kMaxSpeculativeParallelResolves;
// When prefetch requests are queued beyond some period of time, then the
// system is congested, and we need to clear all queued requests to get out
// of that state. The following is the suggested default time limit.
- static const int kMaxPrefetchQueueingDelayMs;
+ static const int kMaxSpeculativeResolveQueueDelayMs;
PredictorInit(PrefService* user_prefs, PrefService* local_state,
bool preconnect_enabled);
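The header comment above also describes the recovery behavior: once queueing delay exceeds this limit, the system is considered congested and every queued speculative request is discarded. A hedged sketch of that flush rule follows, assuming (hypothetically) that each queued entry records when it was enqueued:

#include <chrono>
#include <deque>

// Hypothetical illustration of the congestion-avoidance flush described in
// the header comment above; not the Predictor's actual queue type.
struct QueuedSpeculation {
  std::chrono::steady_clock::time_point enqueue_time;
};

void MaybeFlushOnCongestion(std::deque<QueuedSpeculation>& queue,
                            std::chrono::milliseconds max_queue_delay) {
  if (queue.empty())
    return;
  const auto waited =
      std::chrono::steady_clock::now() - queue.front().enqueue_time;
  // If even the oldest entry has waited too long, resolution is backed up
  // (e.g. an overloaded router); discard all pending speculation.
  if (waited > max_queue_delay)
    queue.clear();
}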
diff --git a/chrome/browser/net/predictor_unittest.cc b/chrome/browser/net/predictor_unittest.cc
index a0a0113..38f0281 100644
--- a/chrome/browser/net/predictor_unittest.cc
+++ b/chrome/browser/net/predictor_unittest.cc
@@ -65,7 +65,7 @@ class PredictorTest : public testing::Test {
: io_thread_(BrowserThread::IO, &loop_),
host_resolver_(new net::MockCachingHostResolver()),
default_max_queueing_delay_(TimeDelta::FromMilliseconds(
- PredictorInit::kMaxPrefetchQueueingDelayMs)) {
+ PredictorInit::kMaxSpeculativeResolveQueueDelayMs)) {
}
protected:
@@ -112,7 +112,7 @@ TEST_F(PredictorTest, StartupShutdownTest) {
scoped_refptr<Predictor> testing_master =
new Predictor(host_resolver_.get(),
default_max_queueing_delay_,
- PredictorInit::kMaxPrefetchConcurrentLookups,
+ PredictorInit::kMaxSpeculativeParallelResolves,
false);
testing_master->Shutdown();
}
@@ -126,7 +126,7 @@ TEST_F(PredictorTest, ShutdownWhenResolutionIsPendingTest) {
scoped_refptr<Predictor> testing_master =
new Predictor(host_resolver_.get(),
default_max_queueing_delay_,
- PredictorInit::kMaxPrefetchConcurrentLookups,
+ PredictorInit::kMaxSpeculativeParallelResolves,
false);
GURL localhost("http://localhost:80");
@@ -152,7 +152,7 @@ TEST_F(PredictorTest, SingleLookupTest) {
scoped_refptr<Predictor> testing_master =
new Predictor(host_resolver_.get(),
default_max_queueing_delay_,
- PredictorInit::kMaxPrefetchConcurrentLookups,
+ PredictorInit::kMaxSpeculativeParallelResolves,
false);
GURL goog("http://www.google.com:80");
@@ -184,7 +184,7 @@ TEST_F(PredictorTest, ConcurrentLookupTest) {
scoped_refptr<Predictor> testing_master =
new Predictor(host_resolver_.get(),
default_max_queueing_delay_,
- PredictorInit::kMaxPrefetchConcurrentLookups,
+ PredictorInit::kMaxSpeculativeParallelResolves,
false);
GURL goog("http://www.google.com:80"),
@@ -221,7 +221,6 @@ TEST_F(PredictorTest, ConcurrentLookupTest) {
EXPECT_FALSE(testing_master->WasFound(bad1));
EXPECT_FALSE(testing_master->WasFound(bad2));
- EXPECT_GT(testing_master->peak_pending_lookups(), names.size() / 2);
EXPECT_LE(testing_master->peak_pending_lookups(), names.size());
EXPECT_LE(testing_master->peak_pending_lookups(),
testing_master->max_concurrent_dns_lookups());
@@ -235,7 +234,7 @@ TEST_F(PredictorTest, MassiveConcurrentLookupTest) {
scoped_refptr<Predictor> testing_master =
new Predictor(host_resolver_.get(),
default_max_queueing_delay_,
- PredictorInit::kMaxPrefetchConcurrentLookups,
+ PredictorInit::kMaxSpeculativeParallelResolves,
false);
UrlList names;
@@ -355,7 +354,7 @@ TEST_F(PredictorTest, ReferrerSerializationNilTest) {
scoped_refptr<Predictor> predictor =
new Predictor(host_resolver_.get(),
default_max_queueing_delay_,
- PredictorInit::kMaxPrefetchConcurrentLookups,
+ PredictorInit::kMaxSpeculativeParallelResolves,
false);
scoped_ptr<ListValue> referral_list(NewEmptySerializationList());
predictor->SerializeReferrers(referral_list.get());
@@ -374,7 +373,7 @@ TEST_F(PredictorTest, ReferrerSerializationSingleReferrerTest) {
scoped_refptr<Predictor> predictor =
new Predictor(host_resolver_.get(),
default_max_queueing_delay_,
- PredictorInit::kMaxPrefetchConcurrentLookups,
+ PredictorInit::kMaxSpeculativeParallelResolves,
false);
const GURL motivation_url("http://www.google.com:91");
const GURL subresource_url("http://icons.google.com:90");
@@ -402,7 +401,7 @@ TEST_F(PredictorTest, ReferrerSerializationTrimTest) {
scoped_refptr<Predictor> predictor =
new Predictor(host_resolver_.get(),
default_max_queueing_delay_,
- PredictorInit::kMaxPrefetchConcurrentLookups,
+ PredictorInit::kMaxSpeculativeParallelResolves,
false);
GURL motivation_url("http://www.google.com:110");