summary refs log tree commit diff stats
path: root/chrome/browser
diff options
context:
space:
mode:
author pliard@chromium.org <pliard@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98> 2012-03-09 14:39:48 +0000
committer pliard@chromium.org <pliard@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98> 2012-03-09 14:39:48 +0000
commit 368797e90d4f954261abe213e93dc13fc5f135bd (patch)
tree 345c9a9d73a5d7c7071deb80e7b47b2b4765818b /chrome/browser
parent 11b4670076c55aafe08c18649b1181241e15a746 (diff)
download chromium_src-368797e90d4f954261abe213e93dc13fc5f135bd.zip
chromium_src-368797e90d4f954261abe213e93dc13fc5f135bd.tar.gz
chromium_src-368797e90d4f954261abe213e93dc13fc5f135bd.tar.bz2
Remove static initializer in url_info.cc.
BUG=94925
TEST=unit_tests
Review URL: http://codereview.chromium.org/9635018
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@125844 0039d316-1c4b-4281-b951-d872f2087c98
Diffstat (limited to 'chrome/browser')
-rw-r--r--chrome/browser/net/predictor.h4
-rw-r--r--chrome/browser/net/predictor_unittest.cc2
-rw-r--r--chrome/browser/net/url_info.cc80
-rw-r--r--chrome/browser/net/url_info.h13
4 files changed, 56 insertions, 43 deletions
diff --git a/chrome/browser/net/predictor.h b/chrome/browser/net/predictor.h
index 9b5a0f1..9778943 100644
--- a/chrome/browser/net/predictor.h
+++ b/chrome/browser/net/predictor.h
@@ -368,10 +368,10 @@ class Predictor {
}
// Only for testing. Return how long was the resolution
- // or UrlInfo::kNullDuration if it hasn't been resolved yet.
+ // or UrlInfo::NullDuration() if it hasn't been resolved yet.
base::TimeDelta GetResolutionDuration(const GURL& url) {
if (results_.find(url) == results_.end())
- return UrlInfo::kNullDuration;
+ return UrlInfo::NullDuration();
return results_[url].resolve_duration();
}
diff --git a/chrome/browser/net/predictor_unittest.cc b/chrome/browser/net/predictor_unittest.cc
index 92974de..3b040c5 100644
--- a/chrome/browser/net/predictor_unittest.cc
+++ b/chrome/browser/net/predictor_unittest.cc
@@ -44,7 +44,7 @@ class WaitForResolutionHelper {
void Run() {
for (UrlList::const_iterator i = hosts_.begin(); i != hosts_.end(); ++i)
if (predictor_->GetResolutionDuration(*i) ==
- UrlInfo::kNullDuration)
+ UrlInfo::NullDuration())
return; // We don't have resolution for that host.
// When all hostnames have been resolved, exit the loop.
diff --git a/chrome/browser/net/url_info.cc b/chrome/browser/net/url_info.cc
index 4937b154..a5ac9b2 100644
--- a/chrome/browser/net/url_info.cc
+++ b/chrome/browser/net/url_info.cc
@@ -11,6 +11,7 @@
#include <string>
#include "base/format_macros.h"
+#include "base/lazy_instance.h"
#include "base/logging.h"
#include "base/metrics/histogram.h"
#include "base/stringprintf.h"
@@ -21,7 +22,41 @@ using base::TimeTicks;
namespace chrome_browser_net {
-static bool detailed_logging_enabled = false;
+namespace {
+
+// The number of OS cache entries we can guarantee(?) before cache eviction
+// might likely take place.
+const int kMaxGuaranteedDnsCacheSize = 50;
+
+// Common low end TTL for sites is 5 minutes. However, DNS servers give us the
+// remaining time, not the original 5 minutes. Hence it doesn't much matter
+// whether we found something in the local cache, or an ISP cache, it will on
+// average be 2.5 minutes before it expires. We could try to model this with
+// 180 seconds, but simpler is just to do the lookups all the time (wasting OS
+// calls(?)), and let that OS cache decide what to do (with TTL in hand). We
+// use a small time to help get some duplicate suppression, in case a page has
+// a TON of copies of the same domain name, so that we don't thrash the OS to
+// death. Hopefully it is small enough that we're not hurting our cache hit
+// rate (i.e., we could always ask the OS).
+const int kDefaultCacheExpirationDuration = 5;
+
+TimeDelta MaxNonNetworkDnsLookupDuration() {
+ return TimeDelta::FromMilliseconds(15);
+}
+
+bool detailed_logging_enabled = false;
+
+struct GlobalState {
+ GlobalState() {
+ cache_expiration_duration =
+ TimeDelta::FromSeconds(kDefaultCacheExpirationDuration);
+ }
+ TimeDelta cache_expiration_duration;
+};
+
+base::LazyInstance<GlobalState>::Leaky global_state;
+
+} // anonymous namespace
// Use command line switch to enable detailed logging.
void EnablePredictorDetailedLog(bool enable) {
@@ -34,8 +69,8 @@ int UrlInfo::sequence_counter = 1;
UrlInfo::UrlInfo()
: state_(PENDING),
old_prequeue_state_(state_),
- resolve_duration_(kNullDuration),
- queue_duration_(kNullDuration),
+ resolve_duration_(NullDuration()),
+ queue_duration_(NullDuration()),
sequence_number_(0),
motivation_(NO_PREFETCH_MOTIVATION),
was_linked_(false) {
@@ -63,39 +98,22 @@ bool UrlInfo::NeedsDnsUpdate() {
}
}
-const TimeDelta UrlInfo::kNullDuration(TimeDelta::FromMilliseconds(-1));
-
-// Common low end TTL for sites is 5 minutes. However, DNS servers give us
-// the remaining time, not the original 5 minutes. Hence it doesn't much matter
-// whether we found something in the local cache, or an ISP cache, it will
-// on average be 2.5 minutes before it expires. We could try to model this with
-// 180 seconds, but simpler is just to do the lookups all the time (wasting
-// OS calls(?)), and let that OS cache decide what to do (with TTL in hand).
-// We use a small time to help get some duplicate suppression, in case a page
-// has a TON of copies of the same domain name, so that we don't thrash the OS
-// to death. Hopefully it is small enough that we're not hurting our cache hit
-// rate (i.e., we could always ask the OS).
-TimeDelta UrlInfo::cache_expiration_duration_(TimeDelta::FromSeconds(5));
-
-const TimeDelta UrlInfo::kMaxNonNetworkDnsLookupDuration(
- TimeDelta::FromMilliseconds(15));
-
-// Used by test ONLY. The value is otherwise constant.
+// Used by test ONLY. The value is otherwise constant.
// static
void UrlInfo::set_cache_expiration(TimeDelta time) {
- cache_expiration_duration_ = time;
+ global_state.Pointer()->cache_expiration_duration = time;
}
// static
TimeDelta UrlInfo::get_cache_expiration() {
- return cache_expiration_duration_;
+ return global_state.Get().cache_expiration_duration;
}
void UrlInfo::SetQueuedState(ResolutionMotivation motivation) {
DCHECK(PENDING == state_ || FOUND == state_ || NO_SUCH_NAME == state_);
old_prequeue_state_ = state_;
state_ = QUEUED;
- queue_duration_ = resolve_duration_ = kNullDuration;
+ queue_duration_ = resolve_duration_ = NullDuration();
SetMotivation(motivation);
GetDuration(); // Set time_
DLogResultsStats("DNS Prefetch in queue");
@@ -113,14 +131,14 @@ void UrlInfo::RemoveFromQueue() {
DCHECK(ASSIGNED == state_);
state_ = old_prequeue_state_;
DLogResultsStats("DNS Prefetch reset to prequeue");
- static const TimeDelta kBoundary = TimeDelta::FromSeconds(2);
+ const TimeDelta kBoundary = TimeDelta::FromSeconds(2);
if (queue_duration_ > kBoundary) {
UMA_HISTOGRAM_MEDIUM_TIMES("DNS.QueueRecycledDeltaOver2",
queue_duration_ - kBoundary);
return;
}
// Make a custom linear histogram for the region from 0 to boundary.
- const size_t kBucketCount = 52;
+ static const size_t kBucketCount = 52;
static base::Histogram* histogram(NULL);
if (!histogram)
histogram = base::LinearHistogram::FactoryTimeGet(
@@ -138,9 +156,10 @@ void UrlInfo::SetFoundState() {
DCHECK(ASSIGNED == state_);
state_ = FOUND;
resolve_duration_ = GetDuration();
- if (kMaxNonNetworkDnsLookupDuration <= resolve_duration_) {
+ const TimeDelta max_duration = MaxNonNetworkDnsLookupDuration();
+ if (max_duration <= resolve_duration_) {
UMA_HISTOGRAM_CUSTOM_TIMES("DNS.PrefetchResolution", resolve_duration_,
- kMaxNonNetworkDnsLookupDuration, TimeDelta::FromMinutes(15), 100);
+ max_duration, TimeDelta::FromMinutes(15), 100);
}
sequence_number_ = sequence_counter++;
DLogResultsStats("DNS PrefetchFound");
@@ -150,7 +169,7 @@ void UrlInfo::SetNoSuchNameState() {
DCHECK(ASSIGNED == state_);
state_ = NO_SUCH_NAME;
resolve_duration_ = GetDuration();
- if (kMaxNonNetworkDnsLookupDuration <= resolve_duration_) {
+ if (MaxNonNetworkDnsLookupDuration() <= resolve_duration_) {
DHISTOGRAM_TIMES("DNS.PrefetchNotFoundName", resolve_duration_);
}
sequence_number_ = sequence_counter++;
@@ -178,8 +197,7 @@ bool UrlInfo::IsStillCached() const {
return false;
TimeDelta time_since_resolution = TimeTicks::Now() - time_;
-
- return time_since_resolution < cache_expiration_duration_;
+ return time_since_resolution < global_state.Get().cache_expiration_duration;
}
void UrlInfo::DLogResultsStats(const char* message) const {
diff --git a/chrome/browser/net/url_info.h b/chrome/browser/net/url_info.h
index ddabcae..eb52f4f 100644
--- a/chrome/browser/net/url_info.h
+++ b/chrome/browser/net/url_info.h
@@ -1,4 +1,4 @@
-// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
@@ -64,14 +64,12 @@ class UrlInfo {
FOUND, // DNS resolution completed.
NO_SUCH_NAME, // DNS resolution completed.
};
- static const base::TimeDelta kMaxNonNetworkDnsLookupDuration;
- // The number of OS cache entries we can guarantee(?) before cache eviction
- // might likely take place.
- static const int kMaxGuaranteedDnsCacheSize = 50;
typedef std::vector<UrlInfo> UrlInfoTable;
- static const base::TimeDelta kNullDuration;
+ static base::TimeDelta NullDuration() {
+ return base::TimeDelta::FromMilliseconds(-1);
+ }
// UrlInfo are usually made by the default constructor during
// initializing of the Predictor's map (of info for Hostnames).
@@ -151,9 +149,6 @@ class UrlInfo {
// Helper function for about:dns printing.
std::string GetAsciiMotivation() const;
- // The next declaration is non-const to facilitate testing.
- static base::TimeDelta cache_expiration_duration_;
-
// The current state of this instance.
DnsProcessingState state_;