author    jar@chromium.org <jar@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>  2010-07-28 22:37:23 +0000
committer jar@chromium.org <jar@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>  2010-07-28 22:37:23 +0000
commit    f4ef861ba773e66d1c6e7e023a38db32afdd531d (patch)
tree      6ae53ffb969fa9ddf39131deb79c15b2ae8fa592 /chrome/browser/net/referrer.cc
parent    175aa689f7f9b705ee2c6eebb29aca1c93c540d5 (diff)
Support both preconnection and pre-resolution for subresources
With this change, both preconnection and preresolution are enabled when the
--enable-preconnection flag is turned on. I'm expecting to enable this feature
by default soon, so as to better tune the parameters.

BUG=42694
r=mbelshe
Review URL: http://codereview.chromium.org/3032014

git-svn-id: svn://svn.chromium.org/chrome/trunk/src@54044 0039d316-1c4b-4281-b951-d872f2087c98
Diffstat (limited to 'chrome/browser/net/referrer.cc')
-rw-r--r--  chrome/browser/net/referrer.cc | 114
1 file changed, 35 insertions(+), 79 deletions(-)
diff --git a/chrome/browser/net/referrer.cc b/chrome/browser/net/referrer.cc
index 936d25b..b5d8c82 100644
--- a/chrome/browser/net/referrer.cc
+++ b/chrome/browser/net/referrer.cc
@@ -7,23 +7,24 @@
#include <limits.h>
#include "base/logging.h"
+#include "chrome/browser/net/predictor.h"
namespace chrome_browser_net {
//------------------------------------------------------------------------------
// Smoothing parameter for updating subresource_use_rate_.
-// We always combine our old expected value, weighted by some factor, with the
-// new expected value Enew. The new "expected value" is the number of actual
-// connections made due to the curernt navigations.
-// This means the formula (in a concise form) is:
-// Eupdated = Eold * W + Enew * (1 - W)
+// We always combine our old expected value, weighted by some factor W (we use
+// kWeightingForOldExpectedValue), with the new expected value Enew. The new
+// "expected value" is the number of actual connections made due to the current
+// navigations.
// That means that IF we end up needing to connect, we should apply the formula:
-// Pupdated = Pold * W + Enew * (1 - W)
-// If we visit the containing url, but don't end up needing a connection:
-// Pupdated = Pold * W
-// To achive the above upating algorithm, we end up doing the multiplication
-// by W every time we contemplate doing a preconneciton (i.e., when we navigate
+// Eupdated = Eold * W + Enew * (1 - W)
+// If we visit the containing url, but don't end up needing a connection, then
+// Enew == 0, so we use the formula:
+// Eupdated = Eold * W
+// To achieve the above updating algorithm, we end up doing the multiplication
+// by W every time we contemplate doing a preconnection (i.e., when we navigate
// to the containing URL, and consider doing a preconnection), and then IFF we
// learn that we really needed a connection to the subresource, we complete the
// above algorithm by adding the (1 - W) for each connection we make.
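As an editorial aside (not part of the patch), the update rule spelled out in the comment above amounts to the following pair of steps, written here as free functions over a plain double; E stands for subresource_use_rate_ and W for kWeightingForOldExpectedValue:

    // Illustration only: the smoothing rule from the comment above, with E as
    // subresource_use_rate_ and W as kWeightingForOldExpectedValue.
    double OnReferrerNavigated(double e, double w) {
      return e * w;              // Eupdated = Eold * W (applied on every visit)
    }

    double OnSubresourceConnection(double e, double w) {
      return e + (1.0 - w);      // ... + Enew * (1 - W), once per actual connection
    }

    // Worked example with W = 0.66, E starting at 0.0, and a subresource that
    // needs exactly one connection per navigation:
    //   visit 1: 0.0    * 0.66 + 0.34 = 0.34
    //   visit 2: 0.34   * 0.66 + 0.34 = 0.5644
    //   visit 3: 0.5644 * 0.66 + 0.34 ~= 0.7125   (converging toward 1.0)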
@@ -32,13 +33,14 @@ namespace chrome_browser_net {
// 1.0.
static const double kWeightingForOldExpectedValue = 0.66;
-// The expected value needed before we actually do a preconnection.
-static const double kPreconnectWorthyExpectedValue = 0.7;
-
-// The expected value that we'll need a preconnection when we first see the
-// subresource getting fetched. Very conservative is 0.0, which will mean that
-// we have to wait for a while before using preconnection... but we do persist
-// results, so we'll have the learned answer in the long run.
+// To estimate the expected value of the number of connections that we'll need
+// when a referrer is navigated to, we start with the following rather low
+// initial value. Each time we do indeed (again) need the subresource, this
+// value will get increased. Each time we navigate to the referrer but never
+// end up needing this subresource, the value will decrease.
+// Very conservative is 0.0, which will mean that we have to wait for a while
+// before doing much speculative activity... but we do persist results, so
+// we'll save the asymptotic (correct?) learned answer in the long run.
static const double kInitialExpectedValue = 0.0;
// static
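A quick editorial check on these constants (not from the patch): starting from kInitialExpectedValue = 0.0 with W = 0.66, a subresource that is needed on every navigation reaches an expected value of 1 - W^n after n visits (roughly 0.87 after five), while one that is never needed again decays as W^n times its old value toward 0.0. The fixed point of the update is the long-run number of connections per navigation, and because the learned rate is persisted via Serialize() below, that ramp-up only has to happen once.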
@@ -71,71 +73,43 @@ void Referrer::SuggestHost(const GURL& url) {
void Referrer::DeleteLeastUseful() {
// Find the item with the lowest value. Most important is preconnection_rate,
- // next is latency savings, and last is lifetime (age).
+ // and least is lifetime (age).
GURL least_useful_url;
double lowest_rate_seen = 0.0;
// We use longs for durations because we will use multiplication on them.
- int64 lowest_latency_seen = 0; // Duration in milliseconds.
int64 least_useful_lifetime = 0; // Duration in milliseconds.
const base::Time kNow(base::Time::Now()); // Avoid multiple calls.
for (SubresourceMap::iterator it = begin(); it != end(); ++it) {
int64 lifetime = (kNow - it->second.birth_time()).InMilliseconds();
- int64 latency = it->second.latency().InMilliseconds();
double rate = it->second.subresource_use_rate();
if (least_useful_url.has_host()) {
if (rate > lowest_rate_seen)
continue;
- if (!latency && !lowest_latency_seen) {
- // Older name is less useful.
- if (lifetime <= least_useful_lifetime)
- continue;
- } else {
- // Compare the ratios:
- // latency/lifetime
- // vs.
- // lowest_latency_seen/least_useful_lifetime
- // by cross multiplying (to avoid integer division hassles). Overflow's
- // won't happen until both latency and lifetime pass about 49 days.
- if (latency * least_useful_lifetime >
- lowest_latency_seen * lifetime) {
- continue;
- }
- }
+ if (lifetime <= least_useful_lifetime)
+ continue;
}
least_useful_url = it->first;
lowest_rate_seen = rate;
- lowest_latency_seen = latency;
least_useful_lifetime = lifetime;
}
- erase(least_useful_url);
- // Note: there is a small chance that we will discard a least_useful_url
- // that is currently being prefetched because it *was* in this referer list.
- // In that case, when a benefit appears in AccrueValue() below, we are careful
- // to check before accessing the member.
-}
-
-void Referrer::AccrueValue(const base::TimeDelta& delta,
- const GURL& url) {
- SubresourceMap::iterator it = find(url);
- // Be careful that we weren't evicted from this referrer in DeleteLeastUseful.
- if (it != end())
- it->second.AccrueValue(delta);
+ if (least_useful_url.has_host())
+ erase(least_useful_url);
}
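A condensed sketch of the eviction rule that remains (standard-library stand-ins replace GURL and SubresourceMap; this is not the Chromium code itself): the entry with the lowest use rate becomes the eviction candidate, and an entry whose rate is not higher than the current candidate's displaces it only if it is also older.

    // Illustration of the selection order in DeleteLeastUseful(), using a
    // std::map keyed by URL spec instead of the real SubresourceMap/GURL types.
    #include <cstdint>
    #include <map>
    #include <string>

    struct Entry {
      double use_rate;      // analogous to subresource_use_rate()
      int64_t lifetime_ms;  // now - birth_time(), in milliseconds
    };

    std::string PickLeastUseful(const std::map<std::string, Entry>& entries) {
      std::string least_useful;
      double lowest_rate = 0.0;
      int64_t longest_lifetime = 0;
      for (std::map<std::string, Entry>::const_iterator it = entries.begin();
           it != entries.end(); ++it) {
        const Entry& e = it->second;
        if (!least_useful.empty()) {
          if (e.use_rate > lowest_rate)
            continue;  // A higher rate is never the eviction candidate.
          if (e.lifetime_ms <= longest_lifetime)
            continue;  // Rate not higher, but entry is younger: keep current pick.
        }
        least_useful = it->first;
        lowest_rate = e.use_rate;
        longest_lifetime = e.lifetime_ms;
      }
      return least_useful;  // Empty if the map was empty.
    }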
bool Referrer::Trim() {
- bool has_some_latency_left = false;
+ std::vector<GURL> discarded_urls;
for (SubresourceMap::iterator it = begin(); it != end(); ++it)
- if (it->second.Trim())
- has_some_latency_left = true;
- return has_some_latency_left;
+ if (!it->second.Trim())
+ discarded_urls.push_back(it->first);
+ for (size_t i = 0; i < discarded_urls.size(); ++i)
+ erase(discarded_urls[i]);
+ return size() > 0;
}
bool ReferrerValue::Trim() {
- int64 latency_ms = latency_.InMilliseconds() / 2;
- latency_ = base::TimeDelta::FromMilliseconds(latency_ms);
- return latency_ms > 0 ||
- subresource_use_rate_ > kPreconnectWorthyExpectedValue / 2;
+ subresource_use_rate_ /= 2.0;
+ return subresource_use_rate_ > Predictor::kPersistWorthyExpectedValue;
}
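The Trim() pair above now implements a periodic decay-and-prune: each pass halves every expected value and drops subresources that have fallen below Predictor::kPersistWorthyExpectedValue (presumably defined in the newly included predictor.h; its value is not shown in this diff). A minimal sketch of the pattern, with a placeholder threshold:

    // Sketch of the decay-and-prune pattern; the threshold here is a
    // placeholder, not Predictor::kPersistWorthyExpectedValue.
    #include <map>
    #include <string>
    #include <vector>

    static const double kPersistThreshold = 0.05;  // placeholder value

    // Returns true if any subresource is still worth keeping, mirroring
    // Referrer::Trim()'s "return size() > 0".
    bool TrimExpectedValues(std::map<std::string, double>* use_rates) {
      std::vector<std::string> discarded;
      for (std::map<std::string, double>::iterator it = use_rates->begin();
           it != use_rates->end(); ++it) {
        it->second /= 2.0;                    // halve, as ReferrerValue::Trim() does
        if (it->second <= kPersistThreshold)  // no longer worth persisting
          discarded.push_back(it->first);
      }
      for (size_t i = 0; i < discarded.size(); ++i)
        use_rates->erase(discarded[i]);
      return !use_rates->empty();
    }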
@@ -148,22 +122,17 @@ void Referrer::Deserialize(const Value& value) {
std::string url_spec;
if (!subresource_list->GetString(index++, &url_spec))
return;
- int latency_ms;
- if (!subresource_list->GetInteger(index++, &latency_ms))
- return;
double rate;
if (!subresource_list->GetReal(index++, &rate))
return;
GURL url(url_spec);
- base::TimeDelta latency = base::TimeDelta::FromMilliseconds(latency_ms);
// TODO(jar): We could be more direct, and change birth date or similar to
// show that this is a resurrected value we're adding in. I'm not yet sure
// of how best to optimize the learning and pruning (Trim) algorithm at this
// level, so for now, we just suggest subresources, which leaves them all
// with the same birth date (typically start of process).
SuggestHost(url);
- AccrueValue(latency, url);
(*this)[url].SetSubresourceUseRate(rate);
}
}
@@ -172,21 +141,10 @@ Value* Referrer::Serialize() const {
ListValue* subresource_list(new ListValue);
for (const_iterator it = begin(); it != end(); ++it) {
StringValue* url_spec(new StringValue(it->first.spec()));
- int latency_integer = static_cast<int>(it->second.latency().
- InMilliseconds());
- // Watch out for overflow in the above static_cast! Check to see if we went
- // negative, and just use a "big" value. The value seems unimportant once
- // we get to such high latencies. Probable cause of high latency is a bug
- // in other code, so also do a DCHECK.
- DCHECK_GE(latency_integer, 0);
- if (latency_integer < 0)
- latency_integer = INT_MAX;
- FundamentalValue* latency(new FundamentalValue(latency_integer));
FundamentalValue* rate(new FundamentalValue(
it->second.subresource_use_rate()));
subresource_list->Append(url_spec);
- subresource_list->Append(latency);
subresource_list->Append(rate);
}
return subresource_list;
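With the latency field gone, a serialized referrer entry is now a flat list that alternates the subresource URL spec (string) and its expected connection count (double), in the same order Deserialize() reads them back. Purely as an illustration (these URLs and numbers are made up, not captured data), a referrer with two known subresources would serialize to something like:

    ["http://cdn.example.com/", 0.85, "http://static.example.com/", 0.12]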
@@ -198,6 +156,7 @@ ReferrerValue::ReferrerValue()
: birth_time_(base::Time::Now()),
navigation_count_(0),
preconnection_count_(0),
+ preresolution_count_(0),
subresource_use_rate_(kInitialExpectedValue) {
}
@@ -208,15 +167,12 @@ void ReferrerValue::SubresourceIsNeeded() {
subresource_use_rate_ += 1 - kWeightingForOldExpectedValue;
}
-bool ReferrerValue::IsPreconnectWorthDoing() {
- bool preconnecting = kPreconnectWorthyExpectedValue < subresource_use_rate_;
- if (preconnecting)
- ++preconnection_count_;
+void ReferrerValue::ReferrerWasObserved() {
subresource_use_rate_ *= kWeightingForOldExpectedValue;
// Note: the use rate is temporarily possibly incorrect, as we need to find
// out if we really end up connecting. This will happen in a few hundred
// milliseconds (when content arrives, etc.).
- return preconnecting;
+ // Value of subresource_use_rate_ should be sampled before this call.
}
} // namespace chrome_browser_net
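
Finally, an editorial sketch of how the reworked ReferrerValue API appears intended to be driven (the real caller is the Predictor; the thresholds below are placeholders, not the Predictor's actual constants): sample the use rate first, use it to choose between preconnecting and merely preresolving, then call ReferrerWasObserved() so the decay is applied, and call SubresourceIsNeeded() later only if a connection is really used.

    // Hypothetical caller-side ordering; thresholds are placeholders and the
    // speculative actions are shown only as comments.
    #include "chrome/browser/net/referrer.h"

    void SpeculateForSubresource(chrome_browser_net::ReferrerValue* value) {
      // 1. Sample the expected connection count before it is decayed.
      double expected = value->subresource_use_rate();

      // 2. Decide how aggressively to speculate (placeholder thresholds).
      if (expected > 0.7) {
        // Preconnect: warm up a TCP (and possibly SSL) connection to the host.
      } else if (expected > 0.2) {
        // Preresolve: only warm the DNS cache for the host name.
      }

      // 3. Apply the multiply-by-W half of the update.
      value->ReferrerWasObserved();

      // 4. If the subresource really is fetched shortly afterwards, the caller
      //    invokes value->SubresourceIsNeeded() to add the (1 - W) term.
    }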