-rw-r--r--  chrome/browser/browser_main.cc    16
-rw-r--r--  chrome/browser/net/dns_global.cc  32
-rw-r--r--  chrome/renderer/render_view.cc    64
-rw-r--r--  net/http/http_stream_parser.cc    29
4 files changed, 128 insertions(+), 13 deletions(-)
diff --git a/chrome/browser/browser_main.cc b/chrome/browser/browser_main.cc
index 0085c91..262397a 100644
--- a/chrome/browser/browser_main.cc
+++ b/chrome/browser/browser_main.cc
@@ -417,9 +417,23 @@ int BrowserMain(const MainFunctionParams& parameters) {
SystemMonitor system_monitor;
HighResolutionTimerManager hi_res_timer_manager;
- // Initialize statistical testing infrastructure.
+ // Initialize statistical testing infrastructure for the entire browser.
FieldTrialList field_trial;
+ // Set up a field trial to see if splitting the first transmitted packet helps
+ // with latency.
+ {
+ const FieldTrial::Probability kDivisor = 100;
+ FieldTrial* trial = new FieldTrial("PacketSplit", kDivisor);
+ // For each option (i.e., non-default), we have a fixed probability.
+ const FieldTrial::Probability kProbabilityPerGroup = 10; // 10% probability.
+ int split = trial->AppendGroup("_first_packet_split", kProbabilityPerGroup);
+ DCHECK_EQ(split, 0);
+ int intact = trial->AppendGroup("_first_packet_intact",
+ FieldTrial::kAllRemainingProbability);
+ DCHECK_EQ(intact, 1);
+ }
+
std::wstring app_name = chrome::kBrowserAppName;
std::string thread_name_string = WideToASCII(app_name + L"_BrowserMain");
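
To make the probability math in the hunk above concrete, here is a minimal standalone sketch of how a FieldTrial-style group assignment could work: a uniform draw over the kDivisor = 100 probability space, with each appended group claiming a consecutive slice and the last group absorbing the remainder. This only models the behavior the comments describe; the real implementation lives in base/field_trial.cc, and the RNG here is purely illustrative.

#include <cstdlib>
#include <ctime>
#include <iostream>
#include <string>

int main() {
  const int kDivisor = 100;             // Total probability space.
  const int kProbabilityPerGroup = 10;  // 10% slice per non-default group.

  std::srand(static_cast<unsigned int>(std::time(NULL)));
  int draw = std::rand() % kDivisor;    // Uniform draw in [0, kDivisor).

  // Groups claim consecutive slices in the order they were appended; the
  // group added with kAllRemainingProbability absorbs whatever is left.
  std::string group;
  if (draw < kProbabilityPerGroup)
    group = "_first_packet_split";      // Group index 0: 10% of clients.
  else
    group = "_first_packet_intact";     // Group index 1: remaining 90%.

  std::cout << "PacketSplit group: " << group << std::endl;
  return 0;
}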
diff --git a/chrome/browser/net/dns_global.cc b/chrome/browser/net/dns_global.cc
index a61b85d..f7fe990 100644
--- a/chrome/browser/net/dns_global.cc
+++ b/chrome/browser/net/dns_global.cc
@@ -543,13 +543,21 @@ DnsGlobalInit::DnsGlobalInit(PrefService* user_prefs,
// latency of page loads.
FieldTrial::Probability kDivisor = 100;
// For each option (i.e., non-default), we have a fixed probability.
- FieldTrial::Probability kProbabilityPerGroup = 3; // 3% probability.
+ FieldTrial::Probability kProbabilityPerGroup = 5; // 5% probability.
trial_ = new FieldTrial("DnsImpact", kDivisor);
// First option is to disable prefetching completely.
int disabled_prefetch = trial_->AppendGroup("_disabled_prefetch",
kProbabilityPerGroup);
+
+ // We're running two experiments at the same time. The first set of trials
+ // modulates the delay-time until we declare a congestion event (and purge
+ // our queue). The second modulates the number of concurrent resolutions we
+ // do at any time. Users are in exactly one trial (or the default) during
+ // any one run, and hence participate in only one experiment at a time.
+ // Experiment 1:
// Set congestion detection at 250, 500, or 750ms, rather than the 1 second
// default.
int max_250ms_prefetch = trial_->AppendGroup("_max_250ms_queue_prefetch",
@@ -561,6 +569,16 @@ DnsGlobalInit::DnsGlobalInit(PrefService* user_prefs,
// Set congestion detection at 2 seconds instead of the 1 second default.
int max_2s_prefetch = trial_->AppendGroup("_max_2s_queue_prefetch",
kProbabilityPerGroup);
+ // Experiment 2:
+ // Set max simultaneous resolutions to 2, 4, or 6, and scale the congestion
+ // limit proportionally (so we don't impact the average probability of
+ // asserting congestion very much).
+ int max_2_concurrent_prefetch = trial_->AppendGroup(
+ "_max_2_concurrent_prefetch", kProbabilityPerGroup);
+ int max_4_concurrent_prefetch = trial_->AppendGroup(
+ "_max_4_concurrent_prefetch", kProbabilityPerGroup);
+ int max_6_concurrent_prefetch = trial_->AppendGroup(
+ "_max_6_concurrent_prefetch", kProbabilityPerGroup);
trial_->AppendGroup("_default_enabled_prefetch",
FieldTrial::kAllRemainingProbability);
@@ -571,9 +589,7 @@ DnsGlobalInit::DnsGlobalInit(PrefService* user_prefs,
if (trial_->group() != disabled_prefetch) {
// Initialize the DNS prefetch system.
-
size_t max_concurrent = kMaxPrefetchConcurrentLookups;
-
int max_queueing_delay_ms = kMaxPrefetchQueueingDelayMs;
if (trial_->group() == max_250ms_prefetch)
@@ -584,6 +600,16 @@ DnsGlobalInit::DnsGlobalInit(PrefService* user_prefs,
max_queueing_delay_ms = 750;
else if (trial_->group() == max_2s_prefetch)
max_queueing_delay_ms = 2000;
+ if (trial_->group() == max_2_concurrent_prefetch)
+ max_concurrent = 2;
+ else if (trial_->group() == max_4_concurrent_prefetch)
+ max_concurrent = 4;
+ else if (trial_->group() == max_6_concurrent_prefetch)
+ max_concurrent = 6;
+ // Scale acceptable delay so we don't cause congestion limits to fire as
+ // we modulate max_concurrent (*if* we are modulating it at all); guard the
+ // rescale so the Experiment 1 delay values above are not clobbered.
+ if (max_concurrent != kMaxPrefetchConcurrentLookups)
+ max_queueing_delay_ms = (kMaxPrefetchQueueingDelayMs *
+ kMaxPrefetchConcurrentLookups) / max_concurrent;
TimeDelta max_queueing_delay(
TimeDelta::FromMilliseconds(max_queueing_delay_ms));
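
The delay scaling at the end of this file is easiest to see with numbers. The 1-second default for kMaxPrefetchQueueingDelayMs is stated in the comments above; the value 8 for kMaxPrefetchConcurrentLookups is only an assumption for illustration. A small sketch of the arithmetic:

#include <cstdio>

int main() {
  const int kMaxPrefetchQueueingDelayMs = 1000;  // The 1-second default.
  const int kMaxPrefetchConcurrentLookups = 8;   // Assumed for illustration.

  const int kConcurrencyGroups[] = { 2, 4, 6 };
  for (int i = 0; i < 3; ++i) {
    int max_concurrent = kConcurrencyGroups[i];
    // Fewer parallel resolutions means each queued name waits longer, so the
    // acceptable queueing delay scales up in inverse proportion.
    int max_queueing_delay_ms = (kMaxPrefetchQueueingDelayMs *
        kMaxPrefetchConcurrentLookups) / max_concurrent;
    std::printf("max_concurrent=%d -> max_queueing_delay_ms=%d\n",
                max_concurrent, max_queueing_delay_ms);
  }
  // Prints 4000, 2000, and 1333 ms respectively; with the default
  // concurrency the formula reduces to kMaxPrefetchQueueingDelayMs itself.
  return 0;
}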
diff --git a/chrome/renderer/render_view.cc b/chrome/renderer/render_view.cc
index 855db43..5c0b630 100644
--- a/chrome/renderer/render_view.cc
+++ b/chrome/renderer/render_view.cc
@@ -1640,7 +1640,8 @@ void RenderView::didStopLoading() {
Send(new ViewHostMsg_DidStopLoading(routing_id_));
- MessageLoop::current()->PostDelayedTask(FROM_HERE,
+ MessageLoop::current()->PostDelayedTask(
+ FROM_HERE,
method_factory_.NewRunnableMethod(&RenderView::CapturePageInfo, page_id_,
false),
kDelayForCaptureMs);
@@ -2561,7 +2562,8 @@ void RenderView::didCommitProvisionalLoad(WebFrame* frame,
history_list_offset_ = chrome::kMaxSessionHistoryEntries - 1;
history_list_length_ = history_list_offset_ + 1;
- MessageLoop::current()->PostDelayedTask(FROM_HERE,
+ MessageLoop::current()->PostDelayedTask(
+ FROM_HERE,
method_factory_.NewRunnableMethod(&RenderView::CapturePageInfo,
page_id_, true),
kDelayForForcedCaptureMs);
@@ -4146,10 +4148,18 @@ void RenderView::DumpLoadHistograms() const {
NavigationState::FromDataSource(main_frame->dataSource());
Time finish = navigation_state->finish_load_time();
- // If we've already dumped or we haven't finished loading, do nothing.
- if (navigation_state->load_histograms_recorded() || finish.is_null())
+ // If we've already dumped, do nothing.
+ if (navigation_state->load_histograms_recorded())
return;
+ // Handle the case where the user hits "stop" or "back" before the page
+ // finishes loading.
+ bool abandoned_page = finish.is_null();
+ if (abandoned_page) {
+ finish = Time::Now();
+ navigation_state->set_finish_load_time(finish);
+ }
+ UMA_HISTOGRAM_ENUMERATION("Renderer4.Abandoned", abandoned_page ? 1 : 0, 2);
+
LogNavigationState(navigation_state, main_frame->dataSource());
NavigationState::LoadType load_type = navigation_state->load_type();
@@ -4271,8 +4281,11 @@ void RenderView::DumpLoadHistograms() const {
static bool use_dns_histogram(FieldTrialList::Find("DnsImpact") &&
!FieldTrialList::Find("DnsImpact")->group_name().empty());
if (use_dns_histogram) {
- UMA_HISTOGRAM_ENUMERATION(FieldTrial::MakeName(
- "Renderer4.LoadType", "DnsImpact"),
+ UMA_HISTOGRAM_ENUMERATION(
+ FieldTrial::MakeName("Renderer4.Abandoned", "DnsImpact"),
+ abandoned_page ? 1 : 0, 2);
+ UMA_HISTOGRAM_ENUMERATION(
+ FieldTrial::MakeName("Renderer4.LoadType", "DnsImpact"),
load_type, NavigationState::kLoadTypeMax);
switch (load_type) {
case NavigationState::NORMAL_LOAD:
@@ -4304,6 +4317,45 @@ void RenderView::DumpLoadHistograms() const {
}
}
+ static bool use_packet_split_histogram(FieldTrialList::Find("PacketSplit") &&
+ !FieldTrialList::Find("PacketSplit")->group_name().empty());
+ if (use_packet_split_histogram) {
+ UMA_HISTOGRAM_ENUMERATION(
+ FieldTrial::MakeName("Renderer4.Abandoned", "PacketSplit"),
+ abandoned_page ? 1 : 0, 2);
+ UMA_HISTOGRAM_ENUMERATION(
+ FieldTrial::MakeName("Renderer4.LoadType", "PacketSplit"),
+ load_type, NavigationState::kLoadTypeMax);
+ switch (load_type) {
+ case NavigationState::NORMAL_LOAD:
+ UMA_HISTOGRAM_CUSTOM_TIMES(FieldTrial::MakeName(
+ "Renderer4.BeginToFinish_NormalLoad", "PacketSplit"),
+ begin_to_finish, kBeginToFinishMin, kBeginToFinishMax,
+ kBeginToFinishBucketCount);
+ break;
+ case NavigationState::LINK_LOAD_NORMAL:
+ UMA_HISTOGRAM_CUSTOM_TIMES(FieldTrial::MakeName(
+ "Renderer4.BeginToFinish_LinkLoadNormal", "PacketSplit"),
+ begin_to_finish, kBeginToFinishMin, kBeginToFinishMax,
+ kBeginToFinishBucketCount);
+ break;
+ case NavigationState::LINK_LOAD_RELOAD:
+ UMA_HISTOGRAM_CUSTOM_TIMES(FieldTrial::MakeName(
+ "Renderer4.BeginToFinish_LinkLoadReload", "PacketSplit"),
+ begin_to_finish, kBeginToFinishMin, kBeginToFinishMax,
+ kBeginToFinishBucketCount);
+ break;
+ case NavigationState::LINK_LOAD_CACHE_STALE_OK:
+ UMA_HISTOGRAM_CUSTOM_TIMES(FieldTrial::MakeName(
+ "Renderer4.BeginToFinish_LinkLoadStaleOk", "PacketSplit"),
+ begin_to_finish, kBeginToFinishMin, kBeginToFinishMax,
+ kBeginToFinishBucketCount);
+ break;
+ default:
+ break;
+ }
+ }
+
static bool use_sdch_histogram(FieldTrialList::Find("GlobalSdch") &&
!FieldTrialList::Find("GlobalSdch")->group_name().empty());
if (use_sdch_histogram) {
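
The render_view.cc changes rely on FieldTrial::MakeName to fan each metric out into one histogram per trial arm. A simplified model of what that produces follows; the real helper in base/field_trial.cc looks the trial up by its name, whereas this sketch takes the group name directly.

#include <iostream>
#include <string>

// Simplified stand-in for FieldTrial::MakeName: group names in this patch
// already begin with '_', so concatenation yields the per-arm histogram name.
std::string MakeTrialName(const std::string& base_name,
                          const std::string& group_name) {
  return base_name + group_name;
}

int main() {
  // Renderer4.Abandoned_first_packet_split
  std::cout << MakeTrialName("Renderer4.Abandoned", "_first_packet_split")
            << std::endl;
  // Renderer4.BeginToFinish_NormalLoad_first_packet_intact
  std::cout << MakeTrialName("Renderer4.BeginToFinish_NormalLoad",
                             "_first_packet_intact")
            << std::endl;
  return 0;
}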
diff --git a/net/http/http_stream_parser.cc b/net/http/http_stream_parser.cc
index 53c57f1..334e770 100644
--- a/net/http/http_stream_parser.cc
+++ b/net/http/http_stream_parser.cc
@@ -5,6 +5,7 @@
#include "net/http/http_stream_parser.h"
#include "base/compiler_specific.h"
+#include "base/field_trial.h"
#include "base/trace_event.h"
#include "net/base/io_buffer.h"
#include "net/http/http_request_info.h"
@@ -186,17 +187,39 @@ int HttpStreamParser::DoLoop(int result) {
int HttpStreamParser::DoSendHeaders(int result) {
request_headers_->DidConsume(result);
- if (request_headers_->BytesRemaining() > 0) {
+ // Set up a field trial to see if splitting the first packet helps with
+ // latency. Loss of the first packet may otherwise cause an RTO of 3 seconds
+ // (at least on Windows), but with two packets the probability of losing
+ // both without any ack to alert us should be lower, and receipt of a first
+ // ack will lower the RTO dramatically, so recovery will be fast.
+ static const FieldTrial* kTrial = FieldTrialList::Find("PacketSplit");
+ static const bool kForceSecondPacket(kTrial && (kTrial->group() == 0));
+ if (kForceSecondPacket)
+ DCHECK_EQ(kTrial->group_name(), "_first_packet_split");
+
+ int bytes_remaining = request_headers_->BytesRemaining();
+ if (bytes_remaining > 0) {
// Record our best estimate of the 'request time' as the time when we send
// out the first bytes of the request headers.
- if (request_headers_->BytesRemaining() == request_headers_->size()) {
+ if (bytes_remaining == request_headers_->size()) {
response_->request_time = base::Time::Now();
+
+ // Note that we ONLY ensure second packet when this is a fresh connection,
+ // as a reused connection (re: reuse_type()) already had traffic, and
+ // hence has an RTO which will provide for a fast packet-loss recovery.
+ // We also avoid splitting out a second packet if we have a request_body_
+ // to send, as it will provide the desired second packet (see bug 38703).
+ if (kForceSecondPacket &&
+ connection_->reuse_type() != ClientSocketHandle::REUSED_IDLE &&
+ (request_body_ == NULL || !request_body_->size()) &&
+ bytes_remaining > 1)
+ --bytes_remaining; // Leave one byte for next packet.
}
// TODO(vandebo) remove when bug 31096 is resolved
CHECK(connection_);
CHECK(connection_->socket());
result = connection_->socket()->Write(request_headers_,
- request_headers_->BytesRemaining(),
+ bytes_remaining,
&io_callback_);
} else if (request_body_ != NULL && request_body_->size()) {
io_state_ = STATE_SENDING_BODY;
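
The net effect of the http_stream_parser.cc change is simple byte accounting: on a fresh connection in the split group, the first Write is asked for all but one byte of the headers, so the final byte goes out as its own second packet on the next DoSendHeaders pass. Here is a standalone sketch under simplified assumptions: a toy buffer stands in for net::DrainableIOBuffer, and the socket write is simulated with printf.

#include <cstdio>
#include <string>

// Toy stand-in for net::DrainableIOBuffer.
struct FakeDrainableBuffer {
  std::string data;
  int consumed;
  int size() const { return static_cast<int>(data.size()); }
  int BytesRemaining() const { return size() - consumed; }
  void DidConsume(int n) { consumed += n; }
};

int main() {
  FakeDrainableBuffer headers = {
      "GET / HTTP/1.1\r\nHost: example.com\r\n\r\n", 0 };
  const bool kForceSecondPacket = true;  // As if in "_first_packet_split".

  while (headers.BytesRemaining() > 0) {
    int bytes_remaining = headers.BytesRemaining();
    // On the very first write of the request, hold back one byte so a
    // second, separate packet is guaranteed.
    if (kForceSecondPacket && bytes_remaining == headers.size() &&
        bytes_remaining > 1)
      --bytes_remaining;  // Leave one byte for the next packet.
    std::printf("Write(%d bytes)\n", bytes_remaining);  // Simulated socket.
    headers.DidConsume(bytes_remaining);
  }
  // Output: Write(36 bytes) then Write(1 bytes): two packets on the wire.
  return 0;
}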