author     pfeldman@chromium.org <pfeldman@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>  2010-07-16 08:25:41 +0000
committer  pfeldman@chromium.org <pfeldman@chromium.org@0039d316-1c4b-4281-b951-d872f2087c98>  2010-07-16 08:25:41 +0000
commit     e25e955817c9d6b9356e815d9f0c9948669fce95 (patch)
tree       72708e6be5e60ee9c18568a3a5f099cad9fb11a0 /chrome
parent     7cae2ab87cabee2f80d55fcb5062014138d2cf60 (diff)
DevTools & WebTiming: Migrate from PassiveLogCollector to dedicated LoadTimingObserver.

Review URL: http://codereview.chromium.org/2909016

git-svn-id: svn://svn.chromium.org/chrome/trunk/src@52634 0039d316-1c4b-4281-b951-d872f2087c98
Diffstat (limited to 'chrome')
-rw-r--r--  chrome/browser/net/chrome_net_log.cc                    |   7
-rw-r--r--  chrome/browser/net/chrome_net_log.h                     |   6
-rw-r--r--  chrome/browser/net/load_timing_observer.cc              | 206
-rw-r--r--  chrome/browser/net/load_timing_observer.h               |  68
-rw-r--r--  chrome/browser/net/passive_log_collector.cc             |   8
-rw-r--r--  chrome/browser/net/passive_log_collector.h              |   3
-rw-r--r--  chrome/browser/renderer_host/async_resource_handler.cc  | 164
-rw-r--r--  chrome/chrome_browser.gypi                              |   2
-rw-r--r--  chrome/common/render_messages.h                         |   4
9 files changed, 307 insertions, 161 deletions
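
At a high level, the change replaces post-hoc scraping of PassiveLogCollector entries with a dedicated NetLog observer that builds a per-request timing record as events arrive, which AsyncResourceHandler can then fetch with a single lookup. The following is a minimal, self-contained sketch of that observer/lookup pattern in plain C++; the names (MiniNetLog, TimingObserver, TimingRecord) are invented for illustration and are not the Chromium types appearing in the diff below.

#include <cstdint>
#include <iostream>
#include <unordered_map>
#include <vector>

// Stand-in for a NetLog entry: every entry carries the id of its source.
struct Event {
  uint32_t source_id;
  int type;
};

class Observer {
 public:
  virtual ~Observer() = default;
  virtual void OnAddEntry(const Event& e) = 0;
};

// Stand-in for ChromeNetLog: fans each entry out to the registered observers.
class MiniNetLog {
 public:
  void AddObserver(Observer* o) { observers_.push_back(o); }
  void AddEntry(const Event& e) {
    for (Observer* o : observers_)
      o->OnAddEntry(e);
  }
 private:
  std::vector<Observer*> observers_;
};

// Stand-in for LoadTimingObserver: accumulates a record per source id so that
// a consumer (the resource handler in this CL) can fetch it in O(1).
class TimingObserver : public Observer {
 public:
  struct TimingRecord {
    int events_seen = 0;
  };

  void OnAddEntry(const Event& e) override {
    records_[e.source_id].events_seen++;
  }
  const TimingRecord* GetRecord(uint32_t source_id) const {
    auto it = records_.find(source_id);
    return it == records_.end() ? nullptr : &it->second;
  }
 private:
  std::unordered_map<uint32_t, TimingRecord> records_;
};

int main() {
  MiniNetLog log;
  TimingObserver timing;
  log.AddObserver(&timing);
  log.AddEntry({42, /*type=*/1});
  log.AddEntry({42, /*type=*/2});
  if (const auto* record = timing.GetRecord(42))  // what the handler now does
    std::cout << "entries observed for source 42: " << record->events_seen << "\n";
}
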
diff --git a/chrome/browser/net/chrome_net_log.cc b/chrome/browser/net/chrome_net_log.cc
index efe801fc..a696552 100644
--- a/chrome/browser/net/chrome_net_log.cc
+++ b/chrome/browser/net/chrome_net_log.cc
@@ -9,18 +9,22 @@
#include "base/logging.h"
#include "base/string_util.h"
#include "chrome/browser/chrome_thread.h"
+#include "chrome/browser/net/load_timing_observer.h"
#include "chrome/browser/net/passive_log_collector.h"
ChromeNetLog::ChromeNetLog()
: next_id_(1),
- passive_collector_(new PassiveLogCollector) {
+ passive_collector_(new PassiveLogCollector),
+ load_timing_observer_(new LoadTimingObserver) {
DCHECK(ChromeThread::CurrentlyOn(ChromeThread::IO));
AddObserver(passive_collector_.get());
+ AddObserver(load_timing_observer_.get());
}
ChromeNetLog::~ChromeNetLog() {
DCHECK(ChromeThread::CurrentlyOn(ChromeThread::IO));
RemoveObserver(passive_collector_.get());
+ RemoveObserver(load_timing_observer_.get());
}
void ChromeNetLog::AddEntry(EventType type,
@@ -55,4 +59,3 @@ void ChromeNetLog::RemoveObserver(Observer* observer) {
DCHECK(ChromeThread::CurrentlyOn(ChromeThread::IO));
observers_.RemoveObserver(observer);
}
-
diff --git a/chrome/browser/net/chrome_net_log.h b/chrome/browser/net/chrome_net_log.h
index 3d94ac6..ee9cd22 100644
--- a/chrome/browser/net/chrome_net_log.h
+++ b/chrome/browser/net/chrome_net_log.h
@@ -9,6 +9,7 @@
#include "base/scoped_ptr.h"
#include "net/base/net_log.h"
+class LoadTimingObserver;
class PassiveLogCollector;
// ChromeNetLog is an implementation of NetLog that dispatches network log
@@ -52,9 +53,14 @@ class ChromeNetLog : public net::NetLog {
return passive_collector_.get();
}
+ LoadTimingObserver* load_timing_observer() {
+ return load_timing_observer_.get();
+ }
+
private:
uint32 next_id_;
scoped_ptr<PassiveLogCollector> passive_collector_;
+ scoped_ptr<LoadTimingObserver> load_timing_observer_;
ObserverList<Observer, true> observers_;
DISALLOW_COPY_AND_ASSIGN(ChromeNetLog);
diff --git a/chrome/browser/net/load_timing_observer.cc b/chrome/browser/net/load_timing_observer.cc
new file mode 100644
index 0000000..cd80b7d
--- /dev/null
+++ b/chrome/browser/net/load_timing_observer.cc
@@ -0,0 +1,206 @@
+// Copyright (c) 2010 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "chrome/browser/net/load_timing_observer.h"
+
+#include "base/compiler_specific.h"
+#include "base/time.h"
+#include "net/base/load_flags.h"
+#include "net/url_request/url_request_netlog_params.h"
+
+using base::Time;
+using base::TimeTicks;
+using webkit_glue::ResourceLoaderBridge;
+
+const size_t kMaxNumEntries = 1000;
+
+namespace {
+
+// We know that this conversion is not solid and suffers from world clock
+// changes, but it should be good enough for the load timing info.
+static Time TimeTicksToTime(const TimeTicks& time_ticks) {
+ static int64 tick_to_time_offset;
+ static bool tick_to_time_offset_available = false;
+ if (!tick_to_time_offset_available) {
+ int64 cur_time = (Time::Now() - Time()).InMicroseconds();
+ int64 cur_time_ticks = (TimeTicks::Now() - TimeTicks()).InMicroseconds();
+ // If we add this number to a time tick value, it gives the timestamp.
+ tick_to_time_offset = cur_time - cur_time_ticks;
+ tick_to_time_offset_available = true;
+ }
+ return Time::FromInternalValue(time_ticks.ToInternalValue() +
+ tick_to_time_offset);
+}
+
+static int32 TimeTicksToOffset(
+ const TimeTicks& time_ticks,
+ LoadTimingObserver::URLRequestRecord* record) {
+ return static_cast<int32>(
+ (time_ticks - record->base_ticks).InMillisecondsRoundedUp());
+}
+
+}  // namespace
+
+LoadTimingObserver::URLRequestRecord::URLRequestRecord()
+ : connect_job_id(net::NetLog::Source::kInvalidId),
+ socket_log_id(net::NetLog::Source::kInvalidId),
+ socket_reused(false) {
+}
+
+LoadTimingObserver::LoadTimingObserver() {
+}
+
+LoadTimingObserver::~LoadTimingObserver() {
+}
+
+LoadTimingObserver::URLRequestRecord*
+LoadTimingObserver::GetURLRequestRecord(uint32 source_id) {
+ URLRequestToRecordMap::iterator it = url_request_to_record_.find(source_id);
+ if (it != url_request_to_record_.end())
+ return &it->second;
+ return NULL;
+}
+
+void LoadTimingObserver::OnAddEntry(net::NetLog::EventType type,
+ const base::TimeTicks& time,
+ const net::NetLog::Source& source,
+ net::NetLog::EventPhase phase,
+ net::NetLog::EventParameters* params) {
+ if (source.type == net::NetLog::SOURCE_URL_REQUEST)
+ OnAddURLRequestEntry(type, time, source, phase, params);
+ else if (source.type == net::NetLog::SOURCE_CONNECT_JOB)
+ OnAddConnectJobEntry(type, time, source, phase, params);
+}
+
+void LoadTimingObserver::OnAddURLRequestEntry(
+ net::NetLog::EventType type,
+ const base::TimeTicks& time,
+ const net::NetLog::Source& source,
+ net::NetLog::EventPhase phase,
+ net::NetLog::EventParameters* params) {
+ bool is_begin = phase == net::NetLog::PHASE_BEGIN;
+ bool is_end = phase == net::NetLog::PHASE_END;
+
+ if (type == net::NetLog::TYPE_URL_REQUEST_START_JOB) {
+ if (is_begin) {
+ // Only record timing for requests with the corresponding load flag set.
+ int load_flags = static_cast<URLRequestStartEventParameters*>(params)->
+ load_flags();
+ if (!(load_flags & net::LOAD_ENABLE_LOAD_TIMING))
+ return;
+
+ // Prevents memory from growing unbounded in case something goes
+ // wrong. Should not happen.
+ if (url_request_to_record_.size() > kMaxNumEntries) {
+ LOG(WARNING) << "The load timing observer url request count has grown "
+ "larger than expected, resetting";
+ url_request_to_record_.clear();
+ }
+
+ URLRequestRecord& record = url_request_to_record_[source.id];
+ record.base_ticks = time;
+ record.timing.base_time = TimeTicksToTime(time);
+ }
+ return;
+ } else if (type == net::NetLog::TYPE_REQUEST_ALIVE) {
+ // Clean up records based on the TYPE_REQUEST_ALIVE entry.
+ if (is_end)
+ url_request_to_record_.erase(source.id);
+ return;
+ }
+
+ URLRequestRecord* record = GetURLRequestRecord(source.id);
+ if (!record)
+ return;
+
+ ResourceLoaderBridge::LoadTimingInfo& timing = record->timing;
+
+ switch (type) {
+ case net::NetLog::TYPE_PROXY_SERVICE:
+ if (is_begin)
+ timing.proxy_start = TimeTicksToOffset(time, record);
+ else if (is_end)
+ timing.proxy_end = TimeTicksToOffset(time, record);
+ break;
+ case net::NetLog::TYPE_SOCKET_POOL:
+ if (is_begin)
+ timing.connect_start = TimeTicksToOffset(time, record);
+ else if (is_end)
+ timing.connect_end = TimeTicksToOffset(time, record);
+ break;
+ case net::NetLog::TYPE_SOCKET_POOL_BOUND_TO_CONNECT_JOB:
+ {
+ uint32 connect_job_id = static_cast<net::NetLogSourceParameter*>(
+ params)->value().id;
+ ConnectJobToRecordMap::iterator it =
+ connect_job_to_record_.find(connect_job_id);
+ if (it != connect_job_to_record_.end() &&
+ !it->second.dns_start.is_null()) {
+ timing.dns_start = TimeTicksToOffset(it->second.dns_start, record);
+ timing.dns_end = TimeTicksToOffset(it->second.dns_end, record);
+ }
+ }
+ break;
+ case net::NetLog::TYPE_SOCKET_POOL_REUSED_AN_EXISTING_SOCKET:
+ record->socket_reused = true;
+ break;
+ case net::NetLog::TYPE_SOCKET_POOL_BOUND_TO_SOCKET:
+ record->socket_log_id = static_cast<net::NetLogSourceParameter*>(
+ params)->value().id;
+ break;
+ case net::NetLog::TYPE_HTTP_TRANSACTION_SEND_REQUEST:
+ case net::NetLog::TYPE_SPDY_TRANSACTION_SEND_REQUEST:
+ if (is_begin)
+ timing.send_start = TimeTicksToOffset(time, record);
+ else if (is_end)
+ timing.send_end = TimeTicksToOffset(time, record);
+ break;
+ case net::NetLog::TYPE_HTTP_TRANSACTION_READ_HEADERS:
+ case net::NetLog::TYPE_SPDY_TRANSACTION_READ_HEADERS:
+ if (is_begin)
+ timing.receive_headers_start = TimeTicksToOffset(time, record);
+ else if (is_end)
+ timing.receive_headers_end = TimeTicksToOffset(time, record);
+ break;
+ default:
+ break;
+ }
+}
+
+void LoadTimingObserver::OnAddConnectJobEntry(
+ net::NetLog::EventType type,
+ const base::TimeTicks& time,
+ const net::NetLog::Source& source,
+ net::NetLog::EventPhase phase,
+ net::NetLog::EventParameters* params) {
+ bool is_begin = phase == net::NetLog::PHASE_BEGIN;
+ bool is_end = phase == net::NetLog::PHASE_END;
+
+ // Manage record lifetime based on the SOCKET_POOL_CONNECT_JOB entry.
+ if (type == net::NetLog::TYPE_SOCKET_POOL_CONNECT_JOB) {
+ if (is_begin) {
+ // Prevents memory from growing unbounded in case something goes
+ // wrong. Should not happen.
+ if (connect_job_to_record_.size() > kMaxNumEntries) {
+ LOG(WARNING) << "The load timing observer connect job count has grown "
+ "larger than expected, resetting";
+ connect_job_to_record_.clear();
+ }
+
+ connect_job_to_record_.insert(
+ std::make_pair(source.id, ConnectJobRecord()));
+ } else if (is_end) {
+ connect_job_to_record_.erase(source.id);
+ }
+ } else if (type == net::NetLog::TYPE_HOST_RESOLVER_IMPL) {
+ ConnectJobToRecordMap::iterator it =
+ connect_job_to_record_.find(source.id);
+ if (it != connect_job_to_record_.end()) {
+ if (is_begin)
+ it->second.dns_start = time;
+ else if (is_end)
+ it->second.dns_end = time;
+ }
+ }
+}
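
The trickiest part of the new file is TimeTicksToTime(): TimeTicks is a monotonic clock, so the code captures the difference between the wall clock and the tick clock once and reuses it to translate later tick values into timestamps, accepting (as the comment says) that wall-clock adjustments will skew the result. The per-event values produced by TimeTicksToOffset() are then just millisecond deltas from the record's base_ticks. A roughly equivalent, standalone sketch in standard C++ <chrono> terms, with invented names and no Chromium dependencies:

#include <chrono>
#include <ctime>
#include <iostream>

// Translate a monotonic (steady_clock) reading into wall-clock time by adding
// a lazily computed, process-wide offset -- the same idea as TimeTicksToTime().
std::chrono::system_clock::time_point TicksToWallClock(
    std::chrono::steady_clock::time_point ticks) {
  using namespace std::chrono;
  // Computed once, mirroring the function-local statics in the CL; any later
  // wall-clock change is deliberately not compensated for.
  static const system_clock::duration offset =
      system_clock::now().time_since_epoch() -
      duration_cast<system_clock::duration>(
          steady_clock::now().time_since_epoch());
  return system_clock::time_point(
      duration_cast<system_clock::duration>(ticks.time_since_epoch()) + offset);
}

int main() {
  std::time_t t = std::chrono::system_clock::to_time_t(
      TicksToWallClock(std::chrono::steady_clock::now()));
  std::cout << "now (via tick conversion): " << std::ctime(&t);
}
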
diff --git a/chrome/browser/net/load_timing_observer.h b/chrome/browser/net/load_timing_observer.h
new file mode 100644
index 0000000..b49de5d
--- /dev/null
+++ b/chrome/browser/net/load_timing_observer.h
@@ -0,0 +1,68 @@
+// Copyright (c) 2010 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CHROME_BROWSER_NET_LOAD_TIMING_OBSERVER_H_
+#define CHROME_BROWSER_NET_LOAD_TIMING_OBSERVER_H_
+
+#include "base/hash_tables.h"
+#include "base/time.h"
+#include "chrome/browser/net/chrome_net_log.h"
+#include "net/base/net_log.h"
+#include "webkit/glue/resource_loader_bridge.h"
+
+// LoadTimingObserver watches the NetLog event stream and collects the network
+// timing information.
+class LoadTimingObserver : public ChromeNetLog::Observer {
+ public:
+ struct URLRequestRecord {
+ URLRequestRecord();
+
+ webkit_glue::ResourceLoaderBridge::LoadTimingInfo timing;
+ uint32 connect_job_id;
+ uint32 socket_log_id;
+ bool socket_reused;
+ base::TimeTicks base_ticks;
+ };
+
+ struct ConnectJobRecord {
+ base::TimeTicks dns_start;
+ base::TimeTicks dns_end;
+ };
+
+ LoadTimingObserver();
+ ~LoadTimingObserver();
+
+ URLRequestRecord* GetURLRequestRecord(uint32 source_id);
+
+ // Observer implementation:
+ virtual void OnAddEntry(net::NetLog::EventType type,
+ const base::TimeTicks& time,
+ const net::NetLog::Source& source,
+ net::NetLog::EventPhase phase,
+ net::NetLog::EventParameters* params);
+ private:
+ void OnAddURLRequestEntry(net::NetLog::EventType type,
+ const base::TimeTicks& time,
+ const net::NetLog::Source& source,
+ net::NetLog::EventPhase phase,
+ net::NetLog::EventParameters* params);
+
+ void OnAddConnectJobEntry(net::NetLog::EventType type,
+ const base::TimeTicks& time,
+ const net::NetLog::Source& source,
+ net::NetLog::EventPhase phase,
+ net::NetLog::EventParameters* params);
+
+ URLRequestRecord* CreateURLRequestRecord(uint32 source_id);
+ void DeleteURLRequestRecord(uint32 source_id);
+
+ typedef base::hash_map<uint32, URLRequestRecord> URLRequestToRecordMap;
+ typedef base::hash_map<uint32, ConnectJobRecord> ConnectJobToRecordMap;
+ URLRequestToRecordMap url_request_to_record_;
+ ConnectJobToRecordMap connect_job_to_record_;
+
+ DISALLOW_COPY_AND_ASSIGN(LoadTimingObserver);
+};
+
+#endif // CHROME_BROWSER_NET_LOAD_TIMING_OBSERVER_H_
diff --git a/chrome/browser/net/passive_log_collector.cc b/chrome/browser/net/passive_log_collector.cc
index c96c3a7..152d332 100644
--- a/chrome/browser/net/passive_log_collector.cc
+++ b/chrome/browser/net/passive_log_collector.cc
@@ -241,14 +241,6 @@ void PassiveLogCollector::SourceTracker::AppendAllEntries(
}
}
-PassiveLogCollector::SourceInfo*
-PassiveLogCollector::SourceTracker::GetSourceInfo(uint32 source_id) {
- SourceIDToInfoMap::iterator it = sources_.find(source_id);
- if (it != sources_.end())
- return &(it->second);
- return NULL;
-}
-
void PassiveLogCollector::SourceTracker::AddToDeletionQueue(
uint32 source_id) {
DCHECK(sources_.find(source_id) != sources_.end());
diff --git a/chrome/browser/net/passive_log_collector.h b/chrome/browser/net/passive_log_collector.h
index 316aada1..7615b28 100644
--- a/chrome/browser/net/passive_log_collector.h
+++ b/chrome/browser/net/passive_log_collector.h
@@ -142,9 +142,6 @@ class PassiveLogCollector : public ChromeNetLog::Observer {
virtual void Clear();
virtual void AppendAllEntries(EntryList* out) const;
- // Finds a source info with given id.
- SourceInfo* GetSourceInfo(uint32 id);
-
#ifdef UNIT_TEST
// Helper used to inspect the current state by unit-tests.
// Returns a copy of the source infos held by the tracker.
diff --git a/chrome/browser/renderer_host/async_resource_handler.cc b/chrome/browser/renderer_host/async_resource_handler.cc
index 3ab36a2..bbcb89e 100644
--- a/chrome/browser/renderer_host/async_resource_handler.cc
+++ b/chrome/browser/renderer_host/async_resource_handler.cc
@@ -4,13 +4,14 @@
#include "chrome/browser/renderer_host/async_resource_handler.h"
+#include "base/hash_tables.h"
#include "base/logging.h"
#include "base/process.h"
#include "base/shared_memory.h"
#include "chrome/browser/browser_process.h"
#include "chrome/browser/net/chrome_net_log.h"
#include "chrome/browser/net/chrome_url_request_context.h"
-#include "chrome/browser/net/passive_log_collector.h"
+#include "chrome/browser/net/load_timing_observer.h"
#include "chrome/browser/renderer_host/global_request_id.h"
#include "chrome/browser/renderer_host/resource_dispatcher_host_request_info.h"
#include "chrome/common/render_messages.h"
@@ -35,22 +36,6 @@ const int kInitialReadBufSize = 32768;
// The maximum size of the shared memory buffer. (512 kilobytes).
const int kMaxReadBufSize = 524288;
-// We know that this conversion is not solid and suffers from world clock
-// changes, but it should be good enough for the load timing info.
-static Time TimeTicksToTime(const TimeTicks& time_ticks) {
- static int64 tick_to_time_offset;
- static bool tick_to_time_offset_available = false;
- if (!tick_to_time_offset_available) {
- int64 cur_time = (Time::Now() - Time()).InMicroseconds();
- int64 cur_time_ticks = (TimeTicks::Now() - TimeTicks()).InMicroseconds();
- // If we add this number to a time tick value, it gives the timestamp.
- tick_to_time_offset = cur_time - cur_time_ticks;
- tick_to_time_offset_available = true;
- }
- return Time::FromInternalValue(time_ticks.ToInternalValue() +
- tick_to_time_offset);
-}
-
} // namespace
// Our version of IOBuffer that uses shared memory.
@@ -108,135 +93,22 @@ AsyncResourceHandler::~AsyncResourceHandler() {
void AsyncResourceHandler::PopulateTimingInfo(URLRequest* request,
ResourceResponse* response) {
- uint32 source_id = request->net_log().source().id;
- ChromeNetLog* chrome_net_log = static_cast<ChromeNetLog*>(
- request->net_log().net_log());
-
- PassiveLogCollector* collector = chrome_net_log->passive_collector();
- PassiveLogCollector::SourceTracker* url_tracker =
- static_cast<PassiveLogCollector::SourceTracker*>(collector->
- GetTrackerForSourceType(net::NetLog::SOURCE_URL_REQUEST));
-
- PassiveLogCollector::SourceInfo* url_request =
- url_tracker->GetSourceInfo(source_id);
-
- if (!url_request)
+ if (!(request->load_flags() & net::LOAD_ENABLE_LOAD_TIMING))
return;
- ResourceResponseHead& response_head = response->response_head;
- webkit_glue::ResourceLoaderBridge::LoadTimingInfo& timing =
- response_head.load_timing;
-
- uint32 connect_job_id = net::NetLog::Source::kInvalidId;
-
- base::TimeTicks base_time;
-
- for (PassiveLogCollector::EntryList::const_iterator it =
- url_request->entries.begin();
- it != url_request->entries.end(); ++it) {
- const PassiveLogCollector::Entry& entry = *it;
-
- bool is_begin = entry.phase == net::NetLog::PHASE_BEGIN;
- bool is_end = entry.phase == net::NetLog::PHASE_END;
-
- switch (entry.type) {
- case net::NetLog::TYPE_URL_REQUEST_START_JOB:
- if (is_begin) {
- // Reset state so that we captured last redirect only.
- timing.base_time = TimeTicksToTime(entry.time);
- base_time = entry.time;
- connect_job_id = net::NetLog::Source::kInvalidId;
- }
- break;
- case net::NetLog::TYPE_PROXY_SERVICE:
- if (is_begin) {
- timing.proxy_start = static_cast<int32>(
- (entry.time - base_time).InMillisecondsRoundedUp());
- } else if (is_end) {
- timing.proxy_end = static_cast<int32>(
- (entry.time - base_time).InMillisecondsRoundedUp());
- }
- break;
- case net::NetLog::TYPE_SOCKET_POOL:
- if (is_begin) {
- timing.connect_start = static_cast<int32>(
- (entry.time - base_time).InMillisecondsRoundedUp());
- } else if (is_end &&
- connect_job_id != net::NetLog::Source::kInvalidId) {
- timing.connect_end = static_cast<int32>(
- (entry.time - base_time).InMillisecondsRoundedUp());
- }
- break;
- case net::NetLog::TYPE_SOCKET_POOL_BOUND_TO_CONNECT_JOB:
- connect_job_id = static_cast<net::NetLogSourceParameter*>(
- entry.params.get())->value().id;
- break;
- case net::NetLog::TYPE_SOCKET_POOL_BOUND_TO_SOCKET:
- {
- uint32 log_id = static_cast<net::NetLogSourceParameter*>(
- entry.params.get())->value().id;
- response->response_head.connection_id =
- log_id != net::NetLog::Source::kInvalidId ? log_id : 0;
- }
- break;
- case net::NetLog::TYPE_HTTP_TRANSACTION_SEND_REQUEST:
- case net::NetLog::TYPE_SPDY_TRANSACTION_SEND_REQUEST:
- if (is_begin) {
- timing.send_start = static_cast<int32>(
- (entry.time - base_time).InMillisecondsRoundedUp());
- } else if (is_end) {
- timing.send_end = static_cast<int32>(
- (entry.time - base_time).InMillisecondsRoundedUp());
- }
- break;
- case net::NetLog::TYPE_HTTP_TRANSACTION_READ_HEADERS:
- case net::NetLog::TYPE_SPDY_TRANSACTION_READ_HEADERS:
- if (is_begin) {
- timing.receive_headers_start = static_cast<int32>(
- (entry.time - base_time).InMillisecondsRoundedUp());
- } else if (is_end) {
- timing.receive_headers_end = static_cast<int32>(
- (entry.time - base_time).InMillisecondsRoundedUp());
- }
- break;
- default:
- break;
- }
- }
-
- // For DNS time, get the ID of the "connect job" from the
- // BOUND_TO_CONNECT_JOB entry, in its source info look at the
- // HOST_RESOLVER_IMPL times.
- if (connect_job_id == net::NetLog::Source::kInvalidId) {
- // Clean up connection time to match contract.
- timing.connect_start = -1;
- timing.connect_end = -1;
- return;
- }
-
- PassiveLogCollector::SourceTracker* connect_job_tracker =
- static_cast<PassiveLogCollector::SourceTracker*>(
- collector->GetTrackerForSourceType(net::NetLog::SOURCE_CONNECT_JOB));
- PassiveLogCollector::SourceInfo* connect_job =
- connect_job_tracker->GetSourceInfo(connect_job_id);
- if (!connect_job)
+ ChromeNetLog* chrome_net_log = static_cast<ChromeNetLog*>(
+ request->net_log().net_log());
+ if (chrome_net_log == NULL)
return;
- for (PassiveLogCollector::EntryList::const_iterator it =
- connect_job->entries.begin();
- it != connect_job->entries.end(); ++it) {
- const PassiveLogCollector::Entry& entry = *it;
- if (entry.phase == net::NetLog::PHASE_BEGIN &&
- entry.type == net::NetLog::TYPE_HOST_RESOLVER_IMPL) {
- timing.dns_start = static_cast<int32>(
- (entry.time - base_time).InMillisecondsRoundedUp());
- } else if (entry.phase == net::NetLog::PHASE_END &&
- entry.type == net::NetLog::TYPE_HOST_RESOLVER_IMPL) {
- timing.dns_end = static_cast<int32>(
- (entry.time - base_time).InMillisecondsRoundedUp());
- // Connect time already includes dns time, subtract it here.
- break;
- }
+ uint32 source_id = request->net_log().source().id;
+ LoadTimingObserver* observer = chrome_net_log->load_timing_observer();
+ LoadTimingObserver::URLRequestRecord* record =
+ observer->GetURLRequestRecord(source_id);
+ if (record) {
+ response->response_head.connection_id = record->socket_log_id;
+ response->response_head.connection_reused = record->socket_reused;
+ response->response_head.load_timing = record->timing;
}
}
@@ -255,9 +127,7 @@ bool AsyncResourceHandler::OnRequestRedirected(int request_id,
*defer = true;
URLRequest* request = rdh_->GetURLRequest(
GlobalRequestID(process_id_, request_id));
- // TODO(pfeldman): enable once migrated to LoadTimingObserver.
- if (false && request && (request->load_flags() & net::LOAD_ENABLE_LOAD_TIMING))
- PopulateTimingInfo(request, response);
+ PopulateTimingInfo(request, response);
return receiver_->Send(new ViewMsg_Resource_ReceivedRedirect(
routing_id_, request_id, new_url, response->response_head));
}
@@ -272,9 +142,7 @@ bool AsyncResourceHandler::OnResponseStarted(int request_id,
URLRequest* request = rdh_->GetURLRequest(
GlobalRequestID(process_id_, request_id));
- // TODO(pfeldman): enable once migrated to LoadTimingObserver.
- if (false && request->load_flags() & net::LOAD_ENABLE_LOAD_TIMING)
- PopulateTimingInfo(request, response);
+ PopulateTimingInfo(request, response);
ResourceDispatcherHostRequestInfo* info = rdh_->InfoForRequest(request);
if (info->resource_type() == ResourceType::MAIN_FRAME) {
diff --git a/chrome/chrome_browser.gypi b/chrome/chrome_browser.gypi
index d9175e6..1d77ffd 100644
--- a/chrome/chrome_browser.gypi
+++ b/chrome/chrome_browser.gypi
@@ -1848,6 +1848,8 @@
'browser/net/connection_tester.h',
'browser/net/gaia/token_service.cc',
'browser/net/gaia/token_service.h',
+ 'browser/net/load_timing_observer.cc',
+ 'browser/net/load_timing_observer.h',
'browser/net/metadata_url_request.cc',
'browser/net/metadata_url_request.h',
'browser/net/passive_log_collector.cc',
diff --git a/chrome/common/render_messages.h b/chrome/common/render_messages.h
index 3a419cc2..826703e 100644
--- a/chrome/common/render_messages.h
+++ b/chrome/common/render_messages.h
@@ -1489,6 +1489,7 @@ struct ParamTraits<webkit_glue::ResourceLoaderBridge::ResponseInfo> {
WriteParam(m, p.appcache_id);
WriteParam(m, p.appcache_manifest_url);
WriteParam(m, p.connection_id);
+ WriteParam(m, p.connection_reused);
WriteParam(m, p.load_timing);
WriteParam(m, p.was_fetched_via_spdy);
WriteParam(m, p.was_npn_negotiated);
@@ -1507,6 +1508,7 @@ struct ParamTraits<webkit_glue::ResourceLoaderBridge::ResponseInfo> {
ReadParam(m, iter, &r->appcache_id) &&
ReadParam(m, iter, &r->appcache_manifest_url) &&
ReadParam(m, iter, &r->connection_id) &&
+ ReadParam(m, iter, &r->connection_reused) &&
ReadParam(m, iter, &r->load_timing) &&
ReadParam(m, iter, &r->was_fetched_via_spdy) &&
ReadParam(m, iter, &r->was_npn_negotiated) &&
@@ -1535,6 +1537,8 @@ struct ParamTraits<webkit_glue::ResourceLoaderBridge::ResponseInfo> {
l->append(L", ");
LogParam(p.connection_id, l);
l->append(L", ");
+ LogParam(p.connection_reused, l);
+ l->append(L", ");
LogParam(p.load_timing, l);
l->append(L", ");
LogParam(p.was_fetched_via_spdy, l);
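
The render_messages.h hunk threads the new connection_reused field through the IPC ParamTraits for ResponseInfo. The field has to be added at the same position in Write, Read, and Log because the reader consumes values strictly in the order the writer emitted them; any mismatch shifts every following field. A small, self-contained illustration of that symmetry, using ordinary iostreams as a stand-in for Chromium's Pickle/ParamTraits machinery and a hypothetical subset of the struct:

#include <cstdint>
#include <iostream>
#include <sstream>

struct ResponseInfoLike {           // hypothetical subset of ResponseInfo
  int32_t connection_id = 0;
  bool connection_reused = false;
  int32_t load_timing_ms = 0;
};

void Write(std::ostream& out, const ResponseInfoLike& p) {
  out << p.connection_id << ' ' << p.connection_reused << ' '
      << p.load_timing_ms << ' ';
}

bool Read(std::istream& in, ResponseInfoLike* r) {
  // Must mirror Write() field-for-field, in the same order.
  return static_cast<bool>(in >> r->connection_id >> r->connection_reused >>
                           r->load_timing_ms);
}

int main() {
  ResponseInfoLike sent{7, true, 123};
  std::stringstream pickle;
  Write(pickle, sent);
  ResponseInfoLike received;
  if (Read(pickle, &received))
    std::cout << received.connection_id << ' ' << received.connection_reused
              << ' ' << received.load_timing_ms << "\n";  // prints: 7 1 123
}
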