Diffstat (limited to 'chrome')
-rw-r--r--  chrome/browser/io_thread.cc  7
-rw-r--r--  chrome/browser/io_thread.h  2
-rw-r--r--  chrome/browser/net/chrome_net_log.cc  53
-rw-r--r--  chrome/browser/net/chrome_net_log.h  54
-rw-r--r--  chrome/browser/net/chrome_url_request_context.cc  13
-rw-r--r--  chrome/browser/net/chrome_url_request_context.h  7
-rw-r--r--  chrome/browser/net/dns_master.cc  3
-rw-r--r--  chrome/browser/net/passive_log_collector.cc  318
-rw-r--r--  chrome/browser/net/passive_log_collector.h  138
-rw-r--r--  chrome/browser/net/passive_log_collector_unittest.cc  213
-rw-r--r--  chrome/browser/net/view_net_internals_job_factory.cc  101
-rw-r--r--  chrome/browser/sync/notifier/communicator/ssl_socket_adapter.cc  2
-rw-r--r--  chrome/browser/sync/notifier/communicator/ssl_socket_adapter.h  4
-rwxr-xr-x  chrome/chrome_browser.gypi  4
-rw-r--r--  chrome/chrome_tests.gypi  1
15 files changed, 861 insertions, 59 deletions
diff --git a/chrome/browser/io_thread.cc b/chrome/browser/io_thread.cc
index cda63ef..2ea8195 100644
--- a/chrome/browser/io_thread.cc
+++ b/chrome/browser/io_thread.cc
@@ -8,7 +8,9 @@
#include "base/logging.h"
#include "chrome/browser/browser_process.h"
#include "chrome/browser/chrome_thread.h"
+#include "chrome/browser/net/chrome_net_log.h"
#include "chrome/browser/net/dns_global.h"
+#include "chrome/browser/net/passive_log_collector.h"
#include "chrome/browser/net/url_fetcher.h"
#include "chrome/common/chrome_switches.h"
#include "net/base/mapped_host_resolver.h"
@@ -124,6 +126,7 @@ void IOThread::Init() {
DCHECK(!globals_);
globals_ = new Globals;
+ globals_->net_log.reset(new ChromeNetLog());
globals_->network_change_notifier.reset(
net::NetworkChangeNotifier::CreateDefaultNetworkChangeNotifier());
globals_->host_resolver =
@@ -241,4 +244,8 @@ void IOThread::ChangedToOnTheRecordOnIOThread() {
if (host_cache)
host_cache->clear();
}
+ // Clear all of the passively logged data.
+ // TODO(eroman): this is a bit heavy-handed; really all we need to do is
+ // clear the data pertaining to the off-the-record context.
+ globals_->net_log->passive_collector()->Clear();
}
diff --git a/chrome/browser/io_thread.h b/chrome/browser/io_thread.h
index 6de1bae..404f69e 100644
--- a/chrome/browser/io_thread.h
+++ b/chrome/browser/io_thread.h
@@ -13,6 +13,7 @@
#include "chrome/common/net/dns.h"
#include "net/base/host_resolver.h"
+class ChromeNetLog;
class ListValue;
namespace chrome_browser_net {
@@ -27,6 +28,7 @@ class NetworkChangeNotifier;
class IOThread : public BrowserProcessSubThread {
public:
struct Globals {
+ scoped_ptr<ChromeNetLog> net_log;
scoped_ptr<net::NetworkChangeNotifier> network_change_notifier;
// TODO(willchan): Stop reference counting HostResolver. It's owned by
// IOThread now.
diff --git a/chrome/browser/net/chrome_net_log.cc b/chrome/browser/net/chrome_net_log.cc
new file mode 100644
index 0000000..6abf58a
--- /dev/null
+++ b/chrome/browser/net/chrome_net_log.cc
@@ -0,0 +1,53 @@
+// Copyright (c) 2010 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "chrome/browser/net/chrome_net_log.h"
+
+#include <algorithm>
+
+#include "base/logging.h"
+#include "base/string_util.h"
+#include "chrome/browser/chrome_thread.h"
+#include "chrome/browser/net/passive_log_collector.h"
+
+ChromeNetLog::ChromeNetLog()
+ : next_id_(0),
+ passive_collector_(new PassiveLogCollector) {
+ DCHECK(ChromeThread::CurrentlyOn(ChromeThread::IO));
+ AddObserver(passive_collector_.get());
+}
+
+ChromeNetLog::~ChromeNetLog() {
+ DCHECK(ChromeThread::CurrentlyOn(ChromeThread::IO));
+ RemoveObserver(passive_collector_.get());
+}
+
+void ChromeNetLog::AddEntry(const Entry& entry) {
+ DCHECK(ChromeThread::CurrentlyOn(ChromeThread::IO));
+
+ // Notify all of the log observers.
+ FOR_EACH_OBSERVER(Observer, observers_, OnAddEntry(entry));
+}
+
+int ChromeNetLog::NextID() {
+ DCHECK(ChromeThread::CurrentlyOn(ChromeThread::IO));
+ return next_id_++;
+}
+
+bool ChromeNetLog::HasListener() const {
+ DCHECK(ChromeThread::CurrentlyOn(ChromeThread::IO));
+ // TODO(eroman): Hack to get refactor working.
+ return passive_collector_->url_request_tracker()->IsUnbounded();
+}
+
+void ChromeNetLog::AddObserver(Observer* observer) {
+ DCHECK(ChromeThread::CurrentlyOn(ChromeThread::IO));
+ observers_.AddObserver(observer);
+}
+
+void ChromeNetLog::RemoveObserver(Observer* observer) {
+ DCHECK(ChromeThread::CurrentlyOn(ChromeThread::IO));
+ observers_.RemoveObserver(observer);
+}
+
diff --git a/chrome/browser/net/chrome_net_log.h b/chrome/browser/net/chrome_net_log.h
new file mode 100644
index 0000000..d27c8d0
--- /dev/null
+++ b/chrome/browser/net/chrome_net_log.h
@@ -0,0 +1,54 @@
+// Copyright (c) 2010 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CHROME_BROWSER_NET_CHROME_NET_LOG_H_
+#define CHROME_BROWSER_NET_CHROME_NET_LOG_H_
+
+#include "base/observer_list.h"
+#include "net/base/net_log.h"
+
+class PassiveLogCollector;
+
+// ChromeNetLog is an implementation of NetLog that dispatches network log
+// messages to a list of observers.
+//
+// By default, ChromeNetLog will attach the observer PassiveLogCollector,
+// which keeps track of recent request information (used when displaying
+// the about:net-internals page).
+//
+// TODO(eroman): Move this default observer out of ChromeNetLog.
+//
+class ChromeNetLog : public net::NetLog {
+ public:
+ // Interface for observing the events logged by the network stack.
+ class Observer {
+ public:
+ virtual ~Observer() {}
+ virtual void OnAddEntry(const Entry& entry) = 0;
+ };
+
+ ChromeNetLog();
+ ~ChromeNetLog();
+
+ // NetLog implementation:
+ virtual void AddEntry(const Entry& entry);
+ virtual int NextID();
+ virtual bool HasListener() const;
+
+ void AddObserver(Observer* observer);
+ void RemoveObserver(Observer* observer);
+
+ PassiveLogCollector* passive_collector() {
+ return passive_collector_.get();
+ }
+
+ private:
+ int next_id_;
+ scoped_ptr<PassiveLogCollector> passive_collector_;
+ ObserverList<Observer, true> observers_;
+
+ DISALLOW_COPY_AND_ASSIGN(ChromeNetLog);
+};
+
+#endif // CHROME_BROWSER_NET_CHROME_NET_LOG_H_
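
For context, a minimal sketch of how an additional observer could attach through the interface above. MyObserver is hypothetical and not part of this change; per the DCHECKs in chrome_net_log.cc, AddObserver()/RemoveObserver() must be called on the IO thread, and OnAddEntry() is invoked synchronously there for every entry.

class MyObserver : public ChromeNetLog::Observer {
 public:
  virtual ~MyObserver() {}
  virtual void OnAddEntry(const net::NetLog::Entry& entry) {
    // Runs on the IO thread, once per logged entry.
  }
};

// On the IO thread, using the ChromeNetLog owned by IOThread::Globals:
//   MyObserver observer;
//   net_log->AddObserver(&observer);
//   ...
//   net_log->RemoveObserver(&observer);  // Before |observer| is destroyed.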
diff --git a/chrome/browser/net/chrome_url_request_context.cc b/chrome/browser/net/chrome_url_request_context.cc
index cc19e5b..7ad6ecf 100644
--- a/chrome/browser/net/chrome_url_request_context.cc
+++ b/chrome/browser/net/chrome_url_request_context.cc
@@ -11,6 +11,7 @@
#include "chrome/browser/extensions/extensions_service.h"
#include "chrome/browser/extensions/user_script_master.h"
#include "chrome/browser/io_thread.h"
+#include "chrome/browser/net/chrome_net_log.h"
#include "chrome/browser/net/sqlite_persistent_cookie_store.h"
#include "chrome/browser/net/dns_global.h"
#include "chrome/browser/privacy_blacklist/blacklist.h"
@@ -201,6 +202,7 @@ ChromeURLRequestContext* FactoryForOriginal::Create() {
net::SetURLRequestContextForOCSP(context);
#endif
+ context->set_net_log(io_thread()->globals()->net_log.get());
return context;
}
@@ -293,6 +295,7 @@ ChromeURLRequestContext* FactoryForOffTheRecord::Create() {
context->set_appcache_service(
new ChromeAppCacheService(profile_dir_path_, context));
+ context->set_net_log(io_thread()->globals()->net_log.get());
return context;
}
@@ -595,9 +598,6 @@ void ChromeURLRequestContextGetter::GetCookieStoreAsyncHelper(
ChromeURLRequestContext::ChromeURLRequestContext() {
CheckCurrentlyOnIOThread();
-
- url_request_tracker()->SetGraveyardFilter(
- &ChromeURLRequestContext::ShouldTrackRequest);
}
ChromeURLRequestContext::~ChromeURLRequestContext() {
@@ -752,6 +752,7 @@ ChromeURLRequestContext::ChromeURLRequestContext(
CheckCurrentlyOnIOThread();
// Set URLRequestContext members
+ net_log_ = other->net_log_;
host_resolver_ = other->host_resolver_;
proxy_service_ = other->proxy_service_;
ssl_config_service_ = other->ssl_config_service_;
@@ -798,12 +799,6 @@ void ChromeURLRequestContext::OnDefaultCharsetChange(
net::HttpUtil::GenerateAcceptCharsetHeader(default_charset);
}
-// static
-bool ChromeURLRequestContext::ShouldTrackRequest(const GURL& url) {
- // Exclude "chrome://" URLs from our recent requests circular buffer.
- return !url.SchemeIs("chrome");
-}
-
// ----------------------------------------------------------------------------
// ChromeURLRequestContextFactory
// ----------------------------------------------------------------------------
diff --git a/chrome/browser/net/chrome_url_request_context.h b/chrome/browser/net/chrome_url_request_context.h
index 8f3c260..7e44d04 100644
--- a/chrome/browser/net/chrome_url_request_context.h
+++ b/chrome/browser/net/chrome_url_request_context.h
@@ -201,6 +201,9 @@ class ChromeURLRequestContext : public URLRequestContext {
void set_appcache_service(ChromeAppCacheService* service) {
appcache_service_ = service;
}
+ void set_net_log(net::NetLog* net_log) {
+ net_log_ = net_log;
+ }
// Callback for when the accept language changes.
void OnAcceptLanguageChange(const std::string& accept_language);
@@ -230,10 +233,6 @@ class ChromeURLRequestContext : public URLRequestContext {
bool InterceptCookie(const URLRequest* request,
const std::string& cookie) const;
- // Filter for url_request_tracker(), that prevents "chrome://" requests from
- // being tracked by "about:net-internals".
- static bool ShouldTrackRequest(const GURL& url);
-
DISALLOW_COPY_AND_ASSIGN(ChromeURLRequestContext);
};
diff --git a/chrome/browser/net/dns_master.cc b/chrome/browser/net/dns_master.cc
index 9ac9a0a..431842d 100644
--- a/chrome/browser/net/dns_master.cc
+++ b/chrome/browser/net/dns_master.cc
@@ -18,6 +18,7 @@
#include "net/base/completion_callback.h"
#include "net/base/host_resolver.h"
#include "net/base/net_errors.h"
+#include "net/base/net_log.h"
using base::TimeDelta;
@@ -48,7 +49,7 @@ class DnsMaster::LookupRequest {
// lets the HostResolver know it can de-prioritize it.
resolve_info.set_is_speculative(true);
return resolver_.Resolve(
- resolve_info, &addresses_, &net_callback_, NULL);
+ resolve_info, &addresses_, &net_callback_, net::BoundNetLog());
}
private:
diff --git a/chrome/browser/net/passive_log_collector.cc b/chrome/browser/net/passive_log_collector.cc
new file mode 100644
index 0000000..456ae1b
--- /dev/null
+++ b/chrome/browser/net/passive_log_collector.cc
@@ -0,0 +1,318 @@
+// Copyright (c) 2010 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "chrome/browser/net/passive_log_collector.h"
+
+#include <algorithm>
+
+#include "base/string_util.h"
+#include "chrome/browser/chrome_thread.h"
+
+namespace {
+const size_t kMaxNumEntriesPerLog = 50;
+const size_t kMaxConnectJobGraveyardSize = 3;
+const size_t kMaxRequestGraveyardSize = 25;
+const size_t kMaxLiveRequests = 200;
+
+// Sort function on source ID.
+bool OrderBySourceID(const PassiveLogCollector::RequestInfo& a,
+ const PassiveLogCollector::RequestInfo& b) {
+ return a.entries[0].source.id < b.entries[0].source.id;
+}
+
+void AddEntryToRequestInfo(const net::NetLog::Entry& entry,
+ bool is_unbounded,
+ PassiveLogCollector::RequestInfo* out_info) {
+ // Start dropping new entries when the log has gotten too big.
+ if (out_info->entries.size() + 1 <= kMaxNumEntriesPerLog || is_unbounded) {
+ out_info->entries.push_back(entry);
+ } else {
+ out_info->num_entries_truncated += 1;
+ out_info->entries[kMaxNumEntriesPerLog - 1] = entry;
+ }
+}
+
+void AppendToRequestInfo(const PassiveLogCollector::RequestInfo& info,
+ bool is_unbounded,
+ PassiveLogCollector::RequestInfo* out_info) {
+ for (size_t i = 0; i < info.entries.size(); ++i)
+ AddEntryToRequestInfo(info.entries[i], is_unbounded, out_info);
+}
+
+} // namespace
+
+//----------------------------------------------------------------------------
+// PassiveLogCollector
+//----------------------------------------------------------------------------
+
+PassiveLogCollector::PassiveLogCollector()
+ : url_request_tracker_(&connect_job_tracker_),
+ socket_stream_tracker_(&connect_job_tracker_) {
+}
+
+PassiveLogCollector::~PassiveLogCollector() {
+}
+
+void PassiveLogCollector::OnAddEntry(const net::NetLog::Entry& entry) {
+ switch (entry.source.type) {
+ case net::NetLog::SOURCE_URL_REQUEST:
+ url_request_tracker_.OnAddEntry(entry);
+ break;
+ case net::NetLog::SOURCE_SOCKET_STREAM:
+ socket_stream_tracker_.OnAddEntry(entry);
+ break;
+ case net::NetLog::SOURCE_CONNECT_JOB:
+ connect_job_tracker_.OnAddEntry(entry);
+ break;
+ default:
+ // Drop all other logged events.
+ break;
+ }
+}
+
+void PassiveLogCollector::Clear() {
+ connect_job_tracker_.Clear();
+ url_request_tracker_.Clear();
+ socket_stream_tracker_.Clear();
+}
+
+//----------------------------------------------------------------------------
+// RequestTrackerBase
+//----------------------------------------------------------------------------
+
+PassiveLogCollector::RequestTrackerBase::RequestTrackerBase(
+ size_t max_graveyard_size)
+ : max_graveyard_size_(max_graveyard_size),
+ next_graveyard_index_(0),
+ is_unbounded_(false) {
+}
+
+void PassiveLogCollector::RequestTrackerBase::OnAddEntry(
+ const net::NetLog::Entry& entry) {
+ RequestInfo& info = live_requests_[entry.source.id];
+ Action result = DoAddEntry(entry, &info);
+
+ switch (result) {
+ case ACTION_MOVE_TO_GRAVEYARD:
+ InsertIntoGraveyard(info);
+ // (fall-through)
+ case ACTION_DELETE:
+ RemoveFromLiveRequests(info);
+ break;
+ default:
+ break;
+ }
+
+ if (live_requests_.size() > kMaxLiveRequests) {
+ // This is a safety net in case something went wrong, to avoid continually
+ // growing memory.
+ LOG(WARNING) << "The passive log data has grown larger "
+ "than expected, resetting";
+ live_requests_.clear();
+ }
+}
+
+PassiveLogCollector::RequestInfoList
+PassiveLogCollector::RequestTrackerBase::GetLiveRequests() const {
+ RequestInfoList list;
+
+ // Copy all of the live requests into the vector.
+ for (SourceIDToInfoMap::const_iterator it = live_requests_.begin();
+ it != live_requests_.end();
+ ++it) {
+ list.push_back(it->second);
+ }
+
+ std::sort(list.begin(), list.end(), OrderBySourceID);
+ return list;
+}
+
+void PassiveLogCollector::RequestTrackerBase::ClearRecentlyDeceased() {
+ next_graveyard_index_ = 0;
+ graveyard_.clear();
+}
+
+// Returns a list of recently completed Requests.
+PassiveLogCollector::RequestInfoList
+PassiveLogCollector::RequestTrackerBase::GetRecentlyDeceased() const {
+ RequestInfoList list;
+
+ // Copy the items from |graveyard_| (our circular queue of recently
+ // deceased request infos) into a vector, ordered from oldest to newest.
+ for (size_t i = 0; i < graveyard_.size(); ++i) {
+ size_t index = (next_graveyard_index_ + i) % graveyard_.size();
+ list.push_back(graveyard_[index]);
+ }
+ return list;
+}
+
+const PassiveLogCollector::RequestInfo*
+PassiveLogCollector::RequestTrackerBase::GetRequestInfoFromGraveyard(
+ int source_id) const {
+ // Scan through the graveyard to find an entry for |source_id|.
+ for (size_t i = 0; i < graveyard_.size(); ++i) {
+ if (graveyard_[i].entries[0].source.id == source_id) {
+ return &graveyard_[i];
+ }
+ }
+ return NULL;
+}
+
+void PassiveLogCollector::RequestTrackerBase::RemoveFromLiveRequests(
+ const RequestInfo& info) {
+ // Remove from |live_requests_|.
+ SourceIDToInfoMap::iterator it = live_requests_.find(
+ info.entries[0].source.id);
+ DCHECK(it != live_requests_.end());
+ live_requests_.erase(it);
+}
+
+void PassiveLogCollector::RequestTrackerBase::SetUnbounded(
+ bool unbounded) {
+ // No change.
+ if (is_unbounded_ == unbounded)
+ return;
+
+ // If we are going from unbounded to bounded, we need to trim the
+ // graveyard. For simplicity we will simply clear it.
+ if (is_unbounded_ && !unbounded)
+ ClearRecentlyDeceased();
+
+ is_unbounded_ = unbounded;
+}
+
+void PassiveLogCollector::RequestTrackerBase::Clear() {
+ ClearRecentlyDeceased();
+ live_requests_.clear();
+}
+
+void PassiveLogCollector::RequestTrackerBase::InsertIntoGraveyard(
+ const RequestInfo& info) {
+ if (is_unbounded_) {
+ graveyard_.push_back(info);
+ return;
+ }
+
+ // Otherwise enforce a bound on the graveyard size, by treating it as a
+ // circular buffer.
+ if (graveyard_.size() < max_graveyard_size_) {
+ // Still growing to maximum capacity.
+ DCHECK_EQ(next_graveyard_index_, graveyard_.size());
+ graveyard_.push_back(info);
+ } else {
+ // At maximum capacity, overwrite the oldest entry.
+ graveyard_[next_graveyard_index_] = info;
+ }
+ next_graveyard_index_ = (next_graveyard_index_ + 1) % max_graveyard_size_;
+}
+
+//----------------------------------------------------------------------------
+// ConnectJobTracker
+//----------------------------------------------------------------------------
+
+const size_t PassiveLogCollector::ConnectJobTracker::kMaxGraveyardSize = 3;
+
+PassiveLogCollector::ConnectJobTracker::ConnectJobTracker()
+ : RequestTrackerBase(kMaxGraveyardSize) {
+}
+
+PassiveLogCollector::RequestTrackerBase::Action
+PassiveLogCollector::ConnectJobTracker::DoAddEntry(
+ const net::NetLog::Entry& entry,
+ RequestInfo* out_info) {
+ // Save the entry (possibly truncating).
+ AddEntryToRequestInfo(entry, is_unbounded(), out_info);
+
+ // If this is the end of the connect job, move the request to the graveyard.
+ if (entry.type == net::NetLog::Entry::TYPE_EVENT &&
+ entry.event.type == net::NetLog::TYPE_SOCKET_POOL_CONNECT_JOB &&
+ entry.event.phase == net::NetLog::PHASE_END) {
+ return ACTION_MOVE_TO_GRAVEYARD;
+ }
+
+ return ACTION_NONE;
+}
+
+//----------------------------------------------------------------------------
+// RequestTracker
+//----------------------------------------------------------------------------
+
+const size_t PassiveLogCollector::RequestTracker::kMaxGraveyardSize = 25;
+const size_t PassiveLogCollector::RequestTracker::kMaxGraveyardURLSize = 1000;
+
+PassiveLogCollector::RequestTracker::RequestTracker(
+ ConnectJobTracker* connect_job_tracker)
+ : RequestTrackerBase(kMaxGraveyardSize),
+ connect_job_tracker_(connect_job_tracker) {
+}
+
+PassiveLogCollector::RequestTrackerBase::Action
+PassiveLogCollector::RequestTracker::DoAddEntry(
+ const net::NetLog::Entry& entry,
+ RequestInfo* out_info) {
+
+ if (entry.type == net::NetLog::Entry::TYPE_EVENT &&
+ entry.event.type == net::NetLog::TYPE_SOCKET_POOL_CONNECT_JOB_ID) {
+ // If this was notification that a ConnectJob was bound to the request,
+ // copy all the logged data for that ConnectJob.
+ AddConnectJobInfo(entry, out_info);
+ } else {
+ // Otherwise just append this entry to the request info.
+ AddEntryToRequestInfo(entry, is_unbounded(), out_info);
+ }
+
+ // If this was the start of a URLRequest/SocketStream, extract the URL.
+ if (out_info->entries.size() == 1 &&
+ entry.type == net::NetLog::Entry::TYPE_EVENT &&
+ entry.event.type == net::NetLog::TYPE_REQUEST_ALIVE &&
+ entry.event.phase == net::NetLog::PHASE_BEGIN) {
+ out_info->url = entry.string;
+ out_info->entries[0].string = std::string();
+
+ // Paranoia check: truncate the URL if it is really big.
+ if (out_info->url.size() > kMaxGraveyardURLSize)
+ out_info->url = out_info->url.substr(0, kMaxGraveyardURLSize);
+ }
+
+ // If the request has ended, move it to the graveyard.
+ if (entry.type == net::NetLog::Entry::TYPE_EVENT &&
+ entry.event.type == net::NetLog::TYPE_REQUEST_ALIVE &&
+ entry.event.phase == net::NetLog::PHASE_END) {
+ if (StartsWithASCII(out_info->url, "chrome://", false)) {
+ // Avoid sending "chrome://" requests to the graveyard, since it just
+ // adds to clutter.
+ return ACTION_DELETE;
+ }
+ return ACTION_MOVE_TO_GRAVEYARD;
+ }
+
+ return ACTION_NONE;
+}
+
+void PassiveLogCollector::RequestTracker::AddConnectJobInfo(
+ const net::NetLog::Entry& entry,
+ RequestInfo* live_entry) {
+ // We have just been notified of which ConnectJob the
+ // URLRequest/SocketStream was assigned. Lookup all the data we captured
+ // for the ConnectJob, and append it to the URLRequest/SocketStream's
+ // RequestInfo.
+
+ // TODO(eroman): This should NOT be plumbed through via |error_code| !
+ int connect_job_id = entry.error_code;
+
+ const RequestInfo* connect_job_info =
+ connect_job_tracker_->GetRequestInfoFromGraveyard(connect_job_id);
+
+ if (connect_job_info) {
+ // Append the ConnectJob information we found.
+ AppendToRequestInfo(*connect_job_info, is_unbounded(), live_entry);
+ } else {
+ // If we couldn't find the information for the ConnectJob, append a
+ // generic message instead.
+ net::NetLog::Entry e(entry);
+ e.type = net::NetLog::Entry::TYPE_STRING;
+ e.string = StringPrintf("Used ConnectJob id=%d", connect_job_id);
+ AddEntryToRequestInfo(e, is_unbounded(), live_entry);
+ }
+}
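
The graveyard managed by InsertIntoGraveyard() above is a fixed-size circular buffer: it grows with push_back() until max_graveyard_size_ is reached, then wraps next_graveyard_index_ and overwrites the oldest slot, while GetRecentlyDeceased() walks the buffer starting at that index to recover oldest-to-newest order. A standalone sketch of the same indexing, with string payloads standing in for RequestInfo (CircularGraveyard is illustrative only, not part of the patch):

#include <cstddef>
#include <string>
#include <vector>

class CircularGraveyard {
 public:
  explicit CircularGraveyard(size_t max_size)
      : max_size_(max_size), next_index_(0) {}

  void Insert(const std::string& item) {
    if (items_.size() < max_size_) {
      items_.push_back(item);      // Still growing to maximum capacity.
    } else {
      items_[next_index_] = item;  // At capacity: overwrite the oldest slot.
    }
    next_index_ = (next_index_ + 1) % max_size_;
  }

  // Mirrors GetRecentlyDeceased(): oldest first, newest last.
  std::vector<std::string> OldestToNewest() const {
    std::vector<std::string> result;
    for (size_t i = 0; i < items_.size(); ++i)
      result.push_back(items_[(next_index_ + i) % items_.size()]);
    return result;
  }

 private:
  size_t max_size_;
  size_t next_index_;
  std::vector<std::string> items_;
};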
diff --git a/chrome/browser/net/passive_log_collector.h b/chrome/browser/net/passive_log_collector.h
new file mode 100644
index 0000000..c8f6977
--- /dev/null
+++ b/chrome/browser/net/passive_log_collector.h
@@ -0,0 +1,138 @@
+// Copyright (c) 2010 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CHROME_BROWSER_NET_PASSIVE_LOG_COLLECTOR_H_
+#define CHROME_BROWSER_NET_PASSIVE_LOG_COLLECTOR_H_
+
+#include <vector>
+
+#include "base/hash_tables.h"
+#include "chrome/browser/net/chrome_net_log.h"
+#include "net/base/net_log.h"
+
+class PassiveLogCollector : public ChromeNetLog::Observer {
+ public:
+ struct RequestInfo {
+ RequestInfo() : num_entries_truncated(0) {}
+ std::string url;
+ std::vector<net::NetLog::Entry> entries;
+ size_t num_entries_truncated;
+ };
+
+ typedef std::vector<RequestInfo> RequestInfoList;
+
+ // This class stores and manages the passively logged information for
+ // URLRequests/SocketStreams/ConnectJobs.
+ class RequestTrackerBase {
+ public:
+ explicit RequestTrackerBase(size_t max_graveyard_size);
+
+ void OnAddEntry(const net::NetLog::Entry& entry);
+
+ RequestInfoList GetLiveRequests() const;
+ void ClearRecentlyDeceased();
+ RequestInfoList GetRecentlyDeceased() const;
+ void SetUnbounded(bool unbounded);
+
+ bool IsUnbounded() const { return is_unbounded_; }
+
+ void Clear();
+
+ const RequestInfo* GetRequestInfoFromGraveyard(int id) const;
+
+ protected:
+ enum Action {
+ ACTION_NONE,
+ ACTION_DELETE,
+ ACTION_MOVE_TO_GRAVEYARD,
+ };
+
+ // Updates |out_info| with the information from |entry|. Returns an action
+ // to perform for this map entry on completion.
+ virtual Action DoAddEntry(const net::NetLog::Entry& entry,
+ RequestInfo* out_info) = 0;
+
+ bool is_unbounded() const { return is_unbounded_; }
+
+ private:
+ typedef base::hash_map<int, RequestInfo> SourceIDToInfoMap;
+
+ bool HandleNotificationOfConnectJobID(const net::NetLog::Entry& entry,
+ RequestInfo* live_entry);
+
+ void RemoveFromLiveRequests(const RequestInfo& info);
+ void InsertIntoGraveyard(const RequestInfo& info);
+
+ SourceIDToInfoMap live_requests_;
+ size_t max_graveyard_size_;
+ size_t next_graveyard_index_;
+ RequestInfoList graveyard_;
+ bool is_unbounded_;
+
+ DISALLOW_COPY_AND_ASSIGN(RequestTrackerBase);
+ };
+
+ // Specialization of RequestTrackerBase for handling ConnectJobs.
+ class ConnectJobTracker : public RequestTrackerBase {
+ public:
+ static const size_t kMaxGraveyardSize;
+
+ ConnectJobTracker();
+
+ protected:
+ virtual Action DoAddEntry(const net::NetLog::Entry& entry,
+ RequestInfo* out_info);
+ private:
+ DISALLOW_COPY_AND_ASSIGN(ConnectJobTracker);
+ };
+
+ // Specialization of RequestTrackerBase for handling URLRequest/SocketStream.
+ class RequestTracker : public RequestTrackerBase {
+ public:
+ static const size_t kMaxGraveyardSize;
+ static const size_t kMaxGraveyardURLSize;
+
+ explicit RequestTracker(ConnectJobTracker* connect_job_tracker);
+
+ protected:
+ virtual Action DoAddEntry(const net::NetLog::Entry& entry,
+ RequestInfo* out_info);
+
+ private:
+ // Searches through |connect_job_tracker_| for information on the
+ // ConnectJob specified in |entry|, and appends it to |live_entry|.
+ void AddConnectJobInfo(const net::NetLog::Entry& entry,
+ RequestInfo* live_entry);
+
+ ConnectJobTracker* connect_job_tracker_;
+
+ DISALLOW_COPY_AND_ASSIGN(RequestTracker);
+ };
+
+ PassiveLogCollector();
+ ~PassiveLogCollector();
+
+ // Observer implementation:
+ virtual void OnAddEntry(const net::NetLog::Entry& entry);
+
+ // Clears all of the passively logged data.
+ void Clear();
+
+ RequestTracker* url_request_tracker() {
+ return &url_request_tracker_;
+ }
+
+ RequestTracker* socket_stream_tracker() {
+ return &socket_stream_tracker_;
+ }
+
+ private:
+ ConnectJobTracker connect_job_tracker_;
+ RequestTracker url_request_tracker_;
+ RequestTracker socket_stream_tracker_;
+
+ DISALLOW_COPY_AND_ASSIGN(PassiveLogCollector);
+};
+
+#endif // CHROME_BROWSER_NET_PASSIVE_LOG_COLLECTOR_H_
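
RequestTrackerBase is the extension point in this design: a subclass supplies only DoAddEntry(), folding each entry into the per-source RequestInfo and returning an Action that tells the base class whether the source stays live, gets deleted, or moves to the graveyard. A hedged sketch of what a further specialization could look like (MyTracker and its retirement rule are hypothetical, not part of this change):

class MyTracker : public PassiveLogCollector::RequestTrackerBase {
 public:
  MyTracker() : RequestTrackerBase(10 /* max graveyard size */) {}

 protected:
  virtual Action DoAddEntry(const net::NetLog::Entry& entry,
                            RequestInfo* out_info) {
    // A real tracker would bound the entry list, as
    // AddEntryToRequestInfo() does in passive_log_collector.cc.
    out_info->entries.push_back(entry);

    // Retire the source once its END event arrives.
    if (entry.type == net::NetLog::Entry::TYPE_EVENT &&
        entry.event.phase == net::NetLog::PHASE_END)
      return ACTION_MOVE_TO_GRAVEYARD;
    return ACTION_NONE;
  }

 private:
  DISALLOW_COPY_AND_ASSIGN(MyTracker);
};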
diff --git a/chrome/browser/net/passive_log_collector_unittest.cc b/chrome/browser/net/passive_log_collector_unittest.cc
new file mode 100644
index 0000000..ae18839
--- /dev/null
+++ b/chrome/browser/net/passive_log_collector_unittest.cc
@@ -0,0 +1,213 @@
+// Copyright (c) 2010 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "chrome/browser/net/passive_log_collector.h"
+
+#include "base/compiler_specific.h"
+#include "base/format_macros.h"
+#include "base/string_util.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace {
+
+typedef PassiveLogCollector::RequestTracker RequestTracker;
+typedef PassiveLogCollector::RequestInfoList RequestInfoList;
+
+const net::NetLog::SourceType kSourceType = net::NetLog::SOURCE_NONE;
+
+net::NetLog::Entry MakeStartLogEntryWithURL(int source_id,
+ const std::string& url) {
+ net::NetLog::Entry entry;
+ entry.source.type = kSourceType;
+ entry.source.id = source_id;
+ entry.type = net::NetLog::Entry::TYPE_EVENT;
+ entry.event = net::NetLog::Event(net::NetLog::TYPE_REQUEST_ALIVE,
+ net::NetLog::PHASE_BEGIN);
+ entry.string = url;
+ return entry;
+}
+
+net::NetLog::Entry MakeStartLogEntry(int source_id) {
+ return MakeStartLogEntryWithURL(source_id,
+ StringPrintf("http://req%d", source_id));
+}
+
+net::NetLog::Entry MakeEndLogEntry(int source_id) {
+ net::NetLog::Entry entry;
+ entry.source.type = kSourceType;
+ entry.source.id = source_id;
+ entry.type = net::NetLog::Entry::TYPE_EVENT;
+ entry.event = net::NetLog::Event(net::NetLog::TYPE_REQUEST_ALIVE,
+ net::NetLog::PHASE_END);
+ return entry;
+}
+
+static const int kMaxNumLoadLogEntries = 1;
+
+TEST(RequestTrackerTest, BasicBounded) {
+ RequestTracker tracker(NULL);
+ EXPECT_FALSE(tracker.IsUnbounded());
+ EXPECT_EQ(0u, tracker.GetLiveRequests().size());
+ EXPECT_EQ(0u, tracker.GetRecentlyDeceased().size());
+
+ tracker.OnAddEntry(MakeStartLogEntry(1));
+ tracker.OnAddEntry(MakeStartLogEntry(2));
+ tracker.OnAddEntry(MakeStartLogEntry(3));
+ tracker.OnAddEntry(MakeStartLogEntry(4));
+ tracker.OnAddEntry(MakeStartLogEntry(5));
+
+ RequestInfoList live_reqs = tracker.GetLiveRequests();
+
+ ASSERT_EQ(5u, live_reqs.size());
+ EXPECT_EQ("http://req1", live_reqs[0].url);
+ EXPECT_EQ("http://req2", live_reqs[1].url);
+ EXPECT_EQ("http://req3", live_reqs[2].url);
+ EXPECT_EQ("http://req4", live_reqs[3].url);
+ EXPECT_EQ("http://req5", live_reqs[4].url);
+
+ tracker.OnAddEntry(MakeEndLogEntry(1));
+ tracker.OnAddEntry(MakeEndLogEntry(5));
+ tracker.OnAddEntry(MakeEndLogEntry(3));
+
+ ASSERT_EQ(3u, tracker.GetRecentlyDeceased().size());
+
+ live_reqs = tracker.GetLiveRequests();
+
+ ASSERT_EQ(2u, live_reqs.size());
+ EXPECT_EQ("http://req2", live_reqs[0].url);
+ EXPECT_EQ("http://req4", live_reqs[1].url);
+}
+
+TEST(RequestTrackerTest, GraveyardBounded) {
+ RequestTracker tracker(NULL);
+ EXPECT_FALSE(tracker.IsUnbounded());
+ EXPECT_EQ(0u, tracker.GetLiveRequests().size());
+ EXPECT_EQ(0u, tracker.GetRecentlyDeceased().size());
+
+ // Add twice as many requests as will fit in the graveyard.
+ for (size_t i = 0; i < RequestTracker::kMaxGraveyardSize * 2; ++i) {
+ tracker.OnAddEntry(MakeStartLogEntry(i));
+ tracker.OnAddEntry(MakeEndLogEntry(i));
+ }
+
+ // Check that only the last |kMaxGraveyardSize| requests are still in memory.
+
+ RequestInfoList recent_reqs = tracker.GetRecentlyDeceased();
+
+ ASSERT_EQ(RequestTracker::kMaxGraveyardSize, recent_reqs.size());
+
+ for (size_t i = 0; i < RequestTracker::kMaxGraveyardSize; ++i) {
+ size_t req_number = i + RequestTracker::kMaxGraveyardSize;
+ std::string url = StringPrintf("http://req%" PRIuS, req_number);
+ EXPECT_EQ(url, recent_reqs[i].url);
+ }
+}
+
+TEST(RequestTrackerTest, GraveyardUnbounded) {
+ RequestTracker tracker(NULL);
+ EXPECT_FALSE(tracker.IsUnbounded());
+ EXPECT_EQ(0u, tracker.GetLiveRequests().size());
+ EXPECT_EQ(0u, tracker.GetRecentlyDeceased().size());
+
+ tracker.SetUnbounded(true);
+
+ EXPECT_TRUE(tracker.IsUnbounded());
+
+ // Add twice as many requests as would fit in the bounded graveyard.
+
+ size_t kMaxSize = RequestTracker::kMaxGraveyardSize * 2;
+ for (size_t i = 0; i < kMaxSize; ++i) {
+ tracker.OnAddEntry(MakeStartLogEntry(i));
+ tracker.OnAddEntry(MakeEndLogEntry(i));
+ }
+
+ // Check that all of them got saved.
+
+ RequestInfoList recent_reqs = tracker.GetRecentlyDeceased();
+
+ ASSERT_EQ(kMaxSize, recent_reqs.size());
+
+ for (size_t i = 0; i < kMaxSize; ++i) {
+ std::string url = StringPrintf("http://req%" PRIuS, i);
+ EXPECT_EQ(url, recent_reqs[i].url);
+ }
+}
+
+// Check that very long URLs are truncated.
+TEST(RequestTrackerTest, GraveyardURLBounded) {
+ RequestTracker tracker(NULL);
+ EXPECT_FALSE(tracker.IsUnbounded());
+
+ std::string big_url("http://");
+ big_url.resize(2 * RequestTracker::kMaxGraveyardURLSize, 'x');
+
+ tracker.OnAddEntry(MakeStartLogEntryWithURL(1, big_url));
+ tracker.OnAddEntry(MakeEndLogEntry(1));
+
+ ASSERT_EQ(1u, tracker.GetRecentlyDeceased().size());
+ EXPECT_EQ(RequestTracker::kMaxGraveyardURLSize,
+ tracker.GetRecentlyDeceased()[0].url.size());
+}
+
+// Check that we exclude "chrome://" URLs from being saved into the recent
+// requests list (graveyard).
+TEST(RequestTrackerTest, GraveyardIsFiltered) {
+ RequestTracker tracker(NULL);
+ EXPECT_FALSE(tracker.IsUnbounded());
+
+ // This will be excluded.
+ std::string url1 = "chrome://dontcare/";
+ tracker.OnAddEntry(MakeStartLogEntryWithURL(1, url1));
+ tracker.OnAddEntry(MakeEndLogEntry(1));
+
+ // This will be added to the graveyard.
+ std::string url2 = "chrome2://dontcare/";
+ tracker.OnAddEntry(MakeStartLogEntryWithURL(2, url2));
+ tracker.OnAddEntry(MakeEndLogEntry(2));
+
+ // This will be added to the graveyard.
+ std::string url3 = "http://foo/";
+ tracker.OnAddEntry(MakeStartLogEntryWithURL(3, url3));
+ tracker.OnAddEntry(MakeEndLogEntry(3));
+
+ ASSERT_EQ(2u, tracker.GetRecentlyDeceased().size());
+ EXPECT_EQ(url2, tracker.GetRecentlyDeceased()[0].url);
+ EXPECT_EQ(url3, tracker.GetRecentlyDeceased()[1].url);
+}
+
+// Convert an unbounded tracker back to being bounded.
+TEST(RequestTrackerTest, ConvertUnboundedToBounded) {
+ RequestTracker tracker(NULL);
+ EXPECT_FALSE(tracker.IsUnbounded());
+ EXPECT_EQ(0u, tracker.GetLiveRequests().size());
+ EXPECT_EQ(0u, tracker.GetRecentlyDeceased().size());
+
+ tracker.SetUnbounded(true);
+ EXPECT_TRUE(tracker.IsUnbounded());
+
+ // Add twice as many requests as would fit in the bounded graveyard.
+
+ size_t kMaxSize = RequestTracker::kMaxGraveyardSize * 2;
+ for (size_t i = 0; i < kMaxSize; ++i) {
+ tracker.OnAddEntry(MakeStartLogEntry(i));
+ tracker.OnAddEntry(MakeEndLogEntry(i));
+ }
+
+ // Check that all of them got saved.
+ ASSERT_EQ(kMaxSize, tracker.GetRecentlyDeceased().size());
+
+ // Now make the tracker bounded, and add more entries to its graveyard.
+ tracker.SetUnbounded(false);
+
+ kMaxSize = RequestTracker::kMaxGraveyardSize;
+ for (size_t i = kMaxSize; i < 2 * kMaxSize; ++i) {
+ tracker.OnAddEntry(MakeStartLogEntry(i));
+ tracker.OnAddEntry(MakeEndLogEntry(i));
+ }
+
+ // We should only have kMaxGraveyardSize entries now.
+ ASSERT_EQ(kMaxSize, tracker.GetRecentlyDeceased().size());
+}
+
+} // namespace
diff --git a/chrome/browser/net/view_net_internals_job_factory.cc b/chrome/browser/net/view_net_internals_job_factory.cc
index ab58dfcf..987a54b 100644
--- a/chrome/browser/net/view_net_internals_job_factory.cc
+++ b/chrome/browser/net/view_net_internals_job_factory.cc
@@ -9,11 +9,14 @@
#include "base/format_macros.h"
#include "base/stl_util-inl.h"
#include "base/string_util.h"
+#include "chrome/browser/net/chrome_net_log.h"
+#include "chrome/browser/net/chrome_url_request_context.h"
+#include "chrome/browser/net/passive_log_collector.h"
#include "chrome/common/url_constants.h"
#include "net/base/escape.h"
#include "net/base/host_resolver_impl.h"
-#include "net/base/load_log_util.h"
#include "net/base/net_errors.h"
+#include "net/base/net_log_util.h"
#include "net/base/net_util.h"
#include "net/base/sys_addrinfo.h"
#include "net/proxy/proxy_service.h"
@@ -27,6 +30,26 @@ namespace {
const char kViewHttpCacheSubPath[] = "view-cache";
+PassiveLogCollector* GetPassiveLogCollector(URLRequestContext* context) {
+ // Really this is the same as:
+ // g_browser_process->io_thread()->globals()->net_log.get()
+ // (But we can't access g_browser_process from the IO thread).
+ ChromeNetLog* chrome_net_log = static_cast<ChromeNetLog*>(
+ static_cast<ChromeURLRequestContext*>(context)->net_log());
+ return chrome_net_log->passive_collector();
+}
+
+PassiveLogCollector::RequestTracker* GetURLRequestTracker(
+ URLRequestContext* context) {
+ return GetPassiveLogCollector(context)->url_request_tracker();
+}
+
+PassiveLogCollector::RequestTracker* GetSocketStreamTracker(
+ URLRequestContext* context) {
+ return GetPassiveLogCollector(context)->socket_stream_tracker();
+}
+
std::string GetDetails(const GURL& url) {
DCHECK(ViewNetInternalsJobFactory::IsSupportedURL(url));
size_t start = strlen(chrome::kNetworkViewInternalsURL);
@@ -232,12 +255,8 @@ class ProxyServiceLastInitLogSubSection : public SubSection {
virtual void OutputBody(URLRequestContext* context, std::string* out) {
net::ProxyService* proxy_service = context->proxy_service();
- net::LoadLog* log = proxy_service->init_proxy_resolver_log();
- if (log) {
- OutputTextInPre(net::LoadLogUtil::PrettyPrintAsEventTree(log), out);
- } else {
- out->append("<i>None.</i>");
- }
+ OutputTextInPre(net::NetLogUtil::PrettyPrintAsEventTree(
+ proxy_service->init_proxy_resolver_log().entries(), 0), out);
}
};
@@ -418,15 +437,15 @@ class HostResolverTraceSubSection : public SubSection {
DrawCommandButton("Enable tracing", "hostresolver-trace-enable", out);
}
- scoped_refptr<net::LoadLog> log = resolver->GetRequestsTrace();
-
- if (log) {
+ std::vector<net::NetLog::Entry> entries;
+ if (resolver->GetRequestsTrace(&entries)) {
out->append(
"<p>To make sense of this trace, process it with the Python script "
"formatter.py at "
"<a href='http://src.chromium.org/viewvc/chrome/trunk/src/net/tools/"
"dns_trace_formatter/'>net/tools/dns_trace_formatter</a></p>");
- OutputTextInPre(net::LoadLogUtil::PrettyPrintAsEventTree(log), out);
+ OutputTextInPre(net::NetLogUtil::PrettyPrintAsEventTree(entries, 0),
+ out);
} else {
out->append("<p><i>No trace information, must enable tracing.</i></p>");
}
@@ -443,15 +462,17 @@ class HostResolverSubSection : public SubSection {
};
// Helper for the URLRequest "outstanding" and "live" sections.
-void OutputURLAndLoadLog(const GURL& url,
- const net::LoadLog* log,
+void OutputURLAndLoadLog(const PassiveLogCollector::RequestInfo& request,
std::string* out) {
out->append("<li>");
out->append("<nobr>");
- out->append(EscapeForHTML(url.possibly_invalid_spec()));
+ out->append(EscapeForHTML(request.url));
out->append("</nobr>");
- if (log)
- OutputTextInPre(net::LoadLogUtil::PrettyPrintAsEventTree(log), out);
+ OutputTextInPre(
+ net::NetLogUtil::PrettyPrintAsEventTree(
+ request.entries,
+ request.num_entries_truncated),
+ out);
out->append("</li>");
}
@@ -462,16 +483,14 @@ class URLRequestLiveSubSection : public SubSection {
}
virtual void OutputBody(URLRequestContext* context, std::string* out) {
- std::vector<URLRequest*> requests =
- context->url_request_tracker()->GetLiveRequests();
+ PassiveLogCollector::RequestInfoList requests =
+ GetURLRequestTracker(context)->GetLiveRequests();
out->append("<ol>");
for (size_t i = 0; i < requests.size(); ++i) {
// Reverse the list order, so we display from most recent to oldest.
size_t index = requests.size() - i - 1;
- OutputURLAndLoadLog(requests[index]->original_url(),
- requests[index]->load_log(),
- out);
+ OutputURLAndLoadLog(requests[index], out);
}
out->append("</ol>");
}
@@ -484,8 +503,8 @@ class URLRequestRecentSubSection : public SubSection {
}
virtual void OutputBody(URLRequestContext* context, std::string* out) {
- RequestTracker<URLRequest>::RecentRequestInfoList recent =
- context->url_request_tracker()->GetRecentlyDeceased();
+ PassiveLogCollector::RequestInfoList recent =
+ GetURLRequestTracker(context)->GetRecentlyDeceased();
DrawCommandButton("Clear", "clear-urlrequest-graveyard", out);
@@ -493,8 +512,7 @@ class URLRequestRecentSubSection : public SubSection {
for (size_t i = 0; i < recent.size(); ++i) {
// Reverse the list order, so we display from most recent to oldest.
size_t index = recent.size() - i - 1;
- OutputURLAndLoadLog(recent[index].original_url,
- recent[index].load_log, out);
+ OutputURLAndLoadLog(recent[index], out);
}
out->append("</ol>");
}
@@ -542,16 +560,14 @@ class SocketStreamLiveSubSection : public SubSection {
}
virtual void OutputBody(URLRequestContext* context, std::string* out) {
- std::vector<net::SocketStream*> sockets =
- context->socket_stream_tracker()->GetLiveRequests();
+ PassiveLogCollector::RequestInfoList sockets =
+ GetSocketStreamTracker(context)->GetLiveRequests();
out->append("<ol>");
for (size_t i = 0; i < sockets.size(); ++i) {
// Reverse the list order, so we display from most recent to oldest.
size_t index = sockets.size() - i - 1;
- OutputURLAndLoadLog(sockets[index]->url(),
- sockets[index]->load_log(),
- out);
+ OutputURLAndLoadLog(sockets[index], out);
}
out->append("</ol>");
}
@@ -564,8 +580,8 @@ class SocketStreamRecentSubSection : public SubSection {
}
virtual void OutputBody(URLRequestContext* context, std::string* out) {
- RequestTracker<net::SocketStream>::RecentRequestInfoList recent =
- context->socket_stream_tracker()->GetRecentlyDeceased();
+ PassiveLogCollector::RequestInfoList recent =
+ GetSocketStreamTracker(context)->GetRecentlyDeceased();
DrawCommandButton("Clear", "clear-socketstream-graveyard", out);
@@ -573,8 +589,7 @@ class SocketStreamRecentSubSection : public SubSection {
for (size_t i = 0; i < recent.size(); ++i) {
// Reverse the list order, so we display from most recent to oldest.
size_t index = recent.size() - i - 1;
- OutputURLAndLoadLog(recent[index].original_url,
- recent[index].load_log, out);
+ OutputURLAndLoadLog(recent[index], out);
}
out->append("</ol>");
}
@@ -600,11 +615,12 @@ class AllSubSections : public SubSection {
}
};
-bool HandleCommand(const std::string& command, URLRequestContext* context) {
+bool HandleCommand(const std::string& command,
+ URLRequestContext* context) {
if (StartsWithASCII(command, "full-logging-", true)) {
bool enable_full_logging = (command == "full-logging-enable");
- context->url_request_tracker()->SetUnbounded(enable_full_logging);
- context->socket_stream_tracker()->SetUnbounded(enable_full_logging);
+ GetURLRequestTracker(context)->SetUnbounded(enable_full_logging);
+ GetSocketStreamTracker(context)->SetUnbounded(enable_full_logging);
return true;
}
@@ -616,12 +632,12 @@ bool HandleCommand(const std::string& command, URLRequestContext* context) {
}
if (command == "clear-urlrequest-graveyard") {
- context->url_request_tracker()->ClearRecentlyDeceased();
+ GetURLRequestTracker(context)->ClearRecentlyDeceased();
return true;
}
if (command == "clear-socketstream-graveyard") {
- context->socket_stream_tracker()->ClearRecentlyDeceased();
+ GetSocketStreamTracker(context)->ClearRecentlyDeceased();
return true;
}
@@ -674,8 +690,8 @@ void ProcessQueryStringCommands(URLRequestContext* context,
// logging, and clear some of the already logged data.
void DrawControlsHeader(URLRequestContext* context, std::string* data) {
bool is_full_logging_enabled =
- context->url_request_tracker()->IsUnbounded() &&
- context->socket_stream_tracker()->IsUnbounded();
+ GetURLRequestTracker(context)->IsUnbounded() &&
+ GetSocketStreamTracker(context)->IsUnbounded();
data->append("<div style='margin-bottom: 10px'>");
@@ -703,7 +719,8 @@ bool ViewNetInternalsJob::GetData(std::string* mime_type,
mime_type->assign("text/html");
charset->assign("UTF-8");
- URLRequestContext* context = request_->context();
+ URLRequestContext* context =
+ static_cast<URLRequestContext*>(request_->context());
data->clear();
diff --git a/chrome/browser/sync/notifier/communicator/ssl_socket_adapter.cc b/chrome/browser/sync/notifier/communicator/ssl_socket_adapter.cc
index 14f4392..875052b 100644
--- a/chrome/browser/sync/notifier/communicator/ssl_socket_adapter.cc
+++ b/chrome/browser/sync/notifier/communicator/ssl_socket_adapter.cc
@@ -237,7 +237,7 @@ TransportSocket::TransportSocket(talk_base::AsyncSocket* socket,
}
int TransportSocket::Connect(net::CompletionCallback* callback,
- net::LoadLog* /* load_log */) {
+ const net::BoundNetLog& /* net_log */) {
connect_callback_ = callback;
return socket_->Connect(addr_);
}
diff --git a/chrome/browser/sync/notifier/communicator/ssl_socket_adapter.h b/chrome/browser/sync/notifier/communicator/ssl_socket_adapter.h
index 12dfc74..2e7d618 100644
--- a/chrome/browser/sync/notifier/communicator/ssl_socket_adapter.h
+++ b/chrome/browser/sync/notifier/communicator/ssl_socket_adapter.h
@@ -15,7 +15,7 @@
#include "talk/base/ssladapter.h"
namespace net {
-class LoadLog;
+class BoundNetLog;
} // namespace net
namespace notifier {
@@ -38,7 +38,7 @@ class TransportSocket : public net::ClientSocket, public sigslot::has_slots<> {
// net::ClientSocket implementation
virtual int Connect(net::CompletionCallback* callback,
- net::LoadLog* /* load_log */);
+ const net::BoundNetLog& /* net_log */);
virtual void Disconnect();
virtual bool IsConnected() const;
virtual bool IsConnectedAndIdle() const;
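
The two ssl_socket_adapter changes mirror the dns_master.cc hunk above: with the migration from net::LoadLog* to const net::BoundNetLog&, call sites that have nothing to log pass a default-constructed BoundNetLog instead of NULL. A sketch, assuming |socket| is a net::ClientSocket* and |callback| a net::CompletionCallback (an unbound BoundNetLog is not attached to any NetLog, so entries logged through it go nowhere):

// Before: int rv = socket->Connect(&callback, NULL);
int rv = socket->Connect(&callback, net::BoundNetLog());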
diff --git a/chrome/chrome_browser.gypi b/chrome/chrome_browser.gypi
index 22010e6..761a351 100755
--- a/chrome/chrome_browser.gypi
+++ b/chrome/chrome_browser.gypi
@@ -1477,6 +1477,8 @@
'browser/net/browser_url_util.h',
'browser/net/chrome_cookie_policy.cc',
'browser/net/chrome_cookie_policy.h',
+ 'browser/net/chrome_net_log.cc',
+ 'browser/net/chrome_net_log.h',
'browser/net/chrome_url_request_context.cc',
'browser/net/chrome_url_request_context.h',
'browser/net/url_request_context_getter.cc',
@@ -1489,6 +1491,8 @@
'browser/net/dns_master.h',
'browser/net/metadata_url_request.cc',
'browser/net/metadata_url_request.h',
+ 'browser/net/passive_log_collector.cc',
+ 'browser/net/passive_log_collector.h',
'browser/net/referrer.cc',
'browser/net/referrer.h',
'browser/net/resolve_proxy_msg_helper.cc',
diff --git a/chrome/chrome_tests.gypi b/chrome/chrome_tests.gypi
index 9cb92f7..3693f06 100644
--- a/chrome/chrome_tests.gypi
+++ b/chrome/chrome_tests.gypi
@@ -800,6 +800,7 @@
'browser/net/chrome_url_request_context_unittest.cc',
'browser/net/dns_host_info_unittest.cc',
'browser/net/dns_master_unittest.cc',
+ 'browser/net/passive_log_collector_unittest.cc',
'browser/net/resolve_proxy_msg_helper_unittest.cc',
'browser/net/test_url_fetcher_factory.cc',
'browser/net/test_url_fetcher_factory.h',